core/stdarch/crates/core_arch/src/aarch64/neon/generated.rs

// This code is automatically generated. DO NOT MODIFY.
//
// Instead, modify `crates/stdarch-gen-arm/neon.spec` and run the following command to re-generate this file:
//
// ```
// OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-arm -- crates/stdarch-gen-arm/neon.spec
// ```
8use super::*;
9#[cfg(test)]
10use stdarch_test::assert_instr;
11
12/// Three-way exclusive OR
13///
14/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)
15#[inline]
16#[target_feature(enable = "neon,sha3")]
17#[cfg_attr(test, assert_instr(eor3))]
18#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
19pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
20    #[allow(improper_ctypes)]
21    extern "unadjusted" {
22        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v16i8")]
23        fn veor3q_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
24    }
25    veor3q_s8_(a, b, c)
26}
27
28/// Three-way exclusive OR
29///
30/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)
31#[inline]
32#[target_feature(enable = "neon,sha3")]
33#[cfg_attr(test, assert_instr(eor3))]
34#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
35pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
36    #[allow(improper_ctypes)]
37    extern "unadjusted" {
38        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v8i16")]
39        fn veor3q_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
40    }
41    veor3q_s16_(a, b, c)
42}
43
44/// Three-way exclusive OR
45///
46/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)
47#[inline]
48#[target_feature(enable = "neon,sha3")]
49#[cfg_attr(test, assert_instr(eor3))]
50#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
51pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
52    #[allow(improper_ctypes)]
53    extern "unadjusted" {
54        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v4i32")]
55        fn veor3q_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
56    }
57    veor3q_s32_(a, b, c)
58}
59
60/// Three-way exclusive OR
61///
62/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)
63#[inline]
64#[target_feature(enable = "neon,sha3")]
65#[cfg_attr(test, assert_instr(eor3))]
66#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
67pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
68    #[allow(improper_ctypes)]
69    extern "unadjusted" {
70        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v2i64")]
71        fn veor3q_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
72    }
73    veor3q_s64_(a, b, c)
74}
75
76/// Three-way exclusive OR
77///
78/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)
79#[inline]
80#[target_feature(enable = "neon,sha3")]
81#[cfg_attr(test, assert_instr(eor3))]
82#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
83pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
84    #[allow(improper_ctypes)]
85    extern "unadjusted" {
86        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v16i8")]
87        fn veor3q_u8_(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
88    }
89    veor3q_u8_(a, b, c)
90}
91
92/// Three-way exclusive OR
93///
94/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)
95#[inline]
96#[target_feature(enable = "neon,sha3")]
97#[cfg_attr(test, assert_instr(eor3))]
98#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
99pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
100    #[allow(improper_ctypes)]
101    extern "unadjusted" {
102        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v8i16")]
103        fn veor3q_u16_(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
104    }
105    veor3q_u16_(a, b, c)
106}
107
108/// Three-way exclusive OR
109///
110/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)
111#[inline]
112#[target_feature(enable = "neon,sha3")]
113#[cfg_attr(test, assert_instr(eor3))]
114#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
115pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
116    #[allow(improper_ctypes)]
117    extern "unadjusted" {
118        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v4i32")]
119        fn veor3q_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
120    }
121    veor3q_u32_(a, b, c)
122}
123
124/// Three-way exclusive OR
125///
126/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)
127#[inline]
128#[target_feature(enable = "neon,sha3")]
129#[cfg_attr(test, assert_instr(eor3))]
130#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
131pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
132    #[allow(improper_ctypes)]
133    extern "unadjusted" {
134        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v2i64")]
135        fn veor3q_u64_(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
136    }
137    veor3q_u64_(a, b, c)
138}
139
140/// Absolute difference between the arguments of Floating
141///
142/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)
143#[inline]
144#[target_feature(enable = "neon")]
145#[cfg_attr(test, assert_instr(fabd))]
146#[stable(feature = "neon_intrinsics", since = "1.59.0")]
147pub unsafe fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
148    #[allow(improper_ctypes)]
149    extern "unadjusted" {
150        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fabd.v1f64")]
151        fn vabd_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
152    }
153    vabd_f64_(a, b)
154}
155
156/// Absolute difference between the arguments of Floating
157///
158/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)
159#[inline]
160#[target_feature(enable = "neon")]
161#[cfg_attr(test, assert_instr(fabd))]
162#[stable(feature = "neon_intrinsics", since = "1.59.0")]
163pub unsafe fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
164    #[allow(improper_ctypes)]
165    extern "unadjusted" {
166        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fabd.v2f64")]
167        fn vabdq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
168    }
169    vabdq_f64_(a, b)
170}
171
172/// Floating-point absolute difference
173///
174/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)
175#[inline]
176#[target_feature(enable = "neon")]
177#[cfg_attr(test, assert_instr(fabd))]
178#[stable(feature = "neon_intrinsics", since = "1.59.0")]
179pub unsafe fn vabds_f32(a: f32, b: f32) -> f32 {
180    simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
181}
182
183/// Floating-point absolute difference
184///
185/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)
186#[inline]
187#[target_feature(enable = "neon")]
188#[cfg_attr(test, assert_instr(fabd))]
189#[stable(feature = "neon_intrinsics", since = "1.59.0")]
190pub unsafe fn vabdd_f64(a: f64, b: f64) -> f64 {
191    simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
192}
193
194/// Unsigned Absolute difference Long
195///
196/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)
197#[inline]
198#[target_feature(enable = "neon")]
199#[cfg_attr(test, assert_instr(uabdl))]
200#[stable(feature = "neon_intrinsics", since = "1.59.0")]
201pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
202    let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
203    let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
204    simd_cast(vabd_u8(c, d))
205}
206
207/// Unsigned Absolute difference Long
208///
209/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)
210#[inline]
211#[target_feature(enable = "neon")]
212#[cfg_attr(test, assert_instr(uabdl))]
213#[stable(feature = "neon_intrinsics", since = "1.59.0")]
214pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
215    let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
216    let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
217    simd_cast(vabd_u16(c, d))
218}
219
220/// Unsigned Absolute difference Long
221///
222/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)
223#[inline]
224#[target_feature(enable = "neon")]
225#[cfg_attr(test, assert_instr(uabdl))]
226#[stable(feature = "neon_intrinsics", since = "1.59.0")]
227pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
228    let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
229    let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
230    simd_cast(vabd_u32(c, d))
231}
232
233/// Signed Absolute difference Long
234///
235/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)
236#[inline]
237#[target_feature(enable = "neon")]
238#[cfg_attr(test, assert_instr(sabdl))]
239#[stable(feature = "neon_intrinsics", since = "1.59.0")]
240pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
241    let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
242    let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
243    let e: uint8x8_t = simd_cast(vabd_s8(c, d));
244    simd_cast(e)
245}
246
247/// Signed Absolute difference Long
248///
249/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)
250#[inline]
251#[target_feature(enable = "neon")]
252#[cfg_attr(test, assert_instr(sabdl))]
253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
254pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
255    let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
256    let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
257    let e: uint16x4_t = simd_cast(vabd_s16(c, d));
258    simd_cast(e)
259}
260
261/// Signed Absolute difference Long
262///
263/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)
264#[inline]
265#[target_feature(enable = "neon")]
266#[cfg_attr(test, assert_instr(sabdl))]
267#[stable(feature = "neon_intrinsics", since = "1.59.0")]
268pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
269    let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
270    let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
271    let e: uint32x2_t = simd_cast(vabd_s32(c, d));
272    simd_cast(e)
273}
274
275/// Compare bitwise Equal (vector)
276///
277/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)
278#[inline]
279#[target_feature(enable = "neon")]
280#[cfg_attr(test, assert_instr(cmeq))]
281#[stable(feature = "neon_intrinsics", since = "1.59.0")]
282pub unsafe fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
283    simd_eq(a, b)
284}
285
286/// Compare bitwise Equal (vector)
287///
288/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)
289#[inline]
290#[target_feature(enable = "neon")]
291#[cfg_attr(test, assert_instr(cmeq))]
292#[stable(feature = "neon_intrinsics", since = "1.59.0")]
293pub unsafe fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
294    simd_eq(a, b)
295}
296
297/// Compare bitwise Equal (vector)
298///
299/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)
300#[inline]
301#[target_feature(enable = "neon")]
302#[cfg_attr(test, assert_instr(cmeq))]
303#[stable(feature = "neon_intrinsics", since = "1.59.0")]
304pub unsafe fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
305    simd_eq(a, b)
306}
307
308/// Compare bitwise Equal (vector)
309///
310/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)
311#[inline]
312#[target_feature(enable = "neon")]
313#[cfg_attr(test, assert_instr(cmeq))]
314#[stable(feature = "neon_intrinsics", since = "1.59.0")]
315pub unsafe fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
316    simd_eq(a, b)
317}
318
319/// Compare bitwise Equal (vector)
320///
321/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)
322#[inline]
323#[target_feature(enable = "neon")]
324#[cfg_attr(test, assert_instr(cmeq))]
325#[stable(feature = "neon_intrinsics", since = "1.59.0")]
326pub unsafe fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
327    simd_eq(a, b)
328}
329
330/// Compare bitwise Equal (vector)
331///
332/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)
333#[inline]
334#[target_feature(enable = "neon")]
335#[cfg_attr(test, assert_instr(cmeq))]
336#[stable(feature = "neon_intrinsics", since = "1.59.0")]
337pub unsafe fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
338    simd_eq(a, b)
339}
340
341/// Floating-point compare equal
342///
343/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)
344#[inline]
345#[target_feature(enable = "neon")]
346#[cfg_attr(test, assert_instr(fcmeq))]
347#[stable(feature = "neon_intrinsics", since = "1.59.0")]
348pub unsafe fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
349    simd_eq(a, b)
350}
351
352/// Floating-point compare equal
353///
354/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)
355#[inline]
356#[target_feature(enable = "neon")]
357#[cfg_attr(test, assert_instr(fcmeq))]
358#[stable(feature = "neon_intrinsics", since = "1.59.0")]
359pub unsafe fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
360    simd_eq(a, b)
361}
362
363/// Compare bitwise equal
364///
365/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)
366#[inline]
367#[target_feature(enable = "neon")]
368#[cfg_attr(test, assert_instr(cmp))]
369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
370pub unsafe fn vceqd_s64(a: i64, b: i64) -> u64 {
371    transmute(vceq_s64(transmute(a), transmute(b)))
372}
373
374/// Compare bitwise equal
375///
376/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)
377#[inline]
378#[target_feature(enable = "neon")]
379#[cfg_attr(test, assert_instr(cmp))]
380#[stable(feature = "neon_intrinsics", since = "1.59.0")]
381pub unsafe fn vceqd_u64(a: u64, b: u64) -> u64 {
382    transmute(vceq_u64(transmute(a), transmute(b)))
383}
384
385/// Floating-point compare equal
386///
387/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)
388#[inline]
389#[target_feature(enable = "neon")]
390#[cfg_attr(test, assert_instr(fcmp))]
391#[stable(feature = "neon_intrinsics", since = "1.59.0")]
392pub unsafe fn vceqs_f32(a: f32, b: f32) -> u32 {
393    simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
394}
395
396/// Floating-point compare equal
397///
398/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)
399#[inline]
400#[target_feature(enable = "neon")]
401#[cfg_attr(test, assert_instr(fcmp))]
402#[stable(feature = "neon_intrinsics", since = "1.59.0")]
403pub unsafe fn vceqd_f64(a: f64, b: f64) -> u64 {
404    simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
405}
406
407/// Signed compare bitwise equal to zero
408///
409/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)
410#[inline]
411#[target_feature(enable = "neon")]
412#[cfg_attr(test, assert_instr(cmeq))]
413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
414pub unsafe fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
415    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
416    simd_eq(a, transmute(b))
417}
418
419/// Signed compare bitwise equal to zero
420///
421/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)
422#[inline]
423#[target_feature(enable = "neon")]
424#[cfg_attr(test, assert_instr(cmeq))]
425#[stable(feature = "neon_intrinsics", since = "1.59.0")]
426pub unsafe fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
427    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
428    simd_eq(a, transmute(b))
429}
430
431/// Signed compare bitwise equal to zero
432///
433/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)
434#[inline]
435#[target_feature(enable = "neon")]
436#[cfg_attr(test, assert_instr(cmeq))]
437#[stable(feature = "neon_intrinsics", since = "1.59.0")]
438pub unsafe fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
439    let b: i16x4 = i16x4::new(0, 0, 0, 0);
440    simd_eq(a, transmute(b))
441}
442
443/// Signed compare bitwise equal to zero
444///
445/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)
446#[inline]
447#[target_feature(enable = "neon")]
448#[cfg_attr(test, assert_instr(cmeq))]
449#[stable(feature = "neon_intrinsics", since = "1.59.0")]
450pub unsafe fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
451    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
452    simd_eq(a, transmute(b))
453}
454
455/// Signed compare bitwise equal to zero
456///
457/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)
458#[inline]
459#[target_feature(enable = "neon")]
460#[cfg_attr(test, assert_instr(cmeq))]
461#[stable(feature = "neon_intrinsics", since = "1.59.0")]
462pub unsafe fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
463    let b: i32x2 = i32x2::new(0, 0);
464    simd_eq(a, transmute(b))
465}
466
467/// Signed compare bitwise equal to zero
468///
469/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)
470#[inline]
471#[target_feature(enable = "neon")]
472#[cfg_attr(test, assert_instr(cmeq))]
473#[stable(feature = "neon_intrinsics", since = "1.59.0")]
474pub unsafe fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
475    let b: i32x4 = i32x4::new(0, 0, 0, 0);
476    simd_eq(a, transmute(b))
477}
478
479/// Signed compare bitwise equal to zero
480///
481/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)
482#[inline]
483#[target_feature(enable = "neon")]
484#[cfg_attr(test, assert_instr(cmeq))]
485#[stable(feature = "neon_intrinsics", since = "1.59.0")]
486pub unsafe fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
487    let b: i64x1 = i64x1::new(0);
488    simd_eq(a, transmute(b))
489}
490
491/// Signed compare bitwise equal to zero
492///
493/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)
494#[inline]
495#[target_feature(enable = "neon")]
496#[cfg_attr(test, assert_instr(cmeq))]
497#[stable(feature = "neon_intrinsics", since = "1.59.0")]
498pub unsafe fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
499    let b: i64x2 = i64x2::new(0, 0);
500    simd_eq(a, transmute(b))
501}
502
503/// Signed compare bitwise equal to zero
504///
505/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)
506#[inline]
507#[target_feature(enable = "neon")]
508#[cfg_attr(test, assert_instr(cmeq))]
509#[stable(feature = "neon_intrinsics", since = "1.59.0")]
510pub unsafe fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
511    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
512    simd_eq(a, transmute(b))
513}
514
515/// Signed compare bitwise equal to zero
516///
517/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)
518#[inline]
519#[target_feature(enable = "neon")]
520#[cfg_attr(test, assert_instr(cmeq))]
521#[stable(feature = "neon_intrinsics", since = "1.59.0")]
522pub unsafe fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
523    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
524    simd_eq(a, transmute(b))
525}
526
527/// Signed compare bitwise equal to zero
528///
529/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)
530#[inline]
531#[target_feature(enable = "neon")]
532#[cfg_attr(test, assert_instr(cmeq))]
533#[stable(feature = "neon_intrinsics", since = "1.59.0")]
534pub unsafe fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
535    let b: i64x1 = i64x1::new(0);
536    simd_eq(a, transmute(b))
537}
538
539/// Signed compare bitwise equal to zero
540///
541/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)
542#[inline]
543#[target_feature(enable = "neon")]
544#[cfg_attr(test, assert_instr(cmeq))]
545#[stable(feature = "neon_intrinsics", since = "1.59.0")]
546pub unsafe fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
547    let b: i64x2 = i64x2::new(0, 0);
548    simd_eq(a, transmute(b))
549}
550
551/// Unsigned compare bitwise equal to zero
552///
553/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)
554#[inline]
555#[target_feature(enable = "neon")]
556#[cfg_attr(test, assert_instr(cmeq))]
557#[stable(feature = "neon_intrinsics", since = "1.59.0")]
558pub unsafe fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
559    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
560    simd_eq(a, transmute(b))
561}
562
563/// Unsigned compare bitwise equal to zero
564///
565/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)
566#[inline]
567#[target_feature(enable = "neon")]
568#[cfg_attr(test, assert_instr(cmeq))]
569#[stable(feature = "neon_intrinsics", since = "1.59.0")]
570pub unsafe fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
571    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
572    simd_eq(a, transmute(b))
573}
574
575/// Unsigned compare bitwise equal to zero
576///
577/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)
578#[inline]
579#[target_feature(enable = "neon")]
580#[cfg_attr(test, assert_instr(cmeq))]
581#[stable(feature = "neon_intrinsics", since = "1.59.0")]
582pub unsafe fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
583    let b: u16x4 = u16x4::new(0, 0, 0, 0);
584    simd_eq(a, transmute(b))
585}
586
587/// Unsigned compare bitwise equal to zero
588///
589/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)
590#[inline]
591#[target_feature(enable = "neon")]
592#[cfg_attr(test, assert_instr(cmeq))]
593#[stable(feature = "neon_intrinsics", since = "1.59.0")]
594pub unsafe fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
595    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
596    simd_eq(a, transmute(b))
597}
598
599/// Unsigned compare bitwise equal to zero
600///
601/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)
602#[inline]
603#[target_feature(enable = "neon")]
604#[cfg_attr(test, assert_instr(cmeq))]
605#[stable(feature = "neon_intrinsics", since = "1.59.0")]
606pub unsafe fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
607    let b: u32x2 = u32x2::new(0, 0);
608    simd_eq(a, transmute(b))
609}
610
611/// Unsigned compare bitwise equal to zero
612///
613/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)
614#[inline]
615#[target_feature(enable = "neon")]
616#[cfg_attr(test, assert_instr(cmeq))]
617#[stable(feature = "neon_intrinsics", since = "1.59.0")]
618pub unsafe fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
619    let b: u32x4 = u32x4::new(0, 0, 0, 0);
620    simd_eq(a, transmute(b))
621}
622
623/// Unsigned compare bitwise equal to zero
624///
625/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)
626#[inline]
627#[target_feature(enable = "neon")]
628#[cfg_attr(test, assert_instr(cmeq))]
629#[stable(feature = "neon_intrinsics", since = "1.59.0")]
630pub unsafe fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
631    let b: u64x1 = u64x1::new(0);
632    simd_eq(a, transmute(b))
633}
634
635/// Unsigned compare bitwise equal to zero
636///
637/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)
638#[inline]
639#[target_feature(enable = "neon")]
640#[cfg_attr(test, assert_instr(cmeq))]
641#[stable(feature = "neon_intrinsics", since = "1.59.0")]
642pub unsafe fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
643    let b: u64x2 = u64x2::new(0, 0);
644    simd_eq(a, transmute(b))
645}
646
647/// Floating-point compare bitwise equal to zero
648///
649/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)
650#[inline]
651#[target_feature(enable = "neon")]
652#[cfg_attr(test, assert_instr(fcmeq))]
653#[stable(feature = "neon_intrinsics", since = "1.59.0")]
654pub unsafe fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
655    let b: f32x2 = f32x2::new(0.0, 0.0);
656    simd_eq(a, transmute(b))
657}
658
659/// Floating-point compare bitwise equal to zero
660///
661/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)
662#[inline]
663#[target_feature(enable = "neon")]
664#[cfg_attr(test, assert_instr(fcmeq))]
665#[stable(feature = "neon_intrinsics", since = "1.59.0")]
666pub unsafe fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
667    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
668    simd_eq(a, transmute(b))
669}
670
671/// Floating-point compare bitwise equal to zero
672///
673/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)
674#[inline]
675#[target_feature(enable = "neon")]
676#[cfg_attr(test, assert_instr(fcmeq))]
677#[stable(feature = "neon_intrinsics", since = "1.59.0")]
678pub unsafe fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
679    let b: f64 = 0.0;
680    simd_eq(a, transmute(b))
681}
682
683/// Floating-point compare bitwise equal to zero
684///
685/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)
686#[inline]
687#[target_feature(enable = "neon")]
688#[cfg_attr(test, assert_instr(fcmeq))]
689#[stable(feature = "neon_intrinsics", since = "1.59.0")]
690pub unsafe fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
691    let b: f64x2 = f64x2::new(0.0, 0.0);
692    simd_eq(a, transmute(b))
693}
694
695/// Compare bitwise equal to zero
696///
697/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)
698#[inline]
699#[target_feature(enable = "neon")]
700#[cfg_attr(test, assert_instr(cmp))]
701#[stable(feature = "neon_intrinsics", since = "1.59.0")]
702pub unsafe fn vceqzd_s64(a: i64) -> u64 {
703    transmute(vceqz_s64(transmute(a)))
704}
705
706/// Compare bitwise equal to zero
707///
708/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)
709#[inline]
710#[target_feature(enable = "neon")]
711#[cfg_attr(test, assert_instr(cmp))]
712#[stable(feature = "neon_intrinsics", since = "1.59.0")]
713pub unsafe fn vceqzd_u64(a: u64) -> u64 {
714    transmute(vceqz_u64(transmute(a)))
715}
716
717/// Floating-point compare bitwise equal to zero
718///
719/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)
720#[inline]
721#[target_feature(enable = "neon")]
722#[cfg_attr(test, assert_instr(fcmp))]
723#[stable(feature = "neon_intrinsics", since = "1.59.0")]
724pub unsafe fn vceqzs_f32(a: f32) -> u32 {
725    simd_extract!(vceqz_f32(vdup_n_f32(a)), 0)
726}
727
728/// Floating-point compare bitwise equal to zero
729///
730/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)
731#[inline]
732#[target_feature(enable = "neon")]
733#[cfg_attr(test, assert_instr(fcmp))]
734#[stable(feature = "neon_intrinsics", since = "1.59.0")]
735pub unsafe fn vceqzd_f64(a: f64) -> u64 {
736    simd_extract!(vceqz_f64(vdup_n_f64(a)), 0)
737}
738
739/// Signed compare bitwise Test bits nonzero
740///
741/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)
742#[inline]
743#[target_feature(enable = "neon")]
744#[cfg_attr(test, assert_instr(cmtst))]
745#[stable(feature = "neon_intrinsics", since = "1.59.0")]
746pub unsafe fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
747    let c: int64x1_t = simd_and(a, b);
748    let d: i64x1 = i64x1::new(0);
749    simd_ne(c, transmute(d))
750}
751
752/// Signed compare bitwise Test bits nonzero
753///
754/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)
755#[inline]
756#[target_feature(enable = "neon")]
757#[cfg_attr(test, assert_instr(cmtst))]
758#[stable(feature = "neon_intrinsics", since = "1.59.0")]
759pub unsafe fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
760    let c: int64x2_t = simd_and(a, b);
761    let d: i64x2 = i64x2::new(0, 0);
762    simd_ne(c, transmute(d))
763}
764
765/// Signed compare bitwise Test bits nonzero
766///
767/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)
768#[inline]
769#[target_feature(enable = "neon")]
770#[cfg_attr(test, assert_instr(cmtst))]
771#[stable(feature = "neon_intrinsics", since = "1.59.0")]
772pub unsafe fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
773    let c: poly64x1_t = simd_and(a, b);
774    let d: i64x1 = i64x1::new(0);
775    simd_ne(c, transmute(d))
776}
777
778/// Signed compare bitwise Test bits nonzero
779///
780/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)
781#[inline]
782#[target_feature(enable = "neon")]
783#[cfg_attr(test, assert_instr(cmtst))]
784#[stable(feature = "neon_intrinsics", since = "1.59.0")]
785pub unsafe fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
786    let c: poly64x2_t = simd_and(a, b);
787    let d: i64x2 = i64x2::new(0, 0);
788    simd_ne(c, transmute(d))
789}
790
791/// Unsigned compare bitwise Test bits nonzero
792///
793/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)
794#[inline]
795#[target_feature(enable = "neon")]
796#[cfg_attr(test, assert_instr(cmtst))]
797#[stable(feature = "neon_intrinsics", since = "1.59.0")]
798pub unsafe fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
799    let c: uint64x1_t = simd_and(a, b);
800    let d: u64x1 = u64x1::new(0);
801    simd_ne(c, transmute(d))
802}
803
/// Unsigned compare bitwise Test bits nonzero
///
/// Each output lane is a mask indicating whether `a & b` has any bit set in
/// that lane (128-bit vector; generates `cmtst`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // AND the inputs, then test each lane for inequality with zero.
    let c: uint64x2_t = simd_and(a, b);
    let d: u64x2 = u64x2::new(0, 0);
    simd_ne(c, transmute(d))
}
816
/// Compare bitwise test bits nonzero
///
/// Scalar form: reuses the single-lane vector implementation `vtst_s64` via
/// `transmute` (generates `tst`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtstd_s64(a: i64, b: i64) -> u64 {
    transmute(vtst_s64(transmute(a), transmute(b)))
}
827
/// Compare bitwise test bits nonzero
///
/// Scalar form: reuses the single-lane vector implementation `vtst_u64` via
/// `transmute` (generates `tst`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtstd_u64(a: u64, b: u64) -> u64 {
    transmute(vtst_u64(transmute(a), transmute(b)))
}
838
/// Signed saturating accumulate of unsigned value
///
/// Adds unsigned `b` to signed `a` with saturation, by calling the
/// `llvm.aarch64.neon.suqadd.i32` intrinsic (generates `suqadd`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.i32")]
        fn vuqadds_s32_(a: i32, b: u32) -> i32;
    }
    vuqadds_s32_(a, b)
}
854
/// Signed saturating accumulate of unsigned value
///
/// Adds unsigned `b` to signed `a` with saturation, by calling the
/// `llvm.aarch64.neon.suqadd.i64` intrinsic (generates `suqadd`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.i64")]
        fn vuqaddd_s64_(a: i64, b: u64) -> i64;
    }
    vuqaddd_s64_(a, b)
}
870
/// Signed saturating accumulate of unsigned value
///
/// Scalar form: broadcasts both scalars, performs the vector `vuqadd_s8`,
/// and extracts lane 0 (generates `suqadd`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0)
}
881
/// Signed saturating accumulate of unsigned value
///
/// Scalar form: broadcasts both scalars, performs the vector `vuqadd_s16`,
/// and extracts lane 0 (generates `suqadd`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0)
}
892
/// Floating-point absolute value
///
/// Lane-wise `|a|` via the `simd_fabs` intrinsic (generates `fabs`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabs_f64(a: float64x1_t) -> float64x1_t {
    simd_fabs(a)
}
903
/// Floating-point absolute value
///
/// Lane-wise `|a|` on the 128-bit vector via `simd_fabs` (generates `fabs`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    simd_fabs(a)
}
914
/// Compare signed greater than
///
/// Lane-wise `a > b`, returning an unsigned mask vector (generates `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    simd_gt(a, b)
}
925
/// Compare signed greater than
///
/// Lane-wise `a > b` on the 128-bit vector, returning an unsigned mask
/// vector (generates `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    simd_gt(a, b)
}
936
/// Compare unsigned greater than
///
/// Lane-wise unsigned `a > b`, returning a mask vector (generates `cmhi`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    simd_gt(a, b)
}
947
/// Compare unsigned greater than
///
/// Lane-wise unsigned `a > b` on the 128-bit vector, returning a mask
/// vector (generates `cmhi`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    simd_gt(a, b)
}
958
/// Floating-point compare greater than
///
/// Lane-wise `a > b`, returning an unsigned mask vector (generates `fcmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    simd_gt(a, b)
}
969
/// Floating-point compare greater than
///
/// Lane-wise `a > b` on the 128-bit vector, returning an unsigned mask
/// vector (generates `fcmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    simd_gt(a, b)
}
980
/// Compare greater than
///
/// Scalar form: reuses the single-lane vector implementation `vcgt_s64`
/// via `transmute` (generates `cmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtd_s64(a: i64, b: i64) -> u64 {
    transmute(vcgt_s64(transmute(a), transmute(b)))
}
991
/// Compare greater than
///
/// Scalar form: reuses the single-lane vector implementation `vcgt_u64`
/// via `transmute` (generates `cmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtd_u64(a: u64, b: u64) -> u64 {
    transmute(vcgt_u64(transmute(a), transmute(b)))
}
1002
/// Floating-point compare greater than
///
/// Scalar form: broadcasts both scalars, performs the vector `vcgt_f32`,
/// and extracts lane 0 (generates `fcmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgts_f32(a: f32, b: f32) -> u32 {
    simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
}
1013
/// Floating-point compare greater than
///
/// Scalar form: broadcasts both scalars, performs the vector `vcgt_f64`,
/// and extracts lane 0 (generates `fcmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtd_f64(a: f64, b: f64) -> u64 {
    simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
}
1024
/// Compare signed less than
///
/// Lane-wise `a < b`, returning an unsigned mask vector (lowers to `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    simd_lt(a, b)
}
1035
/// Compare signed less than
///
/// Lane-wise `a < b` on the 128-bit vector, returning an unsigned mask
/// vector (lowers to `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    simd_lt(a, b)
}
1046
/// Compare unsigned less than
///
/// Lane-wise unsigned `a < b`, returning a mask vector (lowers to `cmhi`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    simd_lt(a, b)
}
1057
/// Compare unsigned less than
///
/// Lane-wise unsigned `a < b` on the 128-bit vector, returning a mask
/// vector (lowers to `cmhi`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    simd_lt(a, b)
}
1068
/// Floating-point compare less than
///
/// Lane-wise `a < b`, returning an unsigned mask vector (lowers to `fcmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    simd_lt(a, b)
}
1079
/// Floating-point compare less than
///
/// Lane-wise `a < b` on the 128-bit vector, returning an unsigned mask
/// vector (lowers to `fcmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    simd_lt(a, b)
}
1090
/// Compare less than
///
/// Scalar form: reuses the single-lane vector implementation `vclt_s64`
/// via `transmute` (generates `cmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltd_s64(a: i64, b: i64) -> u64 {
    transmute(vclt_s64(transmute(a), transmute(b)))
}
1101
/// Compare less than
///
/// Scalar form: reuses the single-lane vector implementation `vclt_u64`
/// via `transmute` (generates `cmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltd_u64(a: u64, b: u64) -> u64 {
    transmute(vclt_u64(transmute(a), transmute(b)))
}
1112
/// Floating-point compare less than
///
/// Scalar form: broadcasts both scalars, performs the vector `vclt_f32`,
/// and extracts lane 0 (generates `fcmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclts_f32(a: f32, b: f32) -> u32 {
    simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
}
1123
/// Floating-point compare less than
///
/// Scalar form: broadcasts both scalars, performs the vector `vclt_f64`,
/// and extracts lane 0 (generates `fcmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltd_f64(a: f64, b: f64) -> u64 {
    simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
}
1134
/// Compare signed less than or equal
///
/// Lane-wise `a <= b`, returning an unsigned mask vector (lowers to `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    simd_le(a, b)
}
1145
/// Compare signed less than or equal
///
/// Lane-wise `a <= b` on the 128-bit vector, returning an unsigned mask
/// vector (lowers to `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    simd_le(a, b)
}
1156
/// Compare greater than or equal
///
/// Scalar form: reuses the single-lane vector implementation `vcge_s64`
/// via `transmute` (generates `cmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcged_s64(a: i64, b: i64) -> u64 {
    transmute(vcge_s64(transmute(a), transmute(b)))
}
1167
/// Compare greater than or equal
///
/// Scalar form: reuses the single-lane vector implementation `vcge_u64`
/// via `transmute` (generates `cmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcged_u64(a: u64, b: u64) -> u64 {
    transmute(vcge_u64(transmute(a), transmute(b)))
}
1178
/// Floating-point compare greater than or equal
///
/// Scalar form: broadcasts both scalars, performs the vector `vcge_f32`,
/// and extracts lane 0 (generates `fcmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcges_f32(a: f32, b: f32) -> u32 {
    simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
}
1189
/// Floating-point compare greater than or equal
///
/// Scalar form: broadcasts both scalars, performs the vector `vcge_f64`,
/// and extracts lane 0 (generates `fcmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcged_f64(a: f64, b: f64) -> u64 {
    simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
}
1200
/// Compare unsigned less than or equal
///
/// Lane-wise unsigned `a <= b`, returning a mask vector (lowers to `cmhs`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    simd_le(a, b)
}
1211
/// Compare unsigned less than or equal
///
/// Lane-wise unsigned `a <= b` on the 128-bit vector, returning a mask
/// vector (lowers to `cmhs`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    simd_le(a, b)
}
1222
/// Floating-point compare less than or equal
///
/// Lane-wise `a <= b`, returning an unsigned mask vector (lowers to `fcmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    simd_le(a, b)
}
1233
/// Floating-point compare less than or equal
///
/// Lane-wise `a <= b` on the 128-bit vector, returning an unsigned mask
/// vector (lowers to `fcmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    simd_le(a, b)
}
1244
/// Compare less than or equal
///
/// Scalar form: reuses the single-lane vector implementation `vcle_s64`
/// via `transmute` (generates `cmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcled_s64(a: i64, b: i64) -> u64 {
    transmute(vcle_s64(transmute(a), transmute(b)))
}
1255
/// Compare less than or equal
///
/// Scalar form: reuses the single-lane vector implementation `vcle_u64`
/// via `transmute` (generates `cmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcled_u64(a: u64, b: u64) -> u64 {
    transmute(vcle_u64(transmute(a), transmute(b)))
}
1266
/// Floating-point compare less than or equal
///
/// Scalar form: broadcasts both scalars, performs the vector `vcle_f32`,
/// and extracts lane 0 (generates `fcmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcles_f32(a: f32, b: f32) -> u32 {
    simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0)
}
1277
/// Floating-point compare less than or equal
///
/// Scalar form: broadcasts both scalars, performs the vector `vcle_f64`,
/// and extracts lane 0 (generates `fcmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcled_f64(a: f64, b: f64) -> u64 {
    simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0)
}
1288
/// Compare signed greater than or equal
///
/// Lane-wise `a >= b`, returning an unsigned mask vector (generates `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    simd_ge(a, b)
}
1299
/// Compare signed greater than or equal
///
/// Lane-wise `a >= b` on the 128-bit vector, returning an unsigned mask
/// vector (generates `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    simd_ge(a, b)
}
1310
/// Compare unsigned greater than or equal
///
/// Lane-wise unsigned `a >= b`, returning a mask vector (generates `cmhs`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    simd_ge(a, b)
}
1321
/// Compare unsigned greater than or equal
///
/// Lane-wise unsigned `a >= b` on the 128-bit vector, returning a mask
/// vector (generates `cmhs`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    simd_ge(a, b)
}
1332
/// Floating-point compare greater than or equal
///
/// Lane-wise `a >= b`, returning an unsigned mask vector (generates `fcmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    simd_ge(a, b)
}
1343
/// Floating-point compare greater than or equal
///
/// Lane-wise `a >= b` on the 128-bit vector, returning an unsigned mask
/// vector (generates `fcmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    simd_ge(a, b)
}
1354
/// Compare signed greater than or equal to zero
///
/// Lane-wise `a >= 0`, implemented by comparing against an all-zero vector
/// (generates `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_ge(a, transmute(b))
}
1366
/// Compare signed greater than or equal to zero
///
/// Lane-wise `a >= 0` on the 128-bit vector, implemented by comparing
/// against an all-zero vector (generates `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    simd_ge(a, transmute(b))
}
1378
/// Compare signed greater than or equal to zero
///
/// Lane-wise `a >= 0`, implemented by comparing against an all-zero vector
/// (generates `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    simd_ge(a, transmute(b))
}
1390
/// Compare signed greater than or equal to zero
///
/// Lane-wise `a >= 0` on the 128-bit vector, implemented by comparing
/// against an all-zero vector (generates `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_ge(a, transmute(b))
}
1402
/// Compare signed greater than or equal to zero
///
/// Lane-wise `a >= 0`, implemented by comparing against an all-zero vector
/// (generates `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    simd_ge(a, transmute(b))
}
1414
/// Compare signed greater than or equal to zero
///
/// Lane-wise `a >= 0` on the 128-bit vector, implemented by comparing
/// against an all-zero vector (generates `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    simd_ge(a, transmute(b))
}
1426
/// Compare signed greater than or equal to zero
///
/// Lane-wise `a >= 0`, implemented by comparing against an all-zero vector
/// (generates `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    simd_ge(a, transmute(b))
}
1438
/// Compare signed greater than or equal to zero
///
/// Lane-wise `a >= 0` on the 128-bit vector, implemented by comparing
/// against an all-zero vector (generates `cmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    simd_ge(a, transmute(b))
}
1450
/// Floating-point compare greater than or equal to zero
///
/// Lane-wise `a >= 0.0`, implemented by comparing against an all-zero
/// vector (generates `fcmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    simd_ge(a, transmute(b))
}
1462
/// Floating-point compare greater than or equal to zero
///
/// Lane-wise `a >= 0.0` on the 128-bit vector, implemented by comparing
/// against an all-zero vector (generates `fcmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    simd_ge(a, transmute(b))
}
1474
/// Floating-point compare greater than or equal to zero
///
/// Lane-wise `a >= 0.0`; the scalar `0.0` is transmuted to the one-lane
/// vector type for the comparison (generates `fcmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    simd_ge(a, transmute(b))
}
1486
/// Floating-point compare greater than or equal to zero
///
/// Lane-wise `a >= 0.0` on the 128-bit vector, implemented by comparing
/// against an all-zero vector (generates `fcmge`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    simd_ge(a, transmute(b))
}
1498
/// Compare signed greater than or equal to zero
///
/// Scalar form: reuses the single-lane vector implementation `vcgez_s64`
/// via `transmute`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezd_s64(a: i64) -> u64 {
    transmute(vcgez_s64(transmute(a)))
}
1509
/// Floating-point compare greater than or equal to zero
///
/// Scalar form: broadcasts the scalar, performs the vector `vcgez_f32`,
/// and extracts lane 0 (generates `fcmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezs_f32(a: f32) -> u32 {
    simd_extract!(vcgez_f32(vdup_n_f32(a)), 0)
}
1520
/// Floating-point compare greater than or equal to zero
///
/// Scalar form: broadcasts the scalar, performs the vector `vcgez_f64`,
/// and extracts lane 0 (generates `fcmp`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgezd_f64(a: f64) -> u64 {
    simd_extract!(vcgez_f64(vdup_n_f64(a)), 0)
}
1531
/// Compare signed greater than zero
///
/// Lane-wise `a > 0`, implemented by comparing against an all-zero vector
/// (generates `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_gt(a, transmute(b))
}
1543
/// Compare signed greater than zero
///
/// Lane-wise `a > 0` on the 128-bit vector, implemented by comparing
/// against an all-zero vector (generates `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    simd_gt(a, transmute(b))
}
1555
/// Compare signed greater than zero
///
/// Lane-wise `a > 0`, implemented by comparing against an all-zero vector
/// (generates `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    simd_gt(a, transmute(b))
}
1567
/// Compare signed greater than zero
///
/// Lane-wise `a > 0` on the 128-bit vector, implemented by comparing
/// against an all-zero vector (generates `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_gt(a, transmute(b))
}
1579
/// Compare signed greater than zero
///
/// Lane-wise `a > 0`, implemented by comparing against an all-zero vector
/// (generates `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    simd_gt(a, transmute(b))
}
1591
/// Compare signed greater than zero
///
/// Lane-wise `a > 0` on the 128-bit vector, implemented by comparing
/// against an all-zero vector (generates `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    simd_gt(a, transmute(b))
}
1603
/// Compare signed greater than zero
///
/// Lane-wise `a > 0`, implemented by comparing against an all-zero vector
/// (generates `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    simd_gt(a, transmute(b))
}
1615
/// Compare signed greater than zero
///
/// Lane-wise `a > 0` on the 128-bit vector, implemented by comparing
/// against an all-zero vector (generates `cmgt`).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    simd_gt(a, transmute(b))
}
1627
1628/// Floating-point compare greater than zero
1629///
1630/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)
1631#[inline]
1632#[target_feature(enable = "neon")]
1633#[cfg_attr(test, assert_instr(fcmgt))]
1634#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1635pub unsafe fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
1636    let b: f32x2 = f32x2::new(0.0, 0.0);
1637    simd_gt(a, transmute(b))
1638}
1639
1640/// Floating-point compare greater than zero
1641///
1642/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)
1643#[inline]
1644#[target_feature(enable = "neon")]
1645#[cfg_attr(test, assert_instr(fcmgt))]
1646#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1647pub unsafe fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
1648    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
1649    simd_gt(a, transmute(b))
1650}
1651
1652/// Floating-point compare greater than zero
1653///
1654/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)
1655#[inline]
1656#[target_feature(enable = "neon")]
1657#[cfg_attr(test, assert_instr(fcmgt))]
1658#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1659pub unsafe fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
1660    let b: f64 = 0.0;
1661    simd_gt(a, transmute(b))
1662}
1663
1664/// Floating-point compare greater than zero
1665///
1666/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)
1667#[inline]
1668#[target_feature(enable = "neon")]
1669#[cfg_attr(test, assert_instr(fcmgt))]
1670#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1671pub unsafe fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
1672    let b: f64x2 = f64x2::new(0.0, 0.0);
1673    simd_gt(a, transmute(b))
1674}
1675
1676/// Compare signed greater than zero
1677///
1678/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)
1679#[inline]
1680#[target_feature(enable = "neon")]
1681#[cfg_attr(test, assert_instr(cmp))]
1682#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1683pub unsafe fn vcgtzd_s64(a: i64) -> u64 {
1684    transmute(vcgtz_s64(transmute(a)))
1685}
1686
1687/// Floating-point compare greater than zero
1688///
1689/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)
1690#[inline]
1691#[target_feature(enable = "neon")]
1692#[cfg_attr(test, assert_instr(fcmp))]
1693#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1694pub unsafe fn vcgtzs_f32(a: f32) -> u32 {
1695    simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0)
1696}
1697
1698/// Floating-point compare greater than zero
1699///
1700/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)
1701#[inline]
1702#[target_feature(enable = "neon")]
1703#[cfg_attr(test, assert_instr(fcmp))]
1704#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1705pub unsafe fn vcgtzd_f64(a: f64) -> u64 {
1706    simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0)
1707}
1708
// --- vclez* family: compare (signed / floating-point) less than or equal ---
// --- to zero -----------------------------------------------------------------
//
// Same structure as the vcgtz* family above it in the generated file: an
// all-zeros comparand of matching shape, lane-wise `a <= 0`; true lanes are
// all-ones in the unsigned result, false lanes are zero (NEON CMLE/FCMLE
// zero-compare semantics). `transmute` only reinterprets the portable simd
// wrapper as the ABI vector type.

/// Compare signed less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    // All-zeros comparand for the lane-wise signed compare.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_le(a, transmute(b))
}

/// Compare signed less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    simd_le(a, transmute(b))
}

/// Compare signed less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    simd_le(a, transmute(b))
}

/// Compare signed less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_le(a, transmute(b))
}

/// Compare signed less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    simd_le(a, transmute(b))
}

/// Compare signed less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    simd_le(a, transmute(b))
}

/// Compare signed less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    simd_le(a, transmute(b))
}

/// Compare signed less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    simd_le(a, transmute(b))
}

/// Floating-point compare less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    simd_le(a, transmute(b))
}

/// Floating-point compare less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    simd_le(a, transmute(b))
}

/// Floating-point compare less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    // A bare `f64` has the same layout as the single-lane `float64x1_t`.
    let b: f64 = 0.0;
    simd_le(a, transmute(b))
}

/// Floating-point compare less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    simd_le(a, transmute(b))
}

/// Compare less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclezd_s64(a: i64) -> u64 {
    // Scalar form: route through the 1-lane vector compare, then reinterpret
    // the single-lane mask back to `u64` (all-ones if a <= 0, else 0).
    transmute(vclez_s64(transmute(a)))
}

/// Floating-point compare less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclezs_f32(a: f32) -> u32 {
    // Broadcast the scalar into a 2-lane vector, compare, and extract lane 0.
    simd_extract!(vclez_f32(vdup_n_f32(a)), 0)
}

/// Floating-point compare less than or equal to zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vclezd_f64(a: f64) -> u64 {
    // Same scheme as vclezs_f32, via the 1-lane f64 vector.
    simd_extract!(vclez_f64(vdup_n_f64(a)), 0)
}
1885
// --- vcltz* family: compare (signed / floating-point) less than zero ---
//
// Same structure as the vcgtz*/vclez* families: all-zeros comparand of
// matching shape, lane-wise `a < 0`; true lanes become all-ones in the
// unsigned result, false lanes become zero (NEON CMLT/FCMLT zero-compare
// semantics). `transmute` only reinterprets the portable simd wrapper as the
// matching ABI vector type.

/// Compare signed less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
    // All-zeros comparand for the lane-wise signed compare.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_lt(a, transmute(b))
}

/// Compare signed less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    simd_lt(a, transmute(b))
}

/// Compare signed less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    simd_lt(a, transmute(b))
}

/// Compare signed less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    simd_lt(a, transmute(b))
}

/// Compare signed less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    simd_lt(a, transmute(b))
}

/// Compare signed less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    simd_lt(a, transmute(b))
}

/// Compare signed less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    simd_lt(a, transmute(b))
}

/// Compare signed less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    simd_lt(a, transmute(b))
}

/// Floating-point compare less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    simd_lt(a, transmute(b))
}

/// Floating-point compare less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    simd_lt(a, transmute(b))
}

/// Floating-point compare less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
    // A bare `f64` has the same layout as the single-lane `float64x1_t`.
    let b: f64 = 0.0;
    simd_lt(a, transmute(b))
}

/// Floating-point compare less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    simd_lt(a, transmute(b))
}

/// Compare less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)
#[inline]
#[target_feature(enable = "neon")]
// `a < 0` on a scalar is just the sign bit broadcast to all 64 bits, which
// codegen emits as an arithmetic shift right — hence `asr`, not `cmp`.
#[cfg_attr(test, assert_instr(asr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltzd_s64(a: i64) -> u64 {
    // Scalar form: route through the 1-lane vector compare, then reinterpret
    // the single-lane mask back to `u64` (all-ones if a < 0, else 0).
    transmute(vcltz_s64(transmute(a)))
}

/// Floating-point compare less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltzs_f32(a: f32) -> u32 {
    // Broadcast the scalar into a 2-lane vector, compare, and extract lane 0.
    simd_extract!(vcltz_f32(vdup_n_f32(a)), 0)
}

/// Floating-point compare less than zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcltzd_f64(a: f64) -> u64 {
    // Same scheme as vcltzs_f32, via the 1-lane f64 vector.
    simd_extract!(vcltz_f64(vdup_n_f64(a)), 0)
}
2062
// --- vcagt*/vcage*/vcalt*/vcale*: floating-point absolute compares ---
//
// These compare magnitudes, |a| vs |b|, binding the LLVM `facgt`/`facge`
// intrinsics directly (there is no portable `simd_*` form for absolute
// compares). The "less than (or equal)" variants below are implemented as
// the greater-than forms with the operands swapped — |a| < |b| iff |b| > |a| —
// which is why they still assert `facgt`/`facge`.

/// Floating-point absolute compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding of the LLVM aarch64 `facgt` intrinsic for this shape.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64")]
        fn vcagt_f64_(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    vcagt_f64_(a, b)
}

/// Floating-point absolute compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64")]
        fn vcagtq_f64_(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    vcagtq_f64_(a, b)
}

/// Floating-point absolute compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Scalar variant of the same intrinsic (i32/f32 instead of a vector).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.i32.f32")]
        fn vcagts_f32_(a: f32, b: f32) -> u32;
    }
    vcagts_f32_(a, b)
}

/// Floating-point absolute compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.i64.f64")]
        fn vcagtd_f64_(a: f64, b: f64) -> u64;
    }
    vcagtd_f64_(a, b)
}

/// Floating-point absolute compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v1i64.v1f64")]
        fn vcage_f64_(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    vcage_f64_(a, b)
}

/// Floating-point absolute compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v2i64.v2f64")]
        fn vcageq_f64_(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    vcageq_f64_(a, b)
}

/// Floating-point absolute compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.i32.f32")]
        fn vcages_f32_(a: f32, b: f32) -> u32;
    }
    vcages_f32_(a, b)
}

/// Floating-point absolute compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.i64.f64")]
        fn vcaged_f64_(a: f64, b: f64) -> u64;
    }
    vcaged_f64_(a, b)
}

/// Floating-point absolute compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| < |b|  ==  |b| > |a|: reuse the greater-than form, operands swapped.
    vcagt_f64(b, a)
}

/// Floating-point absolute compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    vcagtq_f64(b, a)
}

/// Floating-point absolute compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcalts_f32(a: f32, b: f32) -> u32 {
    vcagts_f32(b, a)
}

/// Floating-point absolute compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcaltd_f64(a: f64, b: f64) -> u64 {
    vcagtd_f64(b, a)
}

/// Floating-point absolute compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| <= |b|  ==  |b| >= |a|: reuse the greater-or-equal form, swapped.
    vcage_f64(b, a)
}

/// Floating-point absolute compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    vcageq_f64(b, a)
}

/// Floating-point absolute compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcales_f32(a: f32, b: f32) -> u32 {
    vcages_f32(b, a)
}

/// Floating-point absolute compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 {
    vcaged_f64(b, a)
}
2278
2279/// Insert vector element from another vector element
2280///
2281/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)
2282#[inline]
2283#[target_feature(enable = "neon")]
2284#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2285#[rustc_legacy_const_generics(1, 3)]
2286#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2287pub unsafe fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
2288    static_assert_uimm_bits!(LANE1, 3);
2289    static_assert_uimm_bits!(LANE2, 3);
2290    match LANE1 & 0b111 {
2291        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
2292        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
2293        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
2294        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
2295        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
2296        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
2297        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
2298        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
2299        _ => unreachable_unchecked(),
2300    }
2301}
2302
2303/// Insert vector element from another vector element
2304///
2305/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)
2306#[inline]
2307#[target_feature(enable = "neon")]
2308#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2309#[rustc_legacy_const_generics(1, 3)]
2310#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2311pub unsafe fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
2312    static_assert_uimm_bits!(LANE1, 4);
2313    static_assert_uimm_bits!(LANE2, 4);
2314    match LANE1 & 0b1111 {
2315        0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2316        1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2317        2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2318        3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2319        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2320        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2321        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
2322        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
2323        8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
2324        9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
2325        10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
2326        11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
2327        12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
2328        13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
2329        14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
2330        15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
2331        _ => unreachable_unchecked(),
2332    }
2333}
2334
2335/// Insert vector element from another vector element
2336///
2337/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)
2338#[inline]
2339#[target_feature(enable = "neon")]
2340#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
2341#[rustc_legacy_const_generics(1, 3)]
2342#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2343pub unsafe fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
2344    static_assert_uimm_bits!(LANE1, 2);
2345    static_assert_uimm_bits!(LANE2, 2);
2346    match LANE1 & 0b11 {
2347        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
2348        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
2349        2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
2350        3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
2351        _ => unreachable_unchecked(),
2352    }
2353}
2354
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=7 keep lanes of `a`; 8 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b111 is always 0..=7.
        _ => unreachable_unchecked(),
    }
}
2378
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=1 keep lanes of `a`; 2 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1 is always 0..=1.
        _ => unreachable_unchecked(),
    }
}
2396
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=3 keep lanes of `a`; 4 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b11 is always 0..=3.
        _ => unreachable_unchecked(),
    }
}
2416
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=1 keep lanes of `a`; 2 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1 is always 0..=1.
        _ => unreachable_unchecked(),
    }
}
2434
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=7 keep lanes of `a`; 8 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b111 is always 0..=7.
        _ => unreachable_unchecked(),
    }
}
2458
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=15 keep lanes of `a`; 16 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1111 {
        0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
        8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
        9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
        10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
        11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
        12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
        13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
        14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
        15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1111 is always 0..=15.
        _ => unreachable_unchecked(),
    }
}
2490
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=3 keep lanes of `a`; 4 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b11 is always 0..=3.
        _ => unreachable_unchecked(),
    }
}
2510
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=7 keep lanes of `a`; 8 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b111 is always 0..=7.
        _ => unreachable_unchecked(),
    }
}
2534
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=1 keep lanes of `a`; 2 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1 is always 0..=1.
        _ => unreachable_unchecked(),
    }
}
2552
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=3 keep lanes of `a`; 4 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b11 is always 0..=3.
        _ => unreachable_unchecked(),
    }
}
2572
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=1 keep lanes of `a`; 2 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1 is always 0..=1.
        _ => unreachable_unchecked(),
    }
}
2590
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=7 keep lanes of `a`; 8 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b111 is always 0..=7.
        _ => unreachable_unchecked(),
    }
}
2614
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=15 keep lanes of `a`; 16 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1111 {
        0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
        8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
        9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
        10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
        11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
        12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
        13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
        14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
        15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1111 is always 0..=15.
        _ => unreachable_unchecked(),
    }
}
2646
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=3 keep lanes of `a`; 4 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b11 is always 0..=3.
        _ => unreachable_unchecked(),
    }
}
2666
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=7 keep lanes of `a`; 8 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b111 is always 0..=7.
        _ => unreachable_unchecked(),
    }
}
2690
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=1 keep lanes of `a`; 2 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1 is always 0..=1.
        _ => unreachable_unchecked(),
    }
}
2708
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=1 keep lanes of `a`; 2 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1 is always 0..=1.
        _ => unreachable_unchecked(),
    }
}
2726
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=3 keep lanes of `a`; 4 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b11 is always 0..=3.
        _ => unreachable_unchecked(),
    }
}
2746
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE1: destination lane in `a`; LANE2: source lane in `b`. Both must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=1 keep lanes of `a`; 2 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1 is always 0..=1.
        _ => unreachable_unchecked(),
    }
}
2764
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    // LANE1: destination lane in the 8-lane `a` (0..=7); LANE2: source lane in the 16-lane `b` (0..=15).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes so both shuffle operands have equal lane counts;
    // indices 8..=15 repeat `a`'s lanes and are never selected by the final shuffle.
    let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=7 keep lanes of `a`; 16 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b111 is always 0..=7.
        _ => unreachable_unchecked(),
    }
}
2789
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
    // LANE1: destination lane in the 4-lane `a` (0..=3); LANE2: source lane in the 8-lane `b` (0..=7).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes so both shuffle operands have equal lane counts;
    // indices 4..=7 repeat `a`'s lanes and are never selected by the final shuffle.
    let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=3 keep lanes of `a`; 8 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b11 is always 0..=3.
        _ => unreachable_unchecked(),
    }
}
2810
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
    // LANE1: destination lane in the 2-lane `a` (0..=1); LANE2: source lane in the 4-lane `b` (0..=3).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes so both shuffle operands have equal lane counts;
    // indices 2..=3 repeat `a`'s lanes and are never selected by the final shuffle.
    let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=1 keep lanes of `a`; 4 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1 is always 0..=1.
        _ => unreachable_unchecked(),
    }
}
2829
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x16_t) -> uint8x8_t {
    // LANE1: destination lane in the 8-lane `a` (0..=7); LANE2: source lane in the 16-lane `b` (0..=15).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes so both shuffle operands have equal lane counts;
    // indices 8..=15 repeat `a`'s lanes and are never selected by the final shuffle.
    let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=7 keep lanes of `a`; 16 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b111 is always 0..=7.
        _ => unreachable_unchecked(),
    }
}
2854
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t {
    // LANE1: destination lane in the 4-lane `a` (0..=3); LANE2: source lane in the 8-lane `b` (0..=7).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes so both shuffle operands have equal lane counts;
    // indices 4..=7 repeat `a`'s lanes and are never selected by the final shuffle.
    let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=3 keep lanes of `a`; 8 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b11 is always 0..=3.
        _ => unreachable_unchecked(),
    }
}
2875
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t {
    // LANE1: destination lane in the 2-lane `a` (0..=1); LANE2: source lane in the 4-lane `b` (0..=3).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes so both shuffle operands have equal lane counts;
    // indices 2..=3 repeat `a`'s lanes and are never selected by the final shuffle.
    let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=1 keep lanes of `a`; 4 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1 is always 0..=1.
        _ => unreachable_unchecked(),
    }
}
2894
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x16_t) -> poly8x8_t {
    // LANE1: destination lane in the 8-lane `a` (0..=7); LANE2: source lane in the 16-lane `b` (0..=15).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes so both shuffle operands have equal lane counts;
    // indices 8..=15 repeat `a`'s lanes and are never selected by the final shuffle.
    let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=7 keep lanes of `a`; 16 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b111 is always 0..=7.
        _ => unreachable_unchecked(),
    }
}
2919
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t, b: poly16x8_t) -> poly16x4_t {
    // LANE1: destination lane in the 4-lane `a` (0..=3); LANE2: source lane in the 8-lane `b` (0..=7).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes so both shuffle operands have equal lane counts;
    // indices 4..=7 repeat `a`'s lanes and are never selected by the final shuffle.
    let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=3 keep lanes of `a`; 8 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b11 is always 0..=3.
        _ => unreachable_unchecked(),
    }
}
2940
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    // LANE1: destination lane in the 2-lane `a` (0..=1); LANE2: source lane in the 4-lane `b` (0..=3).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes so both shuffle operands have equal lane counts;
    // indices 2..=3 repeat `a`'s lanes and are never selected by the final shuffle.
    let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=1 keep lanes of `a`; 4 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1 is always 0..=1.
        _ => unreachable_unchecked(),
    }
}
2959
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
    // LANE1: destination lane in the 16-lane `a` (0..=15); LANE2: source lane in the 8-lane `b` (0..=7).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `b` to 16 lanes so both shuffle operands have equal lane counts;
    // indices 8..=15 repeat `b`'s lanes, and only lane LANE2 (< 8) is ever selected.
    let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
    // `simd_shuffle!` needs a const index array, so there is one arm per destination lane.
    // In the concatenated [a, b] index space, 0..=15 keep lanes of `a`; 16 + LANE2 inserts lane LANE2 of `b`.
    match LANE1 & 0b1111 {
        0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
        8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
        9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
        10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
        11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
        12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
        13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
        14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
        15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
        // Unreachable: LANE1 & 0b1111 is always 0..=15.
        _ => unreachable_unchecked(),
    }
}
2992
/// Insert vector element from another vector element
///
/// Copies lane `LANE2` of the 4-lane vector `b` into lane `LANE1` of the
/// 8-lane vector `a`, leaving the other lanes of `a` unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    // Compile-time lane checks: LANE1 in [0, 7] (destination), LANE2 in [0, 3] (source).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` from 4 to 8 lanes so both simd_shuffle! operands have equal width;
    // the duplicated upper lanes are never selected below.
    let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
    // In the concatenated 16-lane shuffle input (a ++ b), index 8 + LANE2 selects b[LANE2].
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
        // LANE1 is masked to 3 bits above, so 0..=7 covers every case.
        _ => unreachable_unchecked(),
    }
}
3017
/// Insert vector element from another vector element
///
/// Copies lane `LANE2` of the 2-lane vector `b` into lane `LANE1` of the
/// 4-lane vector `a`, leaving the other lanes of `a` unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    // Compile-time lane checks: LANE1 in [0, 3] (destination), LANE2 in [0, 1] (source).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` from 2 to 4 lanes so both simd_shuffle! operands have equal width;
    // the duplicated upper lanes are never selected below.
    let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);
    // In the concatenated 8-lane shuffle input (a ++ b), index 4 + LANE2 selects b[LANE2].
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
        // LANE1 is masked to 2 bits above, so 0..=3 covers every case.
        _ => unreachable_unchecked(),
    }
}
3038
/// Insert vector element from another vector element
///
/// Copies lane `LANE2` of the 8-lane vector `b` into lane `LANE1` of the
/// 16-lane vector `a`, leaving the other lanes of `a` unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // Compile-time lane checks: LANE1 in [0, 15] (destination), LANE2 in [0, 7] (source).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `b` from 8 to 16 lanes so both simd_shuffle! operands have equal width;
    // lanes 8..15 (a second copy of b[0..7]) are never selected below.
    let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
    // In the concatenated 32-lane shuffle input (a ++ b), index 16 + LANE2 selects
    // b[LANE2]; every other destination lane keeps the corresponding element of `a`.
    match LANE1 & 0b1111 {
        0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
        8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
        9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
        10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
        11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
        12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
        13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
        14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
        15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
        // LANE1 is masked to 4 bits above, so 0..=15 covers every case.
        _ => unreachable_unchecked(),
    }
}
3071
/// Insert vector element from another vector element
///
/// Copies lane `LANE2` of the 4-lane vector `b` into lane `LANE1` of the
/// 8-lane vector `a`, leaving the other lanes of `a` unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t {
    // Compile-time lane checks: LANE1 in [0, 7] (destination), LANE2 in [0, 3] (source).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` from 4 to 8 lanes so both simd_shuffle! operands have equal width;
    // the duplicated upper lanes are never selected below.
    let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
    // In the concatenated 16-lane shuffle input (a ++ b), index 8 + LANE2 selects b[LANE2].
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
        // LANE1 is masked to 3 bits above, so 0..=7 covers every case.
        _ => unreachable_unchecked(),
    }
}
3096
/// Insert vector element from another vector element
///
/// Copies lane `LANE2` of the 2-lane vector `b` into lane `LANE1` of the
/// 4-lane vector `a`, leaving the other lanes of `a` unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t {
    // Compile-time lane checks: LANE1 in [0, 3] (destination), LANE2 in [0, 1] (source).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` from 2 to 4 lanes so both simd_shuffle! operands have equal width;
    // the duplicated upper lanes are never selected below.
    let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);
    // In the concatenated 8-lane shuffle input (a ++ b), index 4 + LANE2 selects b[LANE2].
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
        // LANE1 is masked to 2 bits above, so 0..=3 covers every case.
        _ => unreachable_unchecked(),
    }
}
3117
/// Insert vector element from another vector element
///
/// Copies lane `LANE2` of the 8-lane vector `b` into lane `LANE1` of the
/// 16-lane vector `a`, leaving the other lanes of `a` unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t, b: poly8x8_t) -> poly8x16_t {
    // Compile-time lane checks: LANE1 in [0, 15] (destination), LANE2 in [0, 7] (source).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `b` from 8 to 16 lanes so both simd_shuffle! operands have equal width;
    // lanes 8..15 (a second copy of b[0..7]) are never selected below.
    let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
    // In the concatenated 32-lane shuffle input (a ++ b), index 16 + LANE2 selects
    // b[LANE2]; every other destination lane keeps the corresponding element of `a`.
    match LANE1 & 0b1111 {
        0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
        8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
        9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
        10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
        11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
        12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
        13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
        14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
        15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
        // LANE1 is masked to 4 bits above, so 0..=15 covers every case.
        _ => unreachable_unchecked(),
    }
}
3150
/// Insert vector element from another vector element
///
/// Copies lane `LANE2` of the 4-lane vector `b` into lane `LANE1` of the
/// 8-lane vector `a`, leaving the other lanes of `a` unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t, b: poly16x4_t) -> poly16x8_t {
    // Compile-time lane checks: LANE1 in [0, 7] (destination), LANE2 in [0, 3] (source).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` from 4 to 8 lanes so both simd_shuffle! operands have equal width;
    // the duplicated upper lanes are never selected below.
    let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
    // In the concatenated 16-lane shuffle input (a ++ b), index 8 + LANE2 selects b[LANE2].
    match LANE1 & 0b111 {
        0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
        1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
        2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
        4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
        5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
        6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
        7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
        // LANE1 is masked to 3 bits above, so 0..=7 covers every case.
        _ => unreachable_unchecked(),
    }
}
3175
/// Insert vector element from another vector element
///
/// Copies the single lane of `b` into lane `LANE1` of the 2-lane vector `a`,
/// leaving the other lane of `a` unchanged. `LANE2` must be 0.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t, b: int64x1_t) -> int64x2_t {
    // LANE1 in [0, 1]; `b` has only one lane, so LANE2 must be 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen `b` to 2 lanes (both copies of b[0]) so both shuffle operands match in width.
    let b: int64x2_t = simd_shuffle!(b, b, [0, 1]);
    // In the concatenated 4-lane shuffle input (a ++ b), index 2 + LANE2 selects b[LANE2].
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // LANE1 is masked to one bit above, so 0 and 1 cover every case.
        _ => unreachable_unchecked(),
    }
}
3194
/// Insert vector element from another vector element
///
/// Copies the single lane of `b` into lane `LANE1` of the 2-lane vector `a`,
/// leaving the other lane of `a` unchanged. `LANE2` must be 0.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t, b: uint64x1_t) -> uint64x2_t {
    // LANE1 in [0, 1]; `b` has only one lane, so LANE2 must be 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen `b` to 2 lanes (both copies of b[0]) so both shuffle operands match in width.
    let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]);
    // In the concatenated 4-lane shuffle input (a ++ b), index 2 + LANE2 selects b[LANE2].
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // LANE1 is masked to one bit above, so 0 and 1 cover every case.
        _ => unreachable_unchecked(),
    }
}
3213
/// Insert vector element from another vector element
///
/// Copies the single lane of `b` into lane `LANE1` of the 2-lane vector `a`,
/// leaving the other lane of `a` unchanged. `LANE2` must be 0.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t, b: poly64x1_t) -> poly64x2_t {
    // LANE1 in [0, 1]; `b` has only one lane, so LANE2 must be 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen `b` to 2 lanes (both copies of b[0]) so both shuffle operands match in width.
    let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]);
    // In the concatenated 4-lane shuffle input (a ++ b), index 2 + LANE2 selects b[LANE2].
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // LANE1 is masked to one bit above, so 0 and 1 cover every case.
        _ => unreachable_unchecked(),
    }
}
3232
/// Insert vector element from another vector element
///
/// Copies lane `LANE2` of the 2-lane vector `b` into lane `LANE1` of the
/// 4-lane vector `a`, leaving the other lanes of `a` unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    // Compile-time lane checks: LANE1 in [0, 3] (destination), LANE2 in [0, 1] (source).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` from 2 to 4 lanes so both simd_shuffle! operands have equal width;
    // the duplicated upper lanes are never selected below.
    let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);
    // In the concatenated 8-lane shuffle input (a ++ b), index 4 + LANE2 selects b[LANE2].
    match LANE1 & 0b11 {
        0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
        1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
        2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
        3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
        // LANE1 is masked to 2 bits above, so 0..=3 covers every case.
        _ => unreachable_unchecked(),
    }
}
3253
/// Insert vector element from another vector element
///
/// Copies the single lane of `b` into lane `LANE1` of the 2-lane vector `a`,
/// leaving the other lane of `a` unchanged. `LANE2` must be 0.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // LANE1 in [0, 1]; `b` has only one lane, so LANE2 must be 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen `b` to 2 lanes (both copies of b[0]) so both shuffle operands match in width.
    let b: float64x2_t = simd_shuffle!(b, b, [0, 1]);
    // In the concatenated 4-lane shuffle input (a ++ b), index 2 + LANE2 selects b[LANE2].
    match LANE1 & 0b1 {
        0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
        1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
        // LANE1 is masked to one bit above, so 0 and 1 cover every case.
        _ => unreachable_unchecked(),
    }
}
3272
/// Create vector from a 64-bit bit pattern
///
/// Reinterprets the bits of `a` as a single-lane `f64` vector; no conversion
/// is performed (hence `nop`). The original summary ("Insert vector element")
/// was a copy-paste of the neighbouring intrinsics' text.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcreate_f64(a: u64) -> float64x1_t {
    // Bit-level reinterpretation: u64 and float64x1_t are the same size.
    transmute(a)
}
3283
/// Fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    // Lane-wise signed i64 -> f64 conversion (SCVTF per the assert_instr above).
    simd_cast(a)
}
3294
/// Fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    // Lane-wise signed i64 -> f64 conversion (SCVTF per the assert_instr above).
    simd_cast(a)
}
3305
/// Fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    // Lane-wise unsigned u64 -> f64 conversion (UCVTF per the assert_instr above).
    simd_cast(a)
}
3316
/// Fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Lane-wise unsigned u64 -> f64 conversion (UCVTF per the assert_instr above).
    simd_cast(a)
}
3327
/// Floating-point convert to higher precision long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    // Lane-wise widening f32 -> f64 conversion (FCVTL); exact, never loses precision.
    simd_cast(a)
}
3338
/// Floating-point convert to higher precision long
///
/// Converts the upper two `f32` lanes of `a` (lanes 2 and 3) to `f64`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    // Extract the high half (lanes 2, 3), then widen each lane to f64.
    let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
    simd_cast(b)
}
3350
/// Floating-point convert to lower precision narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    // Lane-wise narrowing f64 -> f32 conversion (FCVTN per the assert_instr above).
    simd_cast(a)
}
3361
/// Floating-point convert to lower precision narrow
///
/// Returns a 4-lane vector whose low half is `a` and whose high half is `b`
/// narrowed from `f64` to `f32`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Indices 0, 1 pick a[0], a[1]; indices 2, 3 pick the narrowed b lanes.
    simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3])
}
3372
/// Floating-point convert to lower precision narrow, rounding to odd
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
    // Round-to-odd has no portable simd_cast equivalent, so this binds
    // directly to the LLVM AArch64 FCVTXN intrinsic.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64")]
        fn vcvtx_f32_f64_(a: float64x2_t) -> float32x2_t;
    }
    vcvtx_f32_f64_(a)
}
3388
/// Floating-point convert to lower precision narrow, rounding to odd
///
/// Scalar variant: converts a single `f64` to `f32` with round-to-odd.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 {
    // Broadcast the scalar into both lanes, run the vector conversion,
    // then extract lane 0 as the scalar result.
    simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0)
}
3399
/// Floating-point convert to lower precision narrow, rounding to odd
///
/// Returns a 4-lane vector whose low half is `a` and whose high half is `b`
/// narrowed from `f64` to `f32` with round-to-odd.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Indices 0, 1 pick a[0], a[1]; indices 2, 3 pick the round-to-odd-narrowed b lanes.
    simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3])
}
3410
/// Fixed-point convert to floating-point
///
/// Converts each signed 64-bit fixed-point lane with `N` fractional bits to `f64`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    // The fractional-bit count must be in [1, 64], per the architecture's immediate range.
    static_assert!(N >= 1 && N <= 64);
    // Binds to the LLVM signed fixed-point -> FP intrinsic; N is passed as
    // the runtime `n` argument but is a compile-time constant here.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64")]
        fn vcvt_n_f64_s64_(a: int64x1_t, n: i32) -> float64x1_t;
    }
    vcvt_n_f64_s64_(a, N)
}
3428
/// Fixed-point convert to floating-point
///
/// Converts each signed 64-bit fixed-point lane with `N` fractional bits to `f64`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    // The fractional-bit count must be in [1, 64], per the architecture's immediate range.
    static_assert!(N >= 1 && N <= 64);
    // Binds to the LLVM signed fixed-point -> FP intrinsic for the 2-lane form.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64")]
        fn vcvtq_n_f64_s64_(a: int64x2_t, n: i32) -> float64x2_t;
    }
    vcvtq_n_f64_s64_(a, N)
}
3446
/// Fixed-point convert to floating-point
///
/// Scalar variant: converts a signed 32-bit fixed-point value with `N`
/// fractional bits to `f32`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
    // The fractional-bit count must be in [1, 32] for the 32-bit form.
    static_assert!(N >= 1 && N <= 32);
    // Binds to the LLVM signed fixed-point -> FP intrinsic, scalar i32 -> f32 form.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32")]
        fn vcvts_n_f32_s32_(a: i32, n: i32) -> f32;
    }
    vcvts_n_f32_s32_(a, N)
}
3464
/// Fixed-point convert to floating-point
///
/// Scalar variant: converts a signed 64-bit fixed-point value with `N`
/// fractional bits to `f64`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
    // The fractional-bit count must be in [1, 64] for the 64-bit form.
    static_assert!(N >= 1 && N <= 64);
    // Binds to the LLVM signed fixed-point -> FP intrinsic, scalar i64 -> f64 form.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64")]
        fn vcvtd_n_f64_s64_(a: i64, n: i32) -> f64;
    }
    vcvtd_n_f64_s64_(a, N)
}
3482
/// Fixed-point convert to floating-point
///
/// Converts each unsigned 64-bit fixed-point lane with `N` fractional bits to `f64`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    // The fractional-bit count must be in [1, 64], per the architecture's immediate range.
    static_assert!(N >= 1 && N <= 64);
    // Binds to the LLVM unsigned fixed-point -> FP intrinsic (note `fxu` vs `fxs`).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64")]
        fn vcvt_n_f64_u64_(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    vcvt_n_f64_u64_(a, N)
}
3500
/// Fixed-point convert to floating-point
///
/// Converts each unsigned 64-bit fixed-point lane with `N` fractional bits to `f64`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    // The fractional-bit count must be in [1, 64], per the architecture's immediate range.
    static_assert!(N >= 1 && N <= 64);
    // Binds to the LLVM unsigned fixed-point -> FP intrinsic for the 2-lane form.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64")]
        fn vcvtq_n_f64_u64_(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    vcvtq_n_f64_u64_(a, N)
}
3518
/// Fixed-point convert to floating-point
///
/// Scalar variant: converts an unsigned 32-bit fixed-point value with `N`
/// fractional bits to `f32`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
    // The fractional-bit count must be in [1, 32] for the 32-bit form.
    static_assert!(N >= 1 && N <= 32);
    // Binds to the LLVM unsigned fixed-point -> FP intrinsic, scalar u32 -> f32 form.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32")]
        fn vcvts_n_f32_u32_(a: u32, n: i32) -> f32;
    }
    vcvts_n_f32_u32_(a, N)
}
3536
/// Fixed-point convert to floating-point
///
/// Scalar variant: converts an unsigned 64-bit fixed-point value with `N`
/// fractional bits to `f64`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
    // The fractional-bit count must be in [1, 64] for the 64-bit form.
    static_assert!(N >= 1 && N <= 64);
    // Binds to the LLVM unsigned fixed-point -> FP intrinsic, scalar u64 -> f64 form.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64")]
        fn vcvtd_n_f64_u64_(a: u64, n: i32) -> f64;
    }
    vcvtd_n_f64_u64_(a, N)
}
3554
/// Floating-point convert to fixed-point, rounding toward zero
///
/// Converts each `f64` lane to a signed 64-bit fixed-point value with `N`
/// fractional bits.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    // The fractional-bit count must be in [1, 64], per the architecture's immediate range.
    static_assert!(N >= 1 && N <= 64);
    // Binds to the LLVM FP -> signed fixed-point intrinsic (`fp2fxs`).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64")]
        fn vcvt_n_s64_f64_(a: float64x1_t, n: i32) -> int64x1_t;
    }
    vcvt_n_s64_f64_(a, N)
}
3572
/// Floating-point convert to fixed-point, rounding toward zero
///
/// Converts each `f64` lane to a signed 64-bit fixed-point value with `N`
/// fractional bits.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    // The fractional-bit count must be in [1, 64], per the architecture's immediate range.
    static_assert!(N >= 1 && N <= 64);
    // Binds to the LLVM FP -> signed fixed-point intrinsic for the 2-lane form.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64")]
        fn vcvtq_n_s64_f64_(a: float64x2_t, n: i32) -> int64x2_t;
    }
    vcvtq_n_s64_f64_(a, N)
}
3590
/// Floating-point convert to fixed-point, rounding toward zero
///
/// Scalar variant: converts an `f32` to a signed 32-bit fixed-point value
/// with `N` fractional bits.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
    // The fractional-bit count must be in [1, 32] for the 32-bit form.
    static_assert!(N >= 1 && N <= 32);
    // Binds to the LLVM FP -> signed fixed-point intrinsic, scalar f32 -> i32 form.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32")]
        fn vcvts_n_s32_f32_(a: f32, n: i32) -> i32;
    }
    vcvts_n_s32_f32_(a, N)
}
3608
/// Floating-point convert to fixed-point, rounding toward zero
///
/// Scalar variant: converts an `f64` to a signed 64-bit fixed-point value
/// with `N` fractional bits.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
    // The fractional-bit count must be in [1, 64] for the 64-bit form.
    static_assert!(N >= 1 && N <= 64);
    // Binds to the LLVM FP -> signed fixed-point intrinsic, scalar f64 -> i64 form.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64")]
        fn vcvtd_n_s64_f64_(a: f64, n: i32) -> i64;
    }
    vcvtd_n_s64_f64_(a, N)
}
3626
/// Floating-point convert to fixed-point, rounding toward zero
///
/// `N` is the number of fraction bits in the unsigned fixed-point result; it
/// is validated at compile time by the `static_assert!` below.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64")]
        fn vcvt_n_u64_f64_(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    vcvt_n_u64_f64_(a, N)
}
3644
/// Floating-point convert to fixed-point, rounding toward zero
///
/// `N` is the number of fraction bits in the unsigned fixed-point result; it
/// is validated at compile time by the `static_assert!` below.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64")]
        fn vcvtq_n_u64_f64_(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    vcvtq_n_u64_f64_(a, N)
}
3662
/// Floating-point convert to fixed-point, rounding toward zero
///
/// `N` is the number of fraction bits in the unsigned fixed-point result; it
/// is validated at compile time by the `static_assert!` below.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32")]
        fn vcvts_n_u32_f32_(a: f32, n: i32) -> u32;
    }
    vcvts_n_u32_f32_(a, N)
}
3680
/// Floating-point convert to fixed-point, rounding toward zero
///
/// `N` is the number of fraction bits in the unsigned fixed-point result; it
/// is validated at compile time by the `static_assert!` below.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64")]
        fn vcvtd_n_u64_f64_(a: f64, n: i32) -> u64;
    }
    vcvtd_n_u64_f64_(a, N)
}
3698
/// Fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_f32_s32(a: i32) -> f32 {
    // A plain numeric cast is sufficient: `assert_instr` above checks that it
    // lowers to a single SCVTF instruction.
    a as f32
}
3709
/// Fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_f64_s64(a: i64) -> f64 {
    // A plain numeric cast is sufficient: `assert_instr` above checks that it
    // lowers to a single SCVTF instruction.
    a as f64
}
3720
/// Fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_f32_u32(a: u32) -> f32 {
    // A plain numeric cast is sufficient: `assert_instr` above checks that it
    // lowers to a single UCVTF instruction.
    a as f32
}
3731
/// Fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_f64_u64(a: u64) -> f64 {
    // A plain numeric cast is sufficient: `assert_instr` above checks that it
    // lowers to a single UCVTF instruction.
    a as f64
}
3742
/// Floating-point convert to signed fixed-point, rounding toward zero
///
/// (The previous summary, "Fixed-point convert to floating-point", described
/// the opposite direction; this intrinsic converts `f32` to `i32`.)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_s32_f32(a: f32) -> i32 {
    // Rust float-to-int `as` casts saturate out-of-range values and map NaN
    // to 0, matching the behavior of the FCVTZS instruction asserted above.
    a as i32
}
3753
/// Floating-point convert to signed fixed-point, rounding toward zero
///
/// (The previous summary, "Fixed-point convert to floating-point", described
/// the opposite direction; this intrinsic converts `f64` to `i64`.)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_s64_f64(a: f64) -> i64 {
    // Rust float-to-int `as` casts saturate out-of-range values and map NaN
    // to 0, matching the behavior of the FCVTZS instruction asserted above.
    a as i64
}
3764
/// Floating-point convert to unsigned fixed-point, rounding toward zero
///
/// (The previous summary, "Fixed-point convert to floating-point", described
/// the opposite direction; this intrinsic converts `f32` to `u32`.)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_u32_f32(a: f32) -> u32 {
    // Rust float-to-int `as` casts saturate out-of-range values and map NaN
    // to 0, matching the behavior of the FCVTZU instruction asserted above.
    a as u32
}
3775
/// Floating-point convert to unsigned fixed-point, rounding toward zero
///
/// (The previous summary, "Fixed-point convert to floating-point", described
/// the opposite direction; this intrinsic converts `f64` to `u64`.)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_u64_f64(a: f64) -> u64 {
    // Rust float-to-int `as` casts saturate out-of-range values and map NaN
    // to 0, matching the behavior of the FCVTZU instruction asserted above.
    a as u64
}
3786
/// Floating-point convert to signed fixed-point, rounding toward zero
///
/// Uses LLVM's saturating `fptosi.sat` intrinsic: out-of-range lanes clamp to
/// `i64::MIN`/`i64::MAX` and NaN converts to 0.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptosi.sat.v1i64.v1f64")]
        fn vcvt_s64_f64_(a: float64x1_t) -> int64x1_t;
    }
    vcvt_s64_f64_(a)
}
3802
/// Floating-point convert to signed fixed-point, rounding toward zero
///
/// Uses LLVM's saturating `fptosi.sat` intrinsic: out-of-range lanes clamp to
/// `i64::MIN`/`i64::MAX` and NaN converts to 0.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptosi.sat.v2i64.v2f64")]
        fn vcvtq_s64_f64_(a: float64x2_t) -> int64x2_t;
    }
    vcvtq_s64_f64_(a)
}
3818
/// Floating-point convert to unsigned fixed-point, rounding toward zero
///
/// Uses LLVM's saturating `fptoui.sat` intrinsic: out-of-range lanes clamp to
/// `0`/`u64::MAX` and NaN converts to 0.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v1i64.v1f64")]
        fn vcvt_u64_f64_(a: float64x1_t) -> uint64x1_t;
    }
    vcvt_u64_f64_(a)
}
3834
/// Floating-point convert to unsigned fixed-point, rounding toward zero
///
/// Uses LLVM's saturating `fptoui.sat` intrinsic: out-of-range lanes clamp to
/// `0`/`u64::MAX` and NaN converts to 0.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v2i64.v2f64")]
        fn vcvtq_u64_f64_(a: float64x2_t) -> uint64x2_t;
    }
    vcvtq_u64_f64_(a)
}
3850
/// Floating-point convert to signed integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32")]
        fn vcvta_s32_f32_(a: float32x2_t) -> int32x2_t;
    }
    vcvta_s32_f32_(a)
}
3866
/// Floating-point convert to signed integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32")]
        fn vcvtaq_s32_f32_(a: float32x4_t) -> int32x4_t;
    }
    vcvtaq_s32_f32_(a)
}
3882
/// Floating-point convert to signed integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64")]
        fn vcvta_s64_f64_(a: float64x1_t) -> int64x1_t;
    }
    vcvta_s64_f64_(a)
}
3898
/// Floating-point convert to signed integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64")]
        fn vcvtaq_s64_f64_(a: float64x2_t) -> int64x2_t;
    }
    vcvtaq_s64_f64_(a)
}
3914
/// Floating-point convert to integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtas_s32_f32(a: f32) -> i32 {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.i32.f32")]
        fn vcvtas_s32_f32_(a: f32) -> i32;
    }
    vcvtas_s32_f32_(a)
}
3930
/// Floating-point convert to integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtad_s64_f64(a: f64) -> i64 {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtas.i64.f64")]
        fn vcvtad_s64_f64_(a: f64) -> i64;
    }
    vcvtad_s64_f64_(a)
}
3946
/// Floating-point convert to integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.i32.f32")]
        fn vcvtas_u32_f32_(a: f32) -> u32;
    }
    vcvtas_u32_f32_(a)
}
3962
/// Floating-point convert to integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.i64.f64")]
        fn vcvtad_u64_f64_(a: f64) -> u64;
    }
    vcvtad_u64_f64_(a)
}
3978
/// Floating-point convert to signed integer, rounding to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32")]
        fn vcvtn_s32_f32_(a: float32x2_t) -> int32x2_t;
    }
    vcvtn_s32_f32_(a)
}
3994
/// Floating-point convert to signed integer, rounding to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32")]
        fn vcvtnq_s32_f32_(a: float32x4_t) -> int32x4_t;
    }
    vcvtnq_s32_f32_(a)
}
4010
/// Floating-point convert to signed integer, rounding to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64")]
        fn vcvtn_s64_f64_(a: float64x1_t) -> int64x1_t;
    }
    vcvtn_s64_f64_(a)
}
4026
/// Floating-point convert to signed integer, rounding to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64")]
        fn vcvtnq_s64_f64_(a: float64x2_t) -> int64x2_t;
    }
    vcvtnq_s64_f64_(a)
}
4042
/// Floating-point convert to signed integer, rounding to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtns_s32_f32(a: f32) -> i32 {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.i32.f32")]
        fn vcvtns_s32_f32_(a: f32) -> i32;
    }
    vcvtns_s32_f32_(a)
}
4058
/// Floating-point convert to signed integer, rounding to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtnd_s64_f64(a: f64) -> i64 {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtns.i64.f64")]
        fn vcvtnd_s64_f64_(a: f64) -> i64;
    }
    vcvtnd_s64_f64_(a)
}
4074
/// Floating-point convert to signed integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32")]
        fn vcvtm_s32_f32_(a: float32x2_t) -> int32x2_t;
    }
    vcvtm_s32_f32_(a)
}
4090
/// Floating-point convert to signed integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32")]
        fn vcvtmq_s32_f32_(a: float32x4_t) -> int32x4_t;
    }
    vcvtmq_s32_f32_(a)
}
4106
/// Floating-point convert to signed integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64")]
        fn vcvtm_s64_f64_(a: float64x1_t) -> int64x1_t;
    }
    vcvtm_s64_f64_(a)
}
4122
/// Floating-point convert to signed integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64")]
        fn vcvtmq_s64_f64_(a: float64x2_t) -> int64x2_t;
    }
    vcvtmq_s64_f64_(a)
}
4138
/// Floating-point convert to signed integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtms_s32_f32(a: f32) -> i32 {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.i32.f32")]
        fn vcvtms_s32_f32_(a: f32) -> i32;
    }
    vcvtms_s32_f32_(a)
}
4154
/// Floating-point convert to signed integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtmd_s64_f64(a: f64) -> i64 {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtms.i64.f64")]
        fn vcvtmd_s64_f64_(a: f64) -> i64;
    }
    vcvtmd_s64_f64_(a)
}
4170
/// Floating-point convert to signed integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32")]
        fn vcvtp_s32_f32_(a: float32x2_t) -> int32x2_t;
    }
    vcvtp_s32_f32_(a)
}
4186
/// Floating-point convert to signed integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32")]
        fn vcvtpq_s32_f32_(a: float32x4_t) -> int32x4_t;
    }
    vcvtpq_s32_f32_(a)
}
4202
/// Floating-point convert to signed integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64")]
        fn vcvtp_s64_f64_(a: float64x1_t) -> int64x1_t;
    }
    vcvtp_s64_f64_(a)
}
4218
/// Floating-point convert to signed integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64")]
        fn vcvtpq_s64_f64_(a: float64x2_t) -> int64x2_t;
    }
    vcvtpq_s64_f64_(a)
}
4234
/// Floating-point convert to signed integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtps_s32_f32(a: f32) -> i32 {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.i32.f32")]
        fn vcvtps_s32_f32_(a: f32) -> i32;
    }
    vcvtps_s32_f32_(a)
}
4250
/// Floating-point convert to signed integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtpd_s64_f64(a: f64) -> i64 {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtps.i64.f64")]
        fn vcvtpd_s64_f64_(a: f64) -> i64;
    }
    vcvtpd_s64_f64_(a)
}
4266
/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32")]
        fn vcvta_u32_f32_(a: float32x2_t) -> uint32x2_t;
    }
    vcvta_u32_f32_(a)
}
4282
/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32")]
        fn vcvtaq_u32_f32_(a: float32x4_t) -> uint32x4_t;
    }
    vcvtaq_u32_f32_(a)
}
4298
/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64")]
        fn vcvta_u64_f64_(a: float64x1_t) -> uint64x1_t;
    }
    vcvta_u64_f64_(a)
}
4314
/// Floating-point convert to unsigned integer, rounding to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64")]
        fn vcvtaq_u64_f64_(a: float64x2_t) -> uint64x2_t;
    }
    vcvtaq_u64_f64_(a)
}
4330
/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    #[allow(improper_ctypes)]
    // FFI declaration of the backing LLVM intrinsic (selected by `link_name`).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32")]
        fn vcvtn_u32_f32_(a: float32x2_t) -> uint32x2_t;
    }
    vcvtn_u32_f32_(a)
}
4346
4347/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
4348///
4349/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)
4350#[inline]
4351#[target_feature(enable = "neon")]
4352#[cfg_attr(test, assert_instr(fcvtnu))]
4353#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4354pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
4355    #[allow(improper_ctypes)]
4356    extern "unadjusted" {
4357        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32")]
4358        fn vcvtnq_u32_f32_(a: float32x4_t) -> uint32x4_t;
4359    }
4360    vcvtnq_u32_f32_(a)
4361}
4362
4363/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
4364///
4365/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)
4366#[inline]
4367#[target_feature(enable = "neon")]
4368#[cfg_attr(test, assert_instr(fcvtnu))]
4369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4370pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
4371    #[allow(improper_ctypes)]
4372    extern "unadjusted" {
4373        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64")]
4374        fn vcvtn_u64_f64_(a: float64x1_t) -> uint64x1_t;
4375    }
4376    vcvtn_u64_f64_(a)
4377}
4378
4379/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
4380///
4381/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)
4382#[inline]
4383#[target_feature(enable = "neon")]
4384#[cfg_attr(test, assert_instr(fcvtnu))]
4385#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4386pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
4387    #[allow(improper_ctypes)]
4388    extern "unadjusted" {
4389        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64")]
4390        fn vcvtnq_u64_f64_(a: float64x2_t) -> uint64x2_t;
4391    }
4392    vcvtnq_u64_f64_(a)
4393}
4394
4395/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
4396///
4397/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)
4398#[inline]
4399#[target_feature(enable = "neon")]
4400#[cfg_attr(test, assert_instr(fcvtnu))]
4401#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4402pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 {
4403    #[allow(improper_ctypes)]
4404    extern "unadjusted" {
4405        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.i32.f32")]
4406        fn vcvtns_u32_f32_(a: f32) -> u32;
4407    }
4408    vcvtns_u32_f32_(a)
4409}
4410
4411/// Floating-point convert to unsigned integer, rounding to nearest with ties to even
4412///
4413/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)
4414#[inline]
4415#[target_feature(enable = "neon")]
4416#[cfg_attr(test, assert_instr(fcvtnu))]
4417#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4418pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 {
4419    #[allow(improper_ctypes)]
4420    extern "unadjusted" {
4421        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.i64.f64")]
4422        fn vcvtnd_u64_f64_(a: f64) -> u64;
4423    }
4424    vcvtnd_u64_f64_(a)
4425}
4426
/// Floating-point convert to unsigned integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)
///
/// Lowers to the `fcvtmu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32")]
        fn vcvtm_u32_f32_(a: float32x2_t) -> uint32x2_t;
    }
    vcvtm_u32_f32_(a)
}

/// Floating-point convert to unsigned integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)
///
/// Lowers to the `fcvtmu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32")]
        fn vcvtmq_u32_f32_(a: float32x4_t) -> uint32x4_t;
    }
    vcvtmq_u32_f32_(a)
}

/// Floating-point convert to unsigned integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)
///
/// Lowers to the `fcvtmu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64")]
        fn vcvtm_u64_f64_(a: float64x1_t) -> uint64x1_t;
    }
    vcvtm_u64_f64_(a)
}

/// Floating-point convert to unsigned integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)
///
/// Lowers to the `fcvtmu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64")]
        fn vcvtmq_u64_f64_(a: float64x2_t) -> uint64x2_t;
    }
    vcvtmq_u64_f64_(a)
}

/// Floating-point convert to unsigned integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)
///
/// Scalar form; lowers to the `fcvtmu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.i32.f32")]
        fn vcvtms_u32_f32_(a: f32) -> u32;
    }
    vcvtms_u32_f32_(a)
}

/// Floating-point convert to unsigned integer, rounding toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)
///
/// Scalar form; lowers to the `fcvtmu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.i64.f64")]
        fn vcvtmd_u64_f64_(a: f64) -> u64;
    }
    vcvtmd_u64_f64_(a)
}
4522
/// Floating-point convert to unsigned integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)
///
/// Lowers to the `fcvtpu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32")]
        fn vcvtp_u32_f32_(a: float32x2_t) -> uint32x2_t;
    }
    vcvtp_u32_f32_(a)
}

/// Floating-point convert to unsigned integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)
///
/// Lowers to the `fcvtpu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32")]
        fn vcvtpq_u32_f32_(a: float32x4_t) -> uint32x4_t;
    }
    vcvtpq_u32_f32_(a)
}

/// Floating-point convert to unsigned integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)
///
/// Lowers to the `fcvtpu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64")]
        fn vcvtp_u64_f64_(a: float64x1_t) -> uint64x1_t;
    }
    vcvtp_u64_f64_(a)
}

/// Floating-point convert to unsigned integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)
///
/// Lowers to the `fcvtpu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64")]
        fn vcvtpq_u64_f64_(a: float64x2_t) -> uint64x2_t;
    }
    vcvtpq_u64_f64_(a)
}

/// Floating-point convert to unsigned integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)
///
/// Scalar form; lowers to the `fcvtpu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.i32.f32")]
        fn vcvtps_u32_f32_(a: f32) -> u32;
    }
    vcvtps_u32_f32_(a)
}

/// Floating-point convert to unsigned integer, rounding toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)
///
/// Scalar form; lowers to the `fcvtpu` instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Binding for the LLVM intrinsic that implements this conversion.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.i64.f64")]
        fn vcvtpd_u64_f64_(a: f64) -> u64;
    }
    vcvtpd_u64_f64_(a)
}
4618
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)
///
/// Broadcasts lane `N` of `a` into both lanes of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
    // Lane index must fit in 1 bit (0 or 1), checked at compile time.
    static_assert_uimm_bits!(N, 1);
    simd_shuffle!(a, a, [N as u32, N as u32])
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)
///
/// Broadcasts the single lane of `a` into both lanes of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
    // The source has exactly one lane, so only N == 0 is valid.
    static_assert!(N == 0);
    simd_shuffle!(a, a, [N as u32, N as u32])
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)
///
/// Broadcasts lane `N` of `a` into both lanes of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
    // Lane index must fit in 1 bit (0 or 1), checked at compile time.
    static_assert_uimm_bits!(N, 1);
    simd_shuffle!(a, a, [N as u32, N as u32])
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)
///
/// Broadcasts the single lane of `a` into both lanes of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
    // The source has exactly one lane, so only N == 0 is valid.
    static_assert!(N == 0);
    simd_shuffle!(a, a, [N as u32, N as u32])
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)
///
/// One source lane to one destination lane: this is the identity, hence `nop`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
    // The source has exactly one lane, so only N == 0 is valid.
    static_assert!(N == 0);
    a
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)
///
/// One source lane to one destination lane: this is the identity, hence `nop`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
    // The source has exactly one lane, so only N == 0 is valid.
    static_assert!(N == 0);
    a
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)
///
/// Extracts lane `N` of `a` into a one-lane vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
    // Lane index must fit in 1 bit (0 or 1), checked at compile time.
    static_assert_uimm_bits!(N, 1);
    // simd_extract! yields the scalar lane; transmute rewraps it as a one-lane vector.
    transmute::<u64, _>(simd_extract!(a, N as u32))
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)
///
/// Extracts lane `N` of `a` into a one-lane vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
    // Lane index must fit in 1 bit (0 or 1), checked at compile time.
    static_assert_uimm_bits!(N, 1);
    // simd_extract! yields the scalar lane; transmute rewraps it as a one-lane vector.
    transmute::<f64, _>(simd_extract!(a, N as u32))
}
4722
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
    // 8 lanes: index must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
    // 16 lanes: index must fit in 4 bits.
    static_assert_uimm_bits!(N, 4);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
    // 4 lanes: index must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
    // 8 lanes: index must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
    // 2 lanes: index must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
    // 4 lanes: index must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)
///
/// Scalar variant: returns the value of the single lane of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
    // The source has exactly one lane, so only N == 0 is valid.
    static_assert!(N == 0);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
    // 2 lanes: index must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    simd_extract!(a, N as u32)
}
4826
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    // 8 lanes: index must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
    // 16 lanes: index must fit in 4 bits.
    static_assert_uimm_bits!(N, 4);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    // 4 lanes: index must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    // 8 lanes: index must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
    // 2 lanes: index must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    // 4 lanes: index must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)
///
/// Scalar variant: returns the value of the single lane of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
    // The source has exactly one lane, so only N == 0 is valid.
    static_assert!(N == 0);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    // 2 lanes: index must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    simd_extract!(a, N as u32)
}
4930
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    // 8 lanes: index must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
    // 16 lanes: index must fit in 4 bits.
    static_assert_uimm_bits!(N, 4);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    // 4 lanes: index must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    // 8 lanes: index must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
    // 2 lanes: index must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    // 4 lanes: index must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)
///
/// Scalar variant: returns the value of the single lane of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
    // The source has exactly one lane, so only N == 0 is valid.
    static_assert!(N == 0);
    simd_extract!(a, N as u32)
}

/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)
///
/// Scalar variant: returns the value of lane `N` of `a`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
    // 2 lanes: index must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    simd_extract!(a, N as u32)
}
5034
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)
///
/// Takes two consecutive lanes starting at index `N` of the concatenation `a:b`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    // simd_shuffle! requires constant indices, so each valid N gets its own arm;
    // indices 0..1 come from `a`, 2..3 from `b`.
    match N & 0b1 {
        0 => simd_shuffle!(a, b, [0, 1]),
        1 => simd_shuffle!(a, b, [1, 2]),
        // static_assert above limits N to 0..=1.
        _ => unreachable_unchecked(),
    }
}

/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)
///
/// Takes two consecutive lanes starting at index `N` of the concatenation `a:b`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(N, 1);
    // simd_shuffle! requires constant indices, so each valid N gets its own arm;
    // indices 0..1 come from `a`, 2..3 from `b`.
    match N & 0b1 {
        0 => simd_shuffle!(a, b, [0, 1]),
        1 => simd_shuffle!(a, b, [1, 2]),
        // static_assert above limits N to 0..=1.
        _ => unreachable_unchecked(),
    }
}
5068
/// Floating-point multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)
///
/// Computes `a + (b * c)` element-wise as a separate multiply and add
/// (not fused; the codegen test checks for the `fmul`).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    simd_add(a, simd_mul(b, c))
}

/// Floating-point multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)
///
/// Computes `a + (b * c)` element-wise as a separate multiply and add
/// (not fused; the codegen test checks for the `fmul`).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    simd_add(a, simd_mul(b, c))
}
5090
/// Signed multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); // upper 8 lanes of b
    let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); // upper 8 lanes of c
    // Widening multiply-accumulate on the extracted high halves.
    vmlal_s8(a, b, c)
}
5103
/// Signed multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); // upper 4 lanes of b
    let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); // upper 4 lanes of c
    // Widening multiply-accumulate on the extracted high halves.
    vmlal_s16(a, b, c)
}
5116
/// Signed multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); // upper 2 lanes of b
    let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); // upper 2 lanes of c
    // Widening multiply-accumulate on the extracted high halves.
    vmlal_s32(a, b, c)
}
5129
/// Unsigned multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); // upper 8 lanes of b
    let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); // upper 8 lanes of c
    // Widening multiply-accumulate on the extracted high halves.
    vmlal_u8(a, b, c)
}
5142
/// Unsigned multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); // upper 4 lanes of b
    let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); // upper 4 lanes of c
    // Widening multiply-accumulate on the extracted high halves.
    vmlal_u16(a, b, c)
}
5155
/// Unsigned multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); // upper 2 lanes of b
    let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); // upper 2 lanes of c
    // Widening multiply-accumulate on the extracted high halves.
    vmlal_u32(a, b, c)
}
5168
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Broadcast the scalar `c` to every lane, then reuse the vector form.
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}
5179
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Broadcast the scalar `c` to every lane, then reuse the vector form.
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}
5190
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    // Broadcast the scalar `c` to every lane, then reuse the vector form.
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}
5201
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    // Broadcast the scalar `c` to every lane, then reuse the vector form.
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}
5212
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 8 lanes, then reuse the vector form.
    vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5225
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // LANE indexes the 8-lane `c`, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` across all 8 lanes, then reuse the vector form.
    vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5238
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // LANE indexes the 2-lane `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` across all 4 lanes, then reuse the vector form.
    vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5251
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 4 lanes, then reuse the vector form.
    vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5264
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x4_t) -> uint32x4_t {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 8 lanes, then reuse the vector form.
    vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5277
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    // LANE indexes the 8-lane `c`, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` across all 8 lanes, then reuse the vector form.
    vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5290
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x2_t) -> uint64x2_t {
    // LANE indexes the 2-lane `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` across all 4 lanes, then reuse the vector form.
    vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5303
/// Multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 4 lanes, then reuse the vector form.
    vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5316
/// Floating-point multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // a - (b * c), written as a separate multiply and subtract — the test
    // above asserts `fmul`, i.e. this intrinsic is not the fused (`fmls`) form.
    simd_sub(a, simd_mul(b, c))
}
5327
/// Floating-point multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Lane-wise a - (b * c) with a separate multiply and subtract — the test
    // above asserts `fmul`, i.e. this intrinsic is not the fused (`fmls`) form.
    simd_sub(a, simd_mul(b, c))
}
5338
/// Signed multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); // upper 8 lanes of b
    let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); // upper 8 lanes of c
    // Widening multiply-subtract on the extracted high halves.
    vmlsl_s8(a, b, c)
}
5351
/// Signed multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); // upper 4 lanes of b
    let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); // upper 4 lanes of c
    // Widening multiply-subtract on the extracted high halves.
    vmlsl_s16(a, b, c)
}
5364
/// Signed multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); // upper 2 lanes of b
    let c: int32x2_t = simd_shuffle!(c, c, [2, 3]); // upper 2 lanes of c
    // Widening multiply-subtract on the extracted high halves.
    vmlsl_s32(a, b, c)
}
5377
/// Unsigned multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); // upper 8 lanes of b
    let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]); // upper 8 lanes of c
    // Widening multiply-subtract on the extracted high halves.
    vmlsl_u8(a, b, c)
}
5390
/// Unsigned multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); // upper 4 lanes of b
    let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]); // upper 4 lanes of c
    // Widening multiply-subtract on the extracted high halves.
    vmlsl_u16(a, b, c)
}
5403
/// Unsigned multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); // upper 2 lanes of b
    let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]); // upper 2 lanes of c
    // Widening multiply-subtract on the extracted high halves.
    vmlsl_u32(a, b, c)
}
5416
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Broadcast the scalar `c` to every lane, then reuse the vector form.
    vmlsl_high_s16(a, b, vdupq_n_s16(c))
}
5427
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Broadcast the scalar `c` to every lane, then reuse the vector form.
    vmlsl_high_s32(a, b, vdupq_n_s32(c))
}
5438
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    // Broadcast the scalar `c` to every lane, then reuse the vector form.
    vmlsl_high_u16(a, b, vdupq_n_u16(c))
}
5449
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    // Broadcast the scalar `c` to every lane, then reuse the vector form.
    vmlsl_high_u32(a, b, vdupq_n_u32(c))
}
5460
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 8 lanes, then reuse the vector form.
    vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5473
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // LANE indexes the 8-lane `c`, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` across all 8 lanes, then reuse the vector form.
    vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5486
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // LANE indexes the 2-lane `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` across all 4 lanes, then reuse the vector form.
    vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5499
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 4 lanes, then reuse the vector form.
    vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5512
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x4_t) -> uint32x4_t {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 8 lanes, then reuse the vector form.
    vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5525
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    // LANE indexes the 8-lane `c`, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` across all 8 lanes, then reuse the vector form.
    vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5538
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x2_t) -> uint64x2_t {
    // LANE indexes the 2-lane `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` across all 4 lanes, then reuse the vector form.
    vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5551
/// Multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 4 lanes, then reuse the vector form.
    vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
5564
/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Truncate each 16-bit lane of `b` down to 8 bits...
    let c: int8x8_t = simd_cast(b);
    // ...then concatenate: `a` forms the low half, the narrowed `b` the high half.
    simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
5576
/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Truncate each 32-bit lane of `b` down to 16 bits...
    let c: int16x4_t = simd_cast(b);
    // ...then concatenate: `a` forms the low half, the narrowed `b` the high half.
    simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
}
5588
/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Truncate each 64-bit lane of `b` down to 32 bits...
    let c: int32x2_t = simd_cast(b);
    // ...then concatenate: `a` forms the low half, the narrowed `b` the high half.
    simd_shuffle!(a, c, [0, 1, 2, 3])
}
5600
/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Truncate each 16-bit lane of `b` down to 8 bits...
    let c: uint8x8_t = simd_cast(b);
    // ...then concatenate: `a` forms the low half, the narrowed `b` the high half.
    simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
5612
/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Truncate each 32-bit lane of `b` down to 16 bits...
    let c: uint16x4_t = simd_cast(b);
    // ...then concatenate: `a` forms the low half, the narrowed `b` the high half.
    simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
}
5624
/// Extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(xtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Truncate each 64-bit lane of `b` down to 32 bits...
    let c: uint32x2_t = simd_cast(b);
    // ...then concatenate: `a` forms the low half, the narrowed `b` the high half.
    simd_shuffle!(a, c, [0, 1, 2, 3])
}
5636
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(neg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vneg_s64(a: int64x1_t) -> int64x1_t {
    // Lane-wise two's-complement negation (non-saturating; see vqneg_s64 for
    // the saturating variant).
    simd_neg(a)
}
5647
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(neg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    // Lane-wise two's-complement negation (non-saturating; see vqnegq_s64 for
    // the saturating variant).
    simd_neg(a)
}
5658
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(neg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vnegd_s64(a: i64) -> i64 {
    // Scalar negate; `wrapping_neg` maps i64::MIN to itself, matching the
    // two's-complement NEG instruction asserted above.
    a.wrapping_neg()
}
5669
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vneg_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise floating-point negation (the `fneg` asserted above).
    simd_neg(a)
}
5680
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise floating-point negation (the `fneg` asserted above).
    simd_neg(a)
}
5691
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Saturating negation is delegated to the LLVM NEON sqneg intrinsic;
        // there is no portable simd_* primitive with saturating semantics.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v1i64")]
        fn vqneg_s64_(a: int64x1_t) -> int64x1_t;
    }
    vqneg_s64_(a)
}
5707
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Saturating negation is delegated to the LLVM NEON sqneg intrinsic;
        // there is no portable simd_* primitive with saturating semantics.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqneg.v2i64")]
        fn vqnegq_s64_(a: int64x2_t) -> int64x2_t;
    }
    vqnegq_s64_(a)
}
5723
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqnegb_s8(a: i8) -> i8 {
    // Scalar form: splat into a vector, run the vector saturating negate,
    // and take lane 0.
    simd_extract!(vqneg_s8(vdup_n_s8(a)), 0)
}
5734
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqnegh_s16(a: i16) -> i16 {
    // Scalar form: splat into a vector, run the vector saturating negate,
    // and take lane 0.
    simd_extract!(vqneg_s16(vdup_n_s16(a)), 0)
}
5745
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqnegs_s32(a: i32) -> i32 {
    // Scalar form: splat into a vector, run the vector saturating negate,
    // and take lane 0.
    simd_extract!(vqneg_s32(vdup_n_s32(a)), 0)
}
5756
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqneg))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqnegd_s64(a: i64) -> i64 {
    // Scalar form: splat into a vector, run the vector saturating negate,
    // and take lane 0.
    simd_extract!(vqneg_s64(vdup_n_s64(a)), 0)
}
5767
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubb_s8(a: i8, b: i8) -> i8 {
    // Scalar form via the vector op: splat both operands, do the vector
    // saturating subtract, and take lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    simd_extract!(vqsub_s8(a, b), 0)
}
5780
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubh_s16(a: i16, b: i16) -> i16 {
    // Scalar form via the vector op: splat both operands, do the vector
    // saturating subtract, and take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    simd_extract!(vqsub_s16(a, b), 0)
}
5793
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubb_u8(a: u8, b: u8) -> u8 {
    // Scalar form via the vector op: splat both operands, do the vector
    // saturating subtract, and take lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    simd_extract!(vqsub_u8(a, b), 0)
}
5806
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubh_u16(a: u16, b: u16) -> u16 {
    // Scalar form via the vector op: splat both operands, do the vector
    // saturating subtract, and take lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    simd_extract!(vqsub_u16(a, b), 0)
}
5819
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // 32-bit saturating subtract has a direct scalar LLVM intrinsic, so no
        // vector round-trip (dup/extract) is needed here.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.i32")]
        fn vqsubs_u32_(a: u32, b: u32) -> u32;
    }
    vqsubs_u32_(a, b)
}
5835
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 {
    // Direct binding to the scalar LLVM intrinsic; lowers to a single
    // `uqsub` instruction (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.i64")]
        fn vqsubd_u64_(a: u64, b: u64) -> u64;
    }
    vqsubd_u64_(a, b)
}
5851
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubs_s32(a: i32, b: i32) -> i32 {
    // Direct binding to the scalar LLVM intrinsic; lowers to a single
    // `sqsub` instruction (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.i32")]
        fn vqsubs_s32_(a: i32, b: i32) -> i32;
    }
    vqsubs_s32_(a, b)
}
5867
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqsubd_s64(a: i64, b: i64) -> i64 {
    // Direct binding to the scalar LLVM intrinsic; lowers to a single
    // `sqsub` instruction (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqsub.i64")]
        fn vqsubd_s64_(a: i64, b: i64) -> i64;
    }
    vqsubd_s64_(a, b)
}
5883
/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    // Binds the LLVM `rbit` vector intrinsic (8 x i8 lanes); lowers to a
    // single `rbit` instruction (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rbit.v8i8")]
        fn vrbit_s8_(a: int8x8_t) -> int8x8_t;
    }
    vrbit_s8_(a)
}
5899
/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    // Binds the LLVM `rbit` vector intrinsic (16 x i8 lanes); lowers to a
    // single `rbit` instruction (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.rbit.v16i8")]
        fn vrbitq_s8_(a: int8x16_t) -> int8x16_t;
    }
    vrbitq_s8_(a)
}
5915
/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Bit reversal only depends on the bit pattern, not lane signedness,
    // so delegate to the `s8` variant via transmute.
    transmute(vrbit_s8(transmute(a)))
}
5926
/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Bit reversal only depends on the bit pattern, not lane signedness,
    // so delegate to the `s8` variant via transmute.
    transmute(vrbitq_s8(transmute(a)))
}
5937
/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Bit reversal only depends on the bit pattern, not the lane type,
    // so delegate to the `s8` variant via transmute.
    transmute(vrbit_s8(transmute(a)))
}
5948
/// Reverse bit order
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rbit))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Bit reversal only depends on the bit pattern, not the lane type,
    // so delegate to the `s8` variant via transmute.
    transmute(vrbitq_s8(transmute(a)))
}
5959
/// Floating-point round to integral exact, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    // Binds the generic LLVM `llvm.rint` intrinsic, which lowers to `frintx`
    // here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v2f32")]
        fn vrndx_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrndx_f32_(a)
}
5975
/// Floating-point round to integral exact, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    // Binds the generic LLVM `llvm.rint` intrinsic, which lowers to `frintx`
    // here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v4f32")]
        fn vrndxq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndxq_f32_(a)
}
5991
/// Floating-point round to integral exact, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    // Binds the generic LLVM `llvm.rint` intrinsic, which lowers to `frintx`
    // here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v1f64")]
        fn vrndx_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrndx_f64_(a)
}
6007
/// Floating-point round to integral exact, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    // Binds the generic LLVM `llvm.rint` intrinsic, which lowers to `frintx`
    // here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.rint.v2f64")]
        fn vrndxq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndxq_f64_(a)
}
6023
/// Floating-point round to integral, to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    // Binds the generic LLVM `llvm.round` intrinsic (ties away from zero),
    // which lowers to `frinta` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v2f32")]
        fn vrnda_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnda_f32_(a)
}
6039
/// Floating-point round to integral, to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    // Binds the generic LLVM `llvm.round` intrinsic (ties away from zero),
    // which lowers to `frinta` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v4f32")]
        fn vrndaq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndaq_f32_(a)
}
6055
/// Floating-point round to integral, to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    // Binds the generic LLVM `llvm.round` intrinsic (ties away from zero),
    // which lowers to `frinta` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v1f64")]
        fn vrnda_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrnda_f64_(a)
}
6071
/// Floating-point round to integral, to nearest with ties to away
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinta))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    // Binds the generic LLVM `llvm.round` intrinsic (ties away from zero),
    // which lowers to `frinta` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.round.v2f64")]
        fn vrndaq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndaq_f64_(a)
}
6087
/// Floating-point round to integral, to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    // Binds the AArch64-specific `frintn` intrinsic (round to nearest,
    // ties to even); checked by `assert_instr` above.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v1f64")]
        fn vrndn_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrndn_f64_(a)
}
6103
/// Floating-point round to integral, to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    // Binds the AArch64-specific `frintn` intrinsic (round to nearest,
    // ties to even); checked by `assert_instr` above.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frintn.v2f64")]
        fn vrndnq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndnq_f64_(a)
}
6119
/// Floating-point round to integral, to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndns_f32(a: f32) -> f32 {
    // Scalar form: binds the generic LLVM `llvm.roundeven.f32` intrinsic
    // (ties to even), which lowers to `frintn` here.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.roundeven.f32")]
        fn vrndns_f32_(a: f32) -> f32;
    }
    vrndns_f32_(a)
}
6135
/// Floating-point round to integral, toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    // Binds the generic LLVM `llvm.floor` intrinsic, which lowers to
    // `frintm` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v2f32")]
        fn vrndm_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrndm_f32_(a)
}
6151
/// Floating-point round to integral, toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    // Binds the generic LLVM `llvm.floor` intrinsic, which lowers to
    // `frintm` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v4f32")]
        fn vrndmq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndmq_f32_(a)
}
6167
/// Floating-point round to integral, toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    // Binds the generic LLVM `llvm.floor` intrinsic, which lowers to
    // `frintm` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v1f64")]
        fn vrndm_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrndm_f64_(a)
}
6183
/// Floating-point round to integral, toward minus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    // Binds the generic LLVM `llvm.floor` intrinsic, which lowers to
    // `frintm` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.floor.v2f64")]
        fn vrndmq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndmq_f64_(a)
}
6199
/// Floating-point round to integral, toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    // Binds the generic LLVM `llvm.ceil` intrinsic, which lowers to
    // `frintp` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v2f32")]
        fn vrndp_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrndp_f32_(a)
}
6215
/// Floating-point round to integral, toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    // Binds the generic LLVM `llvm.ceil` intrinsic, which lowers to
    // `frintp` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v4f32")]
        fn vrndpq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndpq_f32_(a)
}
6231
/// Floating-point round to integral, toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    // Binds the generic LLVM `llvm.ceil` intrinsic, which lowers to
    // `frintp` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v1f64")]
        fn vrndp_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrndp_f64_(a)
}
6247
/// Floating-point round to integral, toward plus infinity
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    // Binds the generic LLVM `llvm.ceil` intrinsic, which lowers to
    // `frintp` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.ceil.v2f64")]
        fn vrndpq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndpq_f64_(a)
}
6263
/// Floating-point round to integral, toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    // Binds the generic LLVM `llvm.trunc` intrinsic, which lowers to
    // `frintz` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v2f32")]
        fn vrnd_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnd_f32_(a)
}
6279
/// Floating-point round to integral, toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    // Binds the generic LLVM `llvm.trunc` intrinsic, which lowers to
    // `frintz` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v4f32")]
        fn vrndq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndq_f32_(a)
}
6295
/// Floating-point round to integral, toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    // Binds the generic LLVM `llvm.trunc` intrinsic, which lowers to
    // `frintz` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v1f64")]
        fn vrnd_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrnd_f64_(a)
}
6311
/// Floating-point round to integral, toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frintz))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    // Binds the generic LLVM `llvm.trunc` intrinsic, which lowers to
    // `frintz` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.trunc.v2f64")]
        fn vrndq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndq_f64_(a)
}
6327
/// Floating-point round to integral, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    // Binds the generic LLVM `llvm.nearbyint` intrinsic, which lowers to
    // `frinti` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v2f32")]
        fn vrndi_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrndi_f32_(a)
}
6343
/// Floating-point round to integral, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    // Binds the generic LLVM `llvm.nearbyint` intrinsic, which lowers to
    // `frinti` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v4f32")]
        fn vrndiq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrndiq_f32_(a)
}
6359
/// Floating-point round to integral, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    // Binds the generic LLVM `llvm.nearbyint` intrinsic, which lowers to
    // `frinti` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v1f64")]
        fn vrndi_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrndi_f64_(a)
}
6375
/// Floating-point round to integral, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frinti))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    // Binds the generic LLVM `llvm.nearbyint` intrinsic, which lowers to
    // `frinti` here (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.nearbyint.v2f64")]
        fn vrndiq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrndiq_f64_(a)
}
6391
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddb_s8(a: i8, b: i8) -> i8 {
    // Broadcast both scalars to vectors, run the vector saturating add,
    // and read lane 0 back as the scalar result.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    simd_extract!(vqadd_s8(a, b), 0)
}
6404
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddh_s16(a: i16, b: i16) -> i16 {
    // Broadcast both scalars to vectors, run the vector saturating add,
    // and read lane 0 back as the scalar result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    simd_extract!(vqadd_s16(a, b), 0)
}
6417
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddb_u8(a: u8, b: u8) -> u8 {
    // Broadcast both scalars to vectors, run the vector saturating add,
    // and read lane 0 back as the scalar result.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    simd_extract!(vqadd_u8(a, b), 0)
}
6430
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddh_u16(a: u16, b: u16) -> u16 {
    // Broadcast both scalars to vectors, run the vector saturating add,
    // and read lane 0 back as the scalar result.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    simd_extract!(vqadd_u16(a, b), 0)
}
6443
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 {
    // Direct binding to the scalar LLVM intrinsic; lowers to a single
    // `uqadd` instruction (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.i32")]
        fn vqadds_u32_(a: u32, b: u32) -> u32;
    }
    vqadds_u32_(a, b)
}
6459
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 {
    // Direct binding to the scalar LLVM intrinsic; lowers to a single
    // `uqadd` instruction (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.i64")]
        fn vqaddd_u64_(a: u64, b: u64) -> u64;
    }
    vqaddd_u64_(a, b)
}
6475
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqadds_s32(a: i32, b: i32) -> i32 {
    // Direct binding to the scalar LLVM intrinsic; lowers to a single
    // `sqadd` instruction (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.i32")]
        fn vqadds_s32_(a: i32, b: i32) -> i32;
    }
    vqadds_s32_(a, b)
}
6491
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqaddd_s64(a: i64, b: i64) -> i64 {
    // Direct binding to the scalar LLVM intrinsic; lowers to a single
    // `sqadd` instruction (checked by `assert_instr` above).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqadd.i64")]
        fn vqaddd_s64_(a: i64, b: i64) -> i64;
    }
    vqaddd_s64_(a, b)
}
6507
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
    // Binds the LLVM `ld1x2` intrinsic: loads 2 consecutive `float64x1_t`
    // vectors starting at `a`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0f64")]
        fn vld1_f64_x2_(a: *const f64) -> float64x1x2_t;
    }
    vld1_f64_x2_(a)
}
6523
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
    // Binds the LLVM `ld1x2` intrinsic: loads 2 consecutive `float64x2_t`
    // vectors starting at `a`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64")]
        fn vld1q_f64_x2_(a: *const f64) -> float64x2x2_t;
    }
    vld1q_f64_x2_(a)
}
6539
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
    // Binds the LLVM `ld1x3` intrinsic: loads 3 consecutive `float64x1_t`
    // vectors starting at `a`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0f64")]
        fn vld1_f64_x3_(a: *const f64) -> float64x1x3_t;
    }
    vld1_f64_x3_(a)
}
6555
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
    // Binds the LLVM `ld1x3` intrinsic: loads 3 consecutive `float64x2_t`
    // vectors starting at `a`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64")]
        fn vld1q_f64_x3_(a: *const f64) -> float64x2x3_t;
    }
    vld1q_f64_x3_(a)
}
6571
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
    // Binds the LLVM `ld1x4` intrinsic: loads 4 consecutive `float64x1_t`
    // vectors starting at `a`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0f64")]
        fn vld1_f64_x4_(a: *const f64) -> float64x1x4_t;
    }
    vld1_f64_x4_(a)
}
6587
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
    // Binds the LLVM `ld1x4` intrinsic: loads 4 consecutive `float64x2_t`
    // vectors starting at `a`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64")]
        fn vld1q_f64_x4_(a: *const f64) -> float64x2x4_t;
    }
    vld1q_f64_x4_(a)
}
6603
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2` intrinsic for 2-lane i64 vectors.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64")]
        fn vld2q_s64_(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    // `as _` casts the element pointer to the vector pointer the intrinsic expects.
    vld2q_s64_(a as _)
}
6619
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld2q_s64(transmute(a)))
}
6630
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld2q_s64(transmute(a)))
}
6641
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2` intrinsic for 1-lane f64 vectors.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v1f64.p0v1f64")]
        fn vld2_f64_(ptr: *const float64x1_t) -> float64x1x2_t;
    }
    vld2_f64_(a as _)
}
6657
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2` intrinsic for 2-lane f64 vectors.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64")]
        fn vld2q_f64_(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    vld2q_f64_(a as _)
}
6673
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2r` (load-and-replicate) intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64")]
        fn vld2q_dup_s64_(ptr: *const i64) -> int64x2x2_t;
    }
    vld2q_dup_s64_(a as _)
}
6689
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld2q_dup_s64(transmute(a)))
}
6700
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld2q_dup_s64(transmute(a)))
}
6711
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2r` (load-and-replicate) intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v1f64.p0f64")]
        fn vld2_dup_f64_(ptr: *const f64) -> float64x1x2_t;
    }
    vld2_dup_f64_(a as _)
}
6727
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2r` (load-and-replicate) intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64")]
        fn vld2q_dup_f64_(ptr: *const f64) -> float64x2x2_t;
    }
    vld2q_dup_f64_(a as _)
}
6743
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    // LANE must index one of the 16 lanes of an int8x16_t.
    static_assert_uimm_bits!(LANE, 4);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2lane` intrinsic; the non-loaded lanes come from `b`.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8")]
        fn vld2q_lane_s8_(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    vld2q_lane_s8_(b.0, b.1, LANE as i64, a as _)
}
6761
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2lane` intrinsic; the non-loaded lanes come from `b`.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8")]
        fn vld2_lane_s64_(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    vld2_lane_s64_(b.0, b.1, LANE as i64, a as _)
}
6779
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    // LANE must index one of the 2 lanes of an int64x2_t.
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2lane` intrinsic; the non-loaded lanes come from `b`.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8")]
        fn vld2q_lane_s64_(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    vld2q_lane_s64_(b.0, b.1, LANE as i64, a as _)
}
6797
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    // poly64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
6810
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    // LANE must index one of the 2 lanes of a poly64x2_t.
    static_assert_uimm_bits!(LANE, 1);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
6823
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    // LANE must index one of the 16 lanes of a uint8x16_t.
    static_assert_uimm_bits!(LANE, 4);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
6836
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    // uint64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
6849
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    // LANE must index one of the 2 lanes of a uint64x2_t.
    static_assert_uimm_bits!(LANE, 1);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
6862
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    // LANE must index one of the 16 lanes of a poly8x16_t.
    static_assert_uimm_bits!(LANE, 4);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
6875
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2lane` intrinsic; the non-loaded lanes come from `b`.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0i8")]
        fn vld2_lane_f64_(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    vld2_lane_f64_(b.0, b.1, LANE as i64, a as _)
}
6893
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    // LANE must index one of the 2 lanes of a float64x2_t.
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld2lane` intrinsic; the non-loaded lanes come from `b`.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8")]
        fn vld2q_lane_f64_(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8) -> float64x2x2_t;
    }
    vld2q_lane_f64_(b.0, b.1, LANE as i64, a as _)
}
6911
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3` intrinsic for 2-lane i64 vectors.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64")]
        fn vld3q_s64_(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    vld3q_s64_(a as _)
}
6927
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld3q_s64(transmute(a)))
}
6938
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld3q_s64(transmute(a)))
}
6949
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3` intrinsic for 1-lane f64 vectors.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v1f64.p0v1f64")]
        fn vld3_f64_(ptr: *const float64x1_t) -> float64x1x3_t;
    }
    vld3_f64_(a as _)
}
6965
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3` intrinsic for 2-lane f64 vectors.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64")]
        fn vld3q_f64_(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    vld3q_f64_(a as _)
}
6981
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3r` (load-and-replicate) intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64")]
        fn vld3q_dup_s64_(ptr: *const i64) -> int64x2x3_t;
    }
    vld3q_dup_s64_(a as _)
}
6997
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld3q_dup_s64(transmute(a)))
}
7008
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld3q_dup_s64(transmute(a)))
}
7019
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3r` (load-and-replicate) intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v1f64.p0f64")]
        fn vld3_dup_f64_(ptr: *const f64) -> float64x1x3_t;
    }
    vld3_dup_f64_(a as _)
}
7035
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3r` (load-and-replicate) intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64")]
        fn vld3q_dup_f64_(ptr: *const f64) -> float64x2x3_t;
    }
    vld3q_dup_f64_(a as _)
}
7051
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
    // LANE must index one of the 16 lanes of an int8x16_t.
    static_assert_uimm_bits!(LANE, 4);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3lane` intrinsic; the non-loaded lanes come from `b`.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8")]
        fn vld3q_lane_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *const i8) -> int8x16x3_t;
    }
    vld3q_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
}
7069
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3lane` intrinsic; the non-loaded lanes come from `b`.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8")]
        fn vld3_lane_s64_(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *const i8) -> int64x1x3_t;
    }
    vld3_lane_s64_(b.0, b.1, b.2, LANE as i64, a as _)
}
7087
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    // LANE must index one of the 2 lanes of an int64x2_t.
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3lane` intrinsic; the non-loaded lanes come from `b`.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8")]
        fn vld3q_lane_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *const i8) -> int64x2x3_t;
    }
    vld3q_lane_s64_(b.0, b.1, b.2, LANE as i64, a as _)
}
7105
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    // poly64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
7118
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    // LANE must index one of the 2 lanes of a poly64x2_t.
    static_assert_uimm_bits!(LANE, 1);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
7131
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    // LANE must index one of the 16 lanes of a poly8x16_t.
    static_assert_uimm_bits!(LANE, 4);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
7144
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    // LANE must index one of the 16 lanes of a uint8x16_t.
    static_assert_uimm_bits!(LANE, 4);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
7157
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    // uint64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
7170
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    // LANE must index one of the 2 lanes of a uint64x2_t.
    static_assert_uimm_bits!(LANE, 1);
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
7183
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3lane` intrinsic; the non-loaded lanes come from `b`.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8")]
        fn vld3_lane_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *const i8) -> float64x1x3_t;
    }
    vld3_lane_f64_(b.0, b.1, b.2, LANE as i64, a as _)
}
7201
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    // LANE must index one of the 2 lanes of a float64x2_t.
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld3lane` intrinsic; the non-loaded lanes come from `b`.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8")]
        fn vld3q_lane_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *const i8) -> float64x2x3_t;
    }
    vld3q_lane_f64_(b.0, b.1, b.2, LANE as i64, a as _)
}
7219
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld4` intrinsic for 2-lane i64 vectors.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64")]
        fn vld4q_s64_(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    vld4q_s64_(a as _)
}
7235
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld4q_s64(transmute(a)))
}
7246
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Same bit pattern as the signed variant; delegate and reinterpret the result.
    transmute(vld4q_s64(transmute(a)))
}
7257
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld4` intrinsic for 1-lane f64 vectors.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v1f64.p0v1f64")]
        fn vld4_f64_(ptr: *const float64x1_t) -> float64x1x4_t;
    }
    vld4_f64_(a as _)
}
7273
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld4` intrinsic for 2-lane f64 vectors.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64")]
        fn vld4q_f64_(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    vld4q_f64_(a as _)
}
7289
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM AArch64 `ld4r` (load-and-replicate) intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64")]
        fn vld4q_dup_s64_(ptr: *const i64) -> int64x2x4_t;
    }
    vld4q_dup_s64_(a as _)
}
7305
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // u64 and i64 lanes are bit-compatible: delegate to the signed variant
    // and reinterpret both the pointer and the result.
    transmute(vld4q_dup_s64(transmute(a)))
}
7316
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // p64 and i64 lanes are bit-compatible: delegate to the signed variant
    // and reinterpret both the pointer and the result.
    transmute(vld4q_dup_s64(transmute(a)))
}
7327
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM load-and-replicate (ld4r) intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v1f64.p0f64")]
        fn vld4_dup_f64_(ptr: *const f64) -> float64x1x4_t;
    }
    vld4_dup_f64_(a as _)
}
7343
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM load-and-replicate (ld4r) intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64")]
        fn vld4q_dup_f64_(ptr: *const f64) -> float64x2x4_t;
    }
    vld4q_dup_f64_(a as _)
}
7359
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
    // 16 lanes of i8 per 128-bit register, so LANE needs 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-load intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8")]
        fn vld4q_lane_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, n: i64, ptr: *const i8) -> int8x16x4_t;
    }
    vld4q_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
7377
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    // Single-lane (64-bit) vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-load intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0i8")]
        fn vld4_lane_s64_(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, n: i64, ptr: *const i8) -> int64x1x4_t;
    }
    vld4_lane_s64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
7395
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    // 2 lanes of i64 per 128-bit register, so LANE needs 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-load intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8")]
        fn vld4q_lane_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, n: i64, ptr: *const i8) -> int64x2x4_t;
    }
    vld4q_lane_s64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
7413
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    // p64 and i64 lanes are bit-compatible: delegate to the signed variant.
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
7426
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    // 2 lanes of p64 per 128-bit register, so LANE needs 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 lanes are bit-compatible: delegate to the signed variant.
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
7439
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    // 16 lanes of p8 per 128-bit register, so LANE needs 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // p8 and i8 lanes are bit-compatible: delegate to the signed variant.
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
7452
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    // 16 lanes of u8 per 128-bit register, so LANE needs 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 lanes are bit-compatible: delegate to the signed variant.
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
7465
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    // u64 and i64 lanes are bit-compatible: delegate to the signed variant.
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
7478
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    // 2 lanes of u64 per 128-bit register, so LANE needs 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 lanes are bit-compatible: delegate to the signed variant.
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
7491
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-load intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8")]
        fn vld4_lane_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, n: i64, ptr: *const i8) -> float64x1x4_t;
    }
    vld4_lane_f64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
7509
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    // 2 lanes of f64 per 128-bit register, so LANE needs 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-load intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8")]
        fn vld4q_lane_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, n: i64, ptr: *const i8) -> float64x2x4_t;
    }
    vld4q_lane_f64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
7527
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    // Extract the requested lane and write it through the raw pointer.
    *a = simd_extract!(b, LANE as u32);
}
7540
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    // 2 lanes of f64 per 128-bit register, so LANE needs 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the requested lane and write it through the raw pointer.
    *a = simd_extract!(b, LANE as u32);
}
7553
/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM multi-register contiguous-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v1f64.p0f64")]
        fn vst1_f64_x2_(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    vst1_f64_x2_(b.0, b.1, a)
}
7569
/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM multi-register contiguous-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64")]
        fn vst1q_f64_x2_(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    vst1q_f64_x2_(b.0, b.1, a)
}
7585
/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM multi-register contiguous-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v1f64.p0f64")]
        fn vst1_f64_x3_(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    vst1_f64_x3_(b.0, b.1, b.2, a)
}
7601
/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM multi-register contiguous-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64")]
        fn vst1q_f64_x3_(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    vst1q_f64_x3_(b.0, b.1, b.2, a)
}
7617
/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM multi-register contiguous-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v1f64.p0f64")]
        fn vst1_f64_x4_(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut f64);
    }
    vst1_f64_x4_(b.0, b.1, b.2, b.3, a)
}
7633
/// Store multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM multi-register contiguous-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64")]
        fn vst1q_f64_x4_(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut f64);
    }
    vst1q_f64_x4_(b.0, b.1, b.2, b.3, a)
}
7649
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM interleaved-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2i64.p0i8")]
        fn vst2q_s64_(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
    }
    vst2q_s64_(b.0, b.1, a as _)
}
7665
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    // u64 and i64 lanes are bit-compatible: delegate to the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
7676
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    // p64 and i64 lanes are bit-compatible: delegate to the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
7687
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM interleaved-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v1f64.p0i8")]
        fn vst2_f64_(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
    }
    vst2_f64_(b.0, b.1, a as _)
}
7703
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM interleaved-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2.v2f64.p0i8")]
        fn vst2q_f64_(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
    }
    vst2q_f64_(b.0, b.1, a as _)
}
7719
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    // 16 lanes of i8 per 128-bit register, so LANE needs 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8")]
        fn vst2q_lane_s8_(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    vst2q_lane_s8_(b.0, b.1, LANE as i64, a as _)
}
7737
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8")]
        fn vst2_lane_s64_(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    vst2_lane_s64_(b.0, b.1, LANE as i64, a as _)
}
7755
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    // 2 lanes of i64 per 128-bit register, so LANE needs 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8")]
        fn vst2q_lane_s64_(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    vst2q_lane_s64_(b.0, b.1, LANE as i64, a as _)
}
7773
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    // 16 lanes of u8 per 128-bit register, so LANE needs 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 lanes are bit-compatible: delegate to the signed variant.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
7786
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    // u64 and i64 lanes are bit-compatible: delegate to the signed variant.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
7799
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    // 2 lanes of u64 per 128-bit register, so LANE needs 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 lanes are bit-compatible: delegate to the signed variant.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
7812
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    // 16 lanes of p8 per 128-bit register, so LANE needs 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // p8 and i8 lanes are bit-compatible: delegate to the signed variant.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
7825
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    // p64 and i64 lanes are bit-compatible: delegate to the signed variant.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
7838
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    // 2 lanes of p64 per 128-bit register, so LANE needs 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 lanes are bit-compatible: delegate to the signed variant.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
7851
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8")]
        fn vst2_lane_f64_(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    vst2_lane_f64_(b.0, b.1, LANE as i64, a as _)
}
7869
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    // 2 lanes of f64 per 128-bit register, so LANE needs 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8")]
        fn vst2q_lane_f64_(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    vst2q_lane_f64_(b.0, b.1, LANE as i64, a as _)
}
7887
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM interleaved-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2i64.p0i8")]
        fn vst3q_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
    }
    vst3q_s64_(b.0, b.1, b.2, a as _)
}
7903
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    // u64 and i64 lanes are bit-compatible: delegate to the signed variant.
    vst3q_s64(transmute(a), transmute(b))
}
7914
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    // p64 and i64 lanes are bit-compatible: delegate to the signed variant.
    vst3q_s64(transmute(a), transmute(b))
}
7925
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM interleaved-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v1f64.p0i8")]
        fn vst3_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
    }
    vst3_f64_(b.0, b.1, b.2, a as _)
}
7941
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM interleaved-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3.v2f64.p0i8")]
        fn vst3q_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
    }
    vst3q_f64_(b.0, b.1, b.2, a as _)
}
7957
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
    // 16 lanes of i8 per 128-bit register, so LANE needs 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8")]
        fn vst3q_lane_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
    }
    vst3q_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
}
7975
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
    // Single-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM single-lane structured-store intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8")]
        fn vst3_lane_s64_(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
    }
    vst3_lane_s64_(b.0, b.1, b.2, LANE as i64, a as _)
}
7993
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST3 (single-lane) intrinsic.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8")]
        fn vst3q_lane_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; destination pointer goes last.
    vst3q_lane_s64_(b.0, b.1, b.2, LANE as i64, a as _)
}
8011
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4); // 16 lanes: index must fit in 4 bits
    // Same machine instruction as the signed variant; reuse it through
    // bit-preserving transmutes of the pointer and the register triple.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
8024
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
    static_assert!(LANE == 0); // single-lane vector: 0 is the only valid index
    // Delegate to the signed variant via bit-preserving transmutes.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
8037
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    // Delegate to the signed variant via bit-preserving transmutes.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
8050
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4); // 16 lanes: index must fit in 4 bits
    // Polynomial and signed stores are the same instruction; delegate
    // through bit-preserving transmutes.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
8063
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
    static_assert!(LANE == 0); // single-lane vector: 0 is the only valid index
    // poly64 types require the `aes` feature; the store itself is the same
    // instruction as the signed variant, reached via transmutes.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
8076
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    // poly64 types require the `aes` feature; delegate to the signed
    // variant via bit-preserving transmutes.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
8089
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
    static_assert!(LANE == 0); // single-lane vector: 0 is the only valid index
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST3 (single-lane) intrinsic.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8")]
        fn vst3_lane_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; destination pointer goes last.
    vst3_lane_f64_(b.0, b.1, b.2, LANE as i64, a as _)
}
8107
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST3 (single-lane) intrinsic.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8")]
        fn vst3q_lane_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; destination pointer goes last.
    vst3q_lane_f64_(b.0, b.1, b.2, LANE as i64, a as _)
}
8125
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST4 intrinsic; "unadjusted" keeps the NEON
    // vector arguments in LLVM's expected representation.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2i64.p0i8")]
        fn vst4q_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
    }
    // Four registers first, then the type-erased destination pointer.
    vst4q_s64_(b.0, b.1, b.2, b.3, a as _)
}
8141
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    // Same machine instruction as the signed variant; delegate via
    // bit-preserving transmutes.
    vst4q_s64(transmute(a), transmute(b))
}
8152
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    // poly64 types require the `aes` feature; the store itself is the same
    // instruction as the signed variant, reached via transmutes.
    vst4q_s64(transmute(a), transmute(b))
}
8163
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)
#[inline]
#[target_feature(enable = "neon")]
// NOTE: for 1-element vectors this degenerates to plain stores, so the test
// harness only checks that no extra instruction is required (`nop`).
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST4 intrinsic.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v1f64.p0i8")]
        fn vst4_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
    }
    // Four registers first, then the type-erased destination pointer.
    vst4_f64_(b.0, b.1, b.2, b.3, a as _)
}
8179
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST4 intrinsic.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4.v2f64.p0i8")]
        fn vst4q_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
    }
    // Four registers first, then the type-erased destination pointer.
    vst4q_f64_(b.0, b.1, b.2, b.3, a as _)
}
8195
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4); // 16 lanes: index must fit in 4 bits
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST4 (single-lane) intrinsic.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8")]
        fn vst4q_lane_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; destination pointer goes last.
    vst4q_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
8213
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    static_assert!(LANE == 0); // single-lane vector: 0 is the only valid index
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST4 (single-lane) intrinsic.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v1i64.p0i8")]
        fn vst4_lane_s64_(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; destination pointer goes last.
    vst4_lane_s64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
8231
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST4 (single-lane) intrinsic.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8")]
        fn vst4q_lane_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; destination pointer goes last.
    vst4q_lane_s64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
8249
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4); // 16 lanes: index must fit in 4 bits
    // Delegate to the signed variant via bit-preserving transmutes.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
8262
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    static_assert!(LANE == 0); // single-lane vector: 0 is the only valid index
    // Delegate to the signed variant via bit-preserving transmutes.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
8275
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    // Delegate to the signed variant via bit-preserving transmutes.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
8288
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4); // 16 lanes: index must fit in 4 bits
    // Polynomial and signed stores are the same instruction; delegate
    // through bit-preserving transmutes.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
8301
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    static_assert!(LANE == 0); // single-lane vector: 0 is the only valid index
    // poly64 types require the `aes` feature; delegate to the signed
    // variant via bit-preserving transmutes.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
8314
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    // poly64 types require the `aes` feature; delegate to the signed
    // variant via bit-preserving transmutes.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
8327
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    static_assert!(LANE == 0); // single-lane vector: 0 is the only valid index
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST4 (single-lane) intrinsic.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8")]
        fn vst4_lane_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; destination pointer goes last.
    vst4_lane_f64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
8345
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM ST4 (single-lane) intrinsic.
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8")]
        fn vst4q_lane_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; destination pointer goes last.
    vst4q_lane_f64_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
8363
/// Dot product index form with unsigned and signed integers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub unsafe fn vusdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2); // selects one of four 32-bit groups
    // View c's 16 bytes as four 32-bit lanes (each a group of four i8s),
    // then broadcast the selected group into both output positions.
    let c: int32x4_t = transmute(c);
    let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    // Reinterpret back to bytes and reuse the non-indexed USDOT form.
    vusdot_s32(a, b, transmute(c))
}
8378
/// Dot product index form with unsigned and signed integers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub unsafe fn vusdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2); // selects one of four 32-bit groups
    // View c's 16 bytes as four 32-bit lanes and broadcast the selected
    // group across all four output positions.
    let c: int32x4_t = transmute(c);
    let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    // Reinterpret back to bytes and reuse the non-indexed USDOT form.
    vusdotq_s32(a, b, transmute(c))
}
8393
/// Dot product index form with signed and unsigned integers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub unsafe fn vsudot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2); // selects one of four 32-bit groups
    // Broadcast the selected 32-bit group of the unsigned operand.
    let c: uint32x4_t = transmute(c);
    let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    // SUDOT(a, signed, unsigned) == USDOT(a, unsigned, signed): swap the
    // operand order so the existing USDOT wrapper can be reused.
    vusdot_s32(a, transmute(c), b)
}
8408
/// Dot product index form with signed and unsigned integers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub unsafe fn vsudotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2); // selects one of four 32-bit groups
    // Broadcast the selected 32-bit group of the unsigned operand.
    let c: uint32x4_t = transmute(c);
    let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    // SUDOT(a, signed, unsigned) == USDOT(a, unsigned, signed): swap the
    // operand order so the existing USDOT wrapper can be reused.
    vusdotq_s32(a, transmute(c), b)
}
8423
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise floating-point multiply; compiles to a single FMUL.
    simd_mul(a, b)
}
8434
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise floating-point multiply; compiles to a single FMUL.
    simd_mul(a, b)
}
8445
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
    // Splat the scalar into a vector, then multiply lane-wise.
    simd_mul(a, vdup_n_f64(b))
}
8456
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
    // Splat the scalar into both lanes, then multiply lane-wise.
    simd_mul(a, vdupq_n_f64(b))
}
8467
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0); // single-lane vector: 0 is the only valid index
    // Extract the selected scalar and reinterpret it as a 1-lane vector
    // (same bits) before the lane-wise multiply.
    simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32)))
}
8480
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    // Extract the selected scalar from the 2-lane source and reinterpret
    // it as a 1-lane vector before the lane-wise multiply.
    simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32)))
}
8493
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    static_assert!(LANE == 0); // single-lane vector: 0 is the only valid index
    // Broadcast lane LANE of b into both result lanes, then multiply.
    simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
8506
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    // Broadcast lane LANE of b into both result lanes, then multiply.
    simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
8519
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    // Scalar form: pull the selected lane out and multiply as plain f32.
    let b: f32 = simd_extract!(b, LANE as u32);
    a * b
}
8533
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    static_assert_uimm_bits!(LANE, 2); // 4 lanes: index must fit in 2 bits
    // Scalar form: pull the selected lane out and multiply as plain f32.
    let b: f32 = simd_extract!(b, LANE as u32);
    a * b
}
8547
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    static_assert!(LANE == 0); // single-lane vector: 0 is the only valid index
    // Scalar form: pull the selected lane out and multiply as plain f64.
    let b: f64 = simd_extract!(b, LANE as u32);
    a * b
}
8561
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    static_assert_uimm_bits!(LANE, 1); // 2 lanes: index must fit in 1 bit
    // Scalar form: pull the selected lane out and multiply as plain f64.
    let b: f64 = simd_extract!(b, LANE as u32);
    a * b
}
8575
/// Signed multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    // Shuffle out the high halves (lanes 8..=15), then widen-multiply them;
    // the combination lowers to SMULL2.
    let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    vmull_s8(a, b)
}
8588
/// Signed multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // Shuffle out the high halves (lanes 4..=7), then widen-multiply them;
    // the combination lowers to SMULL2.
    let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    vmull_s16(a, b)
}
8601
/// Signed multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // Shuffle out the high halves (lanes 2..=3), then widen-multiply them;
    // the combination lowers to SMULL2.
    let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
    let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
    vmull_s32(a, b)
}
8614
/// Unsigned multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    // Shuffle out the high halves (lanes 8..=15), then widen-multiply them;
    // the combination lowers to UMULL2.
    let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    vmull_u8(a, b)
}
8627
/// Unsigned multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // Shuffle out the high halves (lanes 4..=7), then widen-multiply them;
    // the combination lowers to UMULL2.
    let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    vmull_u16(a, b)
}
8640
/// Unsigned multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // Shuffle out the high halves (lanes 2..=3), then widen-multiply them;
    // the combination lowers to UMULL2.
    let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
    let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
    vmull_u32(a, b)
}
8653
/// Polynomial multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(pmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 {
    #[allow(improper_ctypes)]
    // FFI shim onto the LLVM PMULL intrinsic (carry-less 64x64->128 multiply;
    // requires the `aes` feature).
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.pmull64")]
        fn vmull_p64_(a: p64, b: p64) -> int8x16_t;
    }
    // LLVM returns the 128-bit product as a byte vector; reinterpret the
    // bits as the p128 scalar the public API exposes.
    transmute(vmull_p64_(a, b))
}
8669
/// Polynomial multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(pmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
    // Shuffle out the high halves (lanes 8..=15), then do the widening
    // polynomial multiply on them.
    let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    vmull_p8(a, b)
}
8682
/// Polynomial multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(pmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
    // Multiply the high (index 1) 64-bit elements of each input.
    vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1))
}
8693
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    // Broadcast the scalar across all lanes, then high-half widening multiply.
    vmull_high_s16(a, vdupq_n_s16(b))
}
8704
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    // Broadcast the scalar across all lanes, then high-half widening multiply.
    vmull_high_s32(a, vdupq_n_s32(b))
}
8715
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
    // Broadcast the scalar across all lanes, then high-half widening multiply.
    vmull_high_u16(a, vdupq_n_u16(b))
}
8726
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
    // Broadcast the scalar across all lanes, then high-half widening multiply.
    vmull_high_u32(a, vdupq_n_u32(b))
}
8737
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // LANE must fit in 2 bits (indices 0-3 of the 4-lane `b`), checked at
    // compile time; splat the selected lane across a full 8-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
8750
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // LANE must fit in 3 bits (indices 0-7 of the 8-lane `b`), checked at
    // compile time; splat the selected lane across a full 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
8763
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // LANE must fit in 1 bit (indices 0-1 of the 2-lane `b`), checked at
    // compile time; splat the selected lane across a full 4-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
8776
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // LANE must fit in 2 bits (indices 0-3 of the 4-lane `b`), checked at
    // compile time; splat the selected lane across a full 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
8789
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
    // LANE must fit in 2 bits (indices 0-3 of the 4-lane `b`), checked at
    // compile time; splat the selected lane across a full 8-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
8802
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // LANE must fit in 3 bits (indices 0-7 of the 8-lane `b`), checked at
    // compile time; splat the selected lane across a full 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
8815
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
    // LANE must fit in 1 bit (indices 0-1 of the 2-lane `b`), checked at
    // compile time; splat the selected lane across a full 4-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
8828
/// Multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // LANE must fit in 2 bits (indices 0-3 of the 4-lane `b`), checked at
    // compile time; splat the selected lane across a full 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
8841
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the AArch64 `fmulx` intrinsic for 2 x f32.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v2f32")]
        fn vmulx_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    vmulx_f32_(a, b)
}
8857
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the AArch64 `fmulx` intrinsic for 4 x f32.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v4f32")]
        fn vmulxq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    vmulxq_f32_(a, b)
}
8873
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the AArch64 `fmulx` intrinsic for 1 x f64.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v1f64")]
        fn vmulx_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    vmulx_f64_(a, b)
}
8889
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the AArch64 `fmulx` intrinsic for 2 x f64.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.v2f64")]
        fn vmulxq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vmulxq_f64_(a, b)
}
8905
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // `b` has a single lane, so only LANE == 0 is valid (compile-time check).
    static_assert!(LANE == 0);
    // Extract the scalar and rewrap it as a 1-lane vector for vmulx_f64.
    vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32)))
}
8918
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // LANE must fit in 1 bit (indices 0-1 of the 2-lane `b`), checked at
    // compile time; extract the scalar and rewrap it as a 1-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32)))
}
8931
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // LANE must fit in 1 bit (indices 0-1 of `b`), checked at compile time;
    // splat the selected lane across both lanes before the multiply.
    static_assert_uimm_bits!(LANE, 1);
    vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
8944
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    // LANE must fit in 2 bits (indices 0-3 of the 4-lane `b`), checked at
    // compile time; splat the selected lane into a 2-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
8957
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    // LANE must fit in 1 bit (indices 0-1 of the 2-lane `b`), checked at
    // compile time; splat the selected lane into a 4-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
8970
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // LANE must fit in 2 bits (indices 0-3 of `b`), checked at compile time;
    // splat the selected lane across all four lanes.
    static_assert_uimm_bits!(LANE, 2);
    vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
8983
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // `b` has a single lane, so only LANE == 0 is valid (compile-time check);
    // splat it into a 2-lane vector.
    static_assert!(LANE == 0);
    vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
8996
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (indices 0-1 of `b`), checked at compile time;
    // splat the selected lane across both lanes.
    static_assert_uimm_bits!(LANE, 1);
    vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
9009
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxs_f32(a: f32, b: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the scalar f32 `fmulx` intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.f32")]
        fn vmulxs_f32_(a: f32, b: f32) -> f32;
    }
    vmulxs_f32_(a, b)
}
9025
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the scalar f64 `fmulx` intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmulx.f64")]
        fn vmulxd_f64_(a: f64, b: f64) -> f64;
    }
    vmulxd_f64_(a, b)
}
9041
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // LANE must fit in 1 bit (indices 0-1 of `b`), checked at compile time;
    // extract the selected lane and do a scalar multiply-extended.
    static_assert_uimm_bits!(LANE, 1);
    vmulxs_f32(a, simd_extract!(b, LANE as u32))
}
9054
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // LANE must fit in 2 bits (indices 0-3 of `b`), checked at compile time;
    // extract the selected lane and do a scalar multiply-extended.
    static_assert_uimm_bits!(LANE, 2);
    vmulxs_f32(a, simd_extract!(b, LANE as u32))
}
9067
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // `b` has a single lane, so only LANE == 0 is valid (compile-time check).
    static_assert!(LANE == 0);
    vmulxd_f64(a, simd_extract!(b, LANE as u32))
}
9080
/// Floating-point multiply extended
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // LANE must fit in 1 bit (indices 0-1 of `b`), checked at compile time;
    // extract the selected lane and do a scalar multiply-extended.
    static_assert_uimm_bits!(LANE, 1);
    vmulxd_f64(a, simd_extract!(b, LANE as u32))
}
9093
/// Floating-point fused Multiply-Add to accumulator(vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Generic LLVM fused multiply-add: llvm.fma(x, y, z) = x * y + z.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.v1f64")]
        fn vfma_f64_(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t;
    }
    // Reorder operands so the result is a + (b * c), per the NEON contract.
    vfma_f64_(b, c, a)
}
9109
/// Floating-point fused Multiply-Add to accumulator(vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Generic LLVM fused multiply-add: llvm.fma(x, y, z) = x * y + z.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.v2f64")]
        fn vfmaq_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // Reorder operands so the result is a + (b * c), per the NEON contract.
    vfmaq_f64_(b, c, a)
}
9125
/// Floating-point fused Multiply-Add to accumulator(vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar into a vector, then delegate to the fused add.
    vfma_f64(a, b, vdup_n_f64(c))
}
9136
/// Floating-point fused Multiply-Add to accumulator(vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar into a vector, then delegate to the fused add.
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
9147
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // LANE must fit in 1 bit (indices 0-1 of `c`), checked at compile time;
    // splat the selected lane of `c`, then fused multiply-add.
    static_assert_uimm_bits!(LANE, 1);
    vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32)))
}
9160
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
    // LANE must fit in 2 bits (indices 0-3 of `c`), checked at compile time;
    // splat the selected lane of `c`, then fused multiply-add.
    static_assert_uimm_bits!(LANE, 2);
    vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32)))
}
9173
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
    // LANE must fit in 1 bit (indices 0-1 of `c`), checked at compile time;
    // splat the selected lane of `c`, then fused multiply-add.
    static_assert_uimm_bits!(LANE, 1);
    vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32)))
}
9186
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // LANE must fit in 2 bits (indices 0-3 of `c`), checked at compile time;
    // splat the selected lane of `c`, then fused multiply-add.
    static_assert_uimm_bits!(LANE, 2);
    vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32)))
}
9199
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // `c` has a single lane, so only LANE == 0 is valid (compile-time check).
    static_assert!(LANE == 0);
    vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32)))
}
9212
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x2_t) -> float64x1_t {
    // LANE must fit in 1 bit (indices 0-1 of `c`), checked at compile time;
    // splat the selected lane of `c`, then fused multiply-add.
    static_assert_uimm_bits!(LANE, 1);
    vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32)))
}
9225
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmaq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x1_t) -> float64x2_t {
    // `c` has a single lane, so only LANE == 0 is valid (compile-time check);
    // splat it across both lanes, then fused multiply-add.
    static_assert!(LANE == 0);
    vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32)))
}
9238
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmaq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (indices 0-1 of `c`), checked at compile time;
    // splat the selected lane of `c`, then fused multiply-add.
    static_assert_uimm_bits!(LANE, 1);
    vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32)))
}
9251
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Generic LLVM fused multiply-add: llvm.fma(x, y, z) = x * y + z.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f32")]
        fn vfmas_lane_f32_(a: f32, b: f32, c: f32) -> f32;
    }
    // LANE must fit in 1 bit (indices 0-1 of `c`), checked at compile time.
    static_assert_uimm_bits!(LANE, 1);
    let c: f32 = simd_extract!(c, LANE as u32);
    // Reorder operands so the result is a + (b * c), per the NEON contract.
    vfmas_lane_f32_(b, c, a)
}
9270
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Generic LLVM fused multiply-add: llvm.fma(x, y, z) = x * y + z.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f32")]
        fn vfmas_laneq_f32_(a: f32, b: f32, c: f32) -> f32;
    }
    // LANE must fit in 2 bits (indices 0-3 of `c`), checked at compile time.
    static_assert_uimm_bits!(LANE, 2);
    let c: f32 = simd_extract!(c, LANE as u32);
    // Reorder operands so the result is a + (b * c), per the NEON contract.
    vfmas_laneq_f32_(b, c, a)
}
9289
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Generic LLVM fused multiply-add: llvm.fma(x, y, z) = x * y + z.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f64")]
        fn vfmad_lane_f64_(a: f64, b: f64, c: f64) -> f64;
    }
    // `c` has a single lane, so only LANE == 0 is valid (compile-time check).
    static_assert!(LANE == 0);
    let c: f64 = simd_extract!(c, LANE as u32);
    // Reorder operands so the result is a + (b * c), per the NEON contract.
    vfmad_lane_f64_(b, c, a)
}
9308
/// Floating-point fused multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Generic LLVM fused multiply-add: llvm.fma(x, y, z) = x * y + z.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fma.f64")]
        fn vfmad_laneq_f64_(a: f64, b: f64, c: f64) -> f64;
    }
    // LANE must fit in 1 bit (indices 0-1 of `c`), checked at compile time.
    static_assert_uimm_bits!(LANE, 1);
    let c: f64 = simd_extract!(c, LANE as u32);
    // Reorder operands so the result is a + (b * c), per the NEON contract.
    vfmad_laneq_f64_(b, c, a)
}
9327
/// Floating-point fused multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Negate `b` so the fused add computes a + (-b) * c == a - b * c.
    let b: float64x1_t = simd_neg(b);
    vfma_f64(a, b, c)
}
9339
/// Floating-point fused multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Negate `b` so the fused add computes a + (-b) * c == a - b * c.
    let b: float64x2_t = simd_neg(b);
    vfmaq_f64(a, b, c)
}
9351
/// Floating-point fused Multiply-subtract to accumulator(vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar into a vector, then fused multiply-subtract.
    vfms_f64(a, b, vdup_n_f64(c))
}
9362
/// Floating-point fused Multiply-subtract to accumulator(vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar into a vector, then fused multiply-subtract.
    vfmsq_f64(a, b, vdupq_n_f64(c))
}
9373
/// Floating-point fused multiply-subtract to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // LANE must fit in 1 bit (indices 0-1 of `c`), checked at compile time;
    // splat the selected lane of `c`, then fused multiply-subtract.
    static_assert_uimm_bits!(LANE, 1);
    vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32)))
}
9386
/// Floating-point fused multiply-subtract to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
    // LANE must fit in 2 bits (indices 0-3 of `c`), checked at compile time;
    // splat the selected lane of `c`, then fused multiply-subtract.
    static_assert_uimm_bits!(LANE, 2);
    vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32)))
}
9399
9400/// Floating-point fused multiply-subtract to accumulator
9401///
9402/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)
9403#[inline]
9404#[target_feature(enable = "neon")]
9405#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9406#[rustc_legacy_const_generics(3)]
9407#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9408pub unsafe fn vfmsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
9409    static_assert_uimm_bits!(LANE, 1);
9410    vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32)))
9411}
9412
9413/// Floating-point fused multiply-subtract to accumulator
9414///
9415/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)
9416#[inline]
9417#[target_feature(enable = "neon")]
9418#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
9419#[rustc_legacy_const_generics(3)]
9420#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9421pub unsafe fn vfmsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
9422    static_assert_uimm_bits!(LANE, 2);
9423    vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32)))
9424}
9425
/// Floating-point fused multiply-subtract to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // `c` has a single lane, so 0 is the only valid index.
    static_assert!(LANE == 0);
    // Splat the selected lane of `c` and reuse the vector FMS.
    vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32)))
}

/// Floating-point fused multiply-subtract to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x2_t) -> float64x1_t {
    // LANE must index into a 2-lane vector: 1 bit wide.
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `c` and reuse the vector FMS.
    vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32)))
}

/// Floating-point fused multiply-subtract to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmsq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x1_t) -> float64x2_t {
    // `c` has a single lane, so 0 is the only valid index.
    static_assert!(LANE == 0);
    // Splat the selected lane of `c` across both lanes and reuse the vector FMS.
    vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32)))
}

/// Floating-point fused multiply-subtract to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmsq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // LANE must index into a 2-lane vector: 1 bit wide.
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `c` across both lanes and reuse the vector FMS.
    vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32)))
}
9477
/// Floating-point fused multiply-subtract to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // fms(a, b, c[LANE]) == fma(a, -b, c[LANE]): negate `b` and reuse the
    // fused multiply-add, which also performs the LANE validity check.
    vfmas_lane_f32::<LANE>(a, -b, c)
}

/// Floating-point fused multiply-subtract to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // Negate `b` and reuse the fused multiply-add (see vfmss_lane_f32).
    vfmas_laneq_f32::<LANE>(a, -b, c)
}

/// Floating-point fused multiply-subtract to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // Negate `b` and reuse the fused multiply-add (see vfmss_lane_f32).
    vfmad_lane_f64::<LANE>(a, -b, c)
}

/// Floating-point fused multiply-subtract to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // Negate `b` and reuse the fused multiply-add (see vfmss_lane_f32).
    vfmad_laneq_f64::<LANE>(a, -b, c)
}
9525
/// Divide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fdiv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Lane-wise division via the portable SIMD primitive.
    simd_div(a, b)
}

/// Divide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fdiv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Lane-wise division via the portable SIMD primitive.
    simd_div(a, b)
}

/// Divide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fdiv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise division via the portable SIMD primitive.
    simd_div(a, b)
}

/// Divide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fdiv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise division via the portable SIMD primitive.
    simd_div(a, b)
}
9569
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise subtraction via the portable SIMD primitive.
    simd_sub(a, b)
}

/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise subtraction via the portable SIMD primitive.
    simd_sub(a, b)
}

/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubd_s64(a: i64, b: i64) -> i64 {
    // Plain wrapping 64-bit subtraction; no dedicated instruction is
    // expected (the assert above checks for `nop`).
    a.wrapping_sub(b)
}

/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubd_u64(a: u64, b: u64) -> u64 {
    // Plain wrapping 64-bit subtraction; no dedicated instruction is
    // expected (the assert above checks for `nop`).
    a.wrapping_sub(b)
}
9613
/// Add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 {
    // Plain wrapping 64-bit addition; no dedicated instruction is
    // expected (the assert above checks for `nop`).
    a.wrapping_add(b)
}

/// Add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 {
    // Plain wrapping 64-bit addition; no dedicated instruction is
    // expected (the assert above checks for `nop`).
    a.wrapping_add(b)
}
9635
/// Floating-point add across vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddv_f32(a: float32x2_t) -> f32 {
    // Bind and call the LLVM `faddv` horizontal-reduction intrinsic for
    // this vector shape.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f32.v2f32")]
        fn vaddv_f32_(a: float32x2_t) -> f32;
    }
    vaddv_f32_(a)
}

/// Floating-point add across vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddvq_f32(a: float32x4_t) -> f32 {
    // Bind and call the LLVM `faddv` horizontal-reduction intrinsic for
    // this vector shape.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f32.v4f32")]
        fn vaddvq_f32_(a: float32x4_t) -> f32;
    }
    vaddvq_f32_(a)
}

/// Floating-point add across vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddvq_f64(a: float64x2_t) -> f64 {
    // Bind and call the LLVM `faddv` horizontal-reduction intrinsic for
    // this vector shape.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddv.f64.v2f64")]
        fn vaddvq_f64_(a: float64x2_t) -> f64;
    }
    vaddvq_f64_(a)
}
9683
/// Signed Add Long across Vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(saddlv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlv_s16(a: int16x4_t) -> i32 {
    // Bind and call the LLVM `saddlv` widening-reduction intrinsic.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v4i16")]
        fn vaddlv_s16_(a: int16x4_t) -> i32;
    }
    vaddlv_s16_(a)
}

/// Signed Add Long across Vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(saddlv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlvq_s16(a: int16x8_t) -> i32 {
    // Bind and call the LLVM `saddlv` widening-reduction intrinsic.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i32.v8i16")]
        fn vaddlvq_s16_(a: int16x8_t) -> i32;
    }
    vaddlvq_s16_(a)
}

/// Signed Add Long across Vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(saddlp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlv_s32(a: int32x2_t) -> i64 {
    // NOTE: the assert above expects `saddlp`, not `saddlv` — the 2-lane
    // reduction is emitted as a single pairwise add.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i64.v2i32")]
        fn vaddlv_s32_(a: int32x2_t) -> i64;
    }
    vaddlv_s32_(a)
}

/// Signed Add Long across Vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(saddlv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlvq_s32(a: int32x4_t) -> i64 {
    // Bind and call the LLVM `saddlv` widening-reduction intrinsic.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.saddlv.i64.v4i32")]
        fn vaddlvq_s32_(a: int32x4_t) -> i64;
    }
    vaddlvq_s32_(a)
}
9747
/// Unsigned Add Long across Vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uaddlv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 {
    // Bind and call the LLVM `uaddlv` widening-reduction intrinsic.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16")]
        fn vaddlv_u16_(a: uint16x4_t) -> u32;
    }
    vaddlv_u16_(a)
}

/// Unsigned Add Long across Vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uaddlv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    // Bind and call the LLVM `uaddlv` widening-reduction intrinsic.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16")]
        fn vaddlvq_u16_(a: uint16x8_t) -> u32;
    }
    vaddlvq_u16_(a)
}

/// Unsigned Add Long across Vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uaddlp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 {
    // NOTE: the assert above expects `uaddlp`, not `uaddlv` — the 2-lane
    // reduction is emitted as a single pairwise add.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32")]
        fn vaddlv_u32_(a: uint32x2_t) -> u64;
    }
    vaddlv_u32_(a)
}

/// Unsigned Add Long across Vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uaddlv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    // Bind and call the LLVM `uaddlv` widening-reduction intrinsic.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32")]
        fn vaddlvq_u32_(a: uint32x4_t) -> u64;
    }
    vaddlvq_u32_(a)
}
9811
/// Signed Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    // Take the high half of `b` (lanes 8..16), widen it, and subtract from `a`.
    let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    simd_sub(a, simd_cast(c))
}

/// Signed Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    // Take the high half of `b` (lanes 4..8), widen it, and subtract from `a`.
    let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    simd_sub(a, simd_cast(c))
}

/// Signed Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    // Take the high half of `b` (lanes 2..4), widen it, and subtract from `a`.
    let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
    simd_sub(a, simd_cast(c))
}

/// Unsigned Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    // Take the high half of `b` (lanes 8..16), widen it, and subtract from `a`.
    let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    simd_sub(a, simd_cast(c))
}

/// Unsigned Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
    // Take the high half of `b` (lanes 4..8), widen it, and subtract from `a`.
    let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    simd_sub(a, simd_cast(c))
}

/// Unsigned Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
    // Take the high half of `b` (lanes 2..4), widen it, and subtract from `a`.
    let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
    simd_sub(a, simd_cast(c))
}
9883
/// Signed Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    // Extract and widen the high halves (lanes 8..16) of both inputs,
    // then subtract in the wider element type.
    let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    let d: int16x8_t = simd_cast(c);
    let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let f: int16x8_t = simd_cast(e);
    simd_sub(d, f)
}

/// Signed Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // Extract and widen the high halves (lanes 4..8) of both inputs,
    // then subtract in the wider element type.
    let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    let d: int32x4_t = simd_cast(c);
    let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let f: int32x4_t = simd_cast(e);
    simd_sub(d, f)
}

/// Signed Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ssubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // Extract and widen the high halves (lanes 2..4) of both inputs,
    // then subtract in the wider element type.
    let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
    let d: int64x2_t = simd_cast(c);
    let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
    let f: int64x2_t = simd_cast(e);
    simd_sub(d, f)
}

/// Unsigned Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    // Extract and widen the high halves (lanes 8..16) of both inputs,
    // then subtract in the wider element type.
    let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    let d: uint16x8_t = simd_cast(c);
    let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let f: uint16x8_t = simd_cast(e);
    simd_sub(d, f)
}

/// Unsigned Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // Extract and widen the high halves (lanes 4..8) of both inputs,
    // then subtract in the wider element type.
    let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    let d: uint32x4_t = simd_cast(c);
    let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let f: uint32x4_t = simd_cast(e);
    simd_sub(d, f)
}

/// Unsigned Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // Extract and widen the high halves (lanes 2..4) of both inputs,
    // then subtract in the wider element type.
    let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
    let d: uint64x2_t = simd_cast(c);
    let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
    let f: uint64x2_t = simd_cast(e);
    simd_sub(d, f)
}
9973
/// Bit clear and exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // Bind and call the LLVM crypto BCAX intrinsic (signed variant).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v16i8")]
        fn vbcaxq_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    vbcaxq_s8_(a, b, c)
}

/// Bit clear and exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Bind and call the LLVM crypto BCAX intrinsic (signed variant).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v8i16")]
        fn vbcaxq_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    vbcaxq_s16_(a, b, c)
}

/// Bit clear and exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Bind and call the LLVM crypto BCAX intrinsic (signed variant).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v4i32")]
        fn vbcaxq_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    vbcaxq_s32_(a, b, c)
}

/// Bit clear and exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    // Bind and call the LLVM crypto BCAX intrinsic (signed variant).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxs.v2i64")]
        fn vbcaxq_s64_(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    vbcaxq_s64_(a, b, c)
}

/// Bit clear and exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Bind and call the LLVM crypto BCAX intrinsic (unsigned variant).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v16i8")]
        fn vbcaxq_u8_(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    vbcaxq_u8_(a, b, c)
}

/// Bit clear and exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    // Bind and call the LLVM crypto BCAX intrinsic (unsigned variant).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v8i16")]
        fn vbcaxq_u16_(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    vbcaxq_u16_(a, b, c)
}

/// Bit clear and exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Bind and call the LLVM crypto BCAX intrinsic (unsigned variant).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v4i32")]
        fn vbcaxq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    vbcaxq_u32_(a, b, c)
}

/// Bit clear and exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(bcax))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Bind and call the LLVM crypto BCAX intrinsic (unsigned variant).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v2i64")]
        fn vbcaxq_u64_(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    vbcaxq_u64_(a, b, c)
}
10101
/// Floating-point complex add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Bind and call the LLVM FCADD intrinsic with a 270-degree rotation.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32")]
        fn vcadd_rot270_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    vcadd_rot270_f32_(a, b)
}

/// Floating-point complex add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Bind and call the LLVM FCADD intrinsic with a 270-degree rotation.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32")]
        fn vcaddq_rot270_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    vcaddq_rot270_f32_(a, b)
}

/// Floating-point complex add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Bind and call the LLVM FCADD intrinsic with a 270-degree rotation.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64")]
        fn vcaddq_rot270_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vcaddq_rot270_f64_(a, b)
}

/// Floating-point complex add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Bind and call the LLVM FCADD intrinsic with a 90-degree rotation.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32")]
        fn vcadd_rot90_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    vcadd_rot90_f32_(a, b)
}

/// Floating-point complex add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcadd))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Bind and call the LLVM FCADD intrinsic with a 90-degree rotation.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32")]
        fn vcaddq_rot90_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    vcaddq_rot90_f32_(a, b)
}
10181
10182/// Floating-point complex add
10183///
10184/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)
10185#[inline]
10186#[target_feature(enable = "neon,fcma")]
10187#[cfg_attr(test, assert_instr(fcadd))]
10188#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
10189pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
10190    #[allow(improper_ctypes)]
10191    extern "unadjusted" {
10192        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64")]
10193        fn vcaddq_rot90_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
10194    }
10195    vcaddq_rot90_f64_(a, b)
10196}
10197
/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Wrapper over the LLVM intrinsic; `rot0`/`rot90`/`rot180`/`rot270` in the
    // `link_name` selects the FCMLA rotation variant, and the `v…` suffix the
    // vector shape. Lowers to FCMLA (requires the FCMA extension).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32")]
        fn vcmla_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    vcmla_f32_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot0.v4f32 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32")]
        fn vcmlaq_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    vcmlaq_f32_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot0.v2f64 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64")]
        fn vcmlaq_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    vcmlaq_f64_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot90.v2f32 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32")]
        fn vcmla_rot90_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    vcmla_rot90_f32_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot90.v4f32 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32")]
        fn vcmlaq_rot90_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    vcmlaq_rot90_f32_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot90.v2f64 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64")]
        fn vcmlaq_rot90_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    vcmlaq_rot90_f64_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot180.v2f32 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32")]
        fn vcmla_rot180_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    vcmla_rot180_f32_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot180.v4f32 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32")]
        fn vcmlaq_rot180_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    vcmlaq_rot180_f32_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot180.v2f64 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64")]
        fn vcmlaq_rot180_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    vcmlaq_rot180_f64_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot270.v2f32 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32")]
        fn vcmla_rot270_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    vcmla_rot270_f32_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot270.v4f32 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32")]
        fn vcmlaq_rot270_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    vcmlaq_rot270_f32_(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Forwards to llvm.aarch64.neon.vcmla.rot270.v2f64 (FCMLA, FEAT_FCMA).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64")]
        fn vcmlaq_rot270_f64_(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    vcmlaq_rot270_f64_(a, b, c)
}
10389
/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // `c` holds a single complex value (2 floats), so lane 0 is the only
    // valid index.
    static_assert!(LANE == 0);
    // Broadcast complex lane LANE of `c` — the float pair at elements
    // (2*LANE, 2*LANE + 1) — then defer to the non-lane intrinsic.
    let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmla_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
    // `c` holds two complex values, so LANE is a 1-bit index (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract complex lane LANE of `c` (elements 2*LANE, 2*LANE + 1) into a
    // 2-element vector and defer to the non-lane intrinsic.
    let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmla_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
    // `c` holds a single complex value, so lane 0 is the only valid index.
    static_assert!(LANE == 0);
    // Duplicate complex lane LANE of `c` across both complex positions of a
    // 4-element vector, then defer to the non-lane intrinsic.
    let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmlaq_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // `c` holds two complex values, so LANE is a 1-bit index (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Duplicate complex lane LANE of `c` across both complex positions.
    let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmlaq_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_rot90_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Single complex value in `c`: only lane 0 is valid.
    static_assert!(LANE == 0);
    // Select complex lane LANE (elements 2*LANE, 2*LANE + 1) of `c`.
    let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmla_rot90_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_rot90_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
    // Two complex values in `c`: LANE is a 1-bit index.
    static_assert_uimm_bits!(LANE, 1);
    // Select complex lane LANE (elements 2*LANE, 2*LANE + 1) of `c`.
    let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmla_rot90_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot90_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
    // Single complex value in `c`: only lane 0 is valid.
    static_assert!(LANE == 0);
    // Duplicate complex lane LANE of `c` across both complex positions.
    let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmlaq_rot90_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot90_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Two complex values in `c`: LANE is a 1-bit index.
    static_assert_uimm_bits!(LANE, 1);
    // Duplicate complex lane LANE of `c` across both complex positions.
    let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmlaq_rot90_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_rot180_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Single complex value in `c`: only lane 0 is valid.
    static_assert!(LANE == 0);
    // Select complex lane LANE (elements 2*LANE, 2*LANE + 1) of `c`.
    let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmla_rot180_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_rot180_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
    // Two complex values in `c`: LANE is a 1-bit index.
    static_assert_uimm_bits!(LANE, 1);
    // Select complex lane LANE (elements 2*LANE, 2*LANE + 1) of `c`.
    let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmla_rot180_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot180_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
    // Single complex value in `c`: only lane 0 is valid.
    static_assert!(LANE == 0);
    // Duplicate complex lane LANE of `c` across both complex positions.
    let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmlaq_rot180_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot180_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Two complex values in `c`: LANE is a 1-bit index.
    static_assert_uimm_bits!(LANE, 1);
    // Duplicate complex lane LANE of `c` across both complex positions.
    let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmlaq_rot180_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_rot270_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Single complex value in `c`: only lane 0 is valid.
    static_assert!(LANE == 0);
    // Select complex lane LANE (elements 2*LANE, 2*LANE + 1) of `c`.
    let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmla_rot270_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmla_rot270_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
    // Two complex values in `c`: LANE is a 1-bit index.
    static_assert_uimm_bits!(LANE, 1);
    // Select complex lane LANE (elements 2*LANE, 2*LANE + 1) of `c`.
    let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmla_rot270_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot270_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
    // Single complex value in `c`: only lane 0 is valid.
    static_assert!(LANE == 0);
    // Duplicate complex lane LANE of `c` across both complex positions.
    let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmlaq_rot270_f32(a, b, c)
}

/// Floating-point complex multiply accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub unsafe fn vcmlaq_rot270_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Two complex values in `c`: LANE is a 1-bit index.
    static_assert_uimm_bits!(LANE, 1);
    // Duplicate complex lane LANE of `c` across both complex positions.
    let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
    vcmlaq_rot270_f32(a, b, c)
}
10613
/// Dot product arithmetic (indexed)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub unsafe fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t {
    // `c` is viewed as four 32-bit lanes (four i8 apiece), so LANE is a
    // 2-bit index (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    let c: int32x4_t = transmute(c);
    // Broadcast the selected 4-byte group to both result positions, then
    // reinterpret back to i8 for the dot-product helper.
    let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    vdot_s32(a, b, transmute(c))
}

/// Dot product arithmetic (indexed)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub unsafe fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
    // LANE indexes one of four 4-byte groups of `c` (2-bit immediate).
    static_assert_uimm_bits!(LANE, 2);
    let c: int32x4_t = transmute(c);
    // Broadcast the selected group across all four result positions.
    let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vdotq_s32(a, b, transmute(c))
}

/// Dot product arithmetic (indexed)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub unsafe fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t {
    // Unsigned counterpart of `vdot_laneq_s32`; LANE is a 2-bit immediate.
    static_assert_uimm_bits!(LANE, 2);
    let c: uint32x4_t = transmute(c);
    // Broadcast the selected 4-byte group to both result positions.
    let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    vdot_u32(a, b, transmute(c))
}

/// Dot product arithmetic (indexed)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub unsafe fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
    // Unsigned counterpart of `vdotq_laneq_s32`; LANE is a 2-bit immediate.
    static_assert_uimm_bits!(LANE, 2);
    let c: uint32x4_t = transmute(c);
    // Broadcast the selected group across all four result positions.
    let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vdotq_u32(a, b, transmute(c))
}
10673
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmax))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Forwards to llvm.aarch64.neon.fmax.v1f64; lowers to the FMAX
    // instruction.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmax.v1f64")]
        fn vmax_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    vmax_f64_(a, b)
}

/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmax))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Forwards to llvm.aarch64.neon.fmax.v2f64 (FMAX).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmax.v2f64")]
        fn vmaxq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vmaxq_f64_(a, b)
}

/// Floating-point Maximum Number (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Forwards to llvm.aarch64.neon.fmaxnm.v1f64 (FMAXNM). NOTE: per the Arm
    // ISA, the "maximum number" variant follows IEEE 754 maxNum semantics
    // (a numeric operand is preferred over a quiet NaN).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnm.v1f64")]
        fn vmaxnm_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    vmaxnm_f64_(a, b)
}

/// Floating-point Maximum Number (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Forwards to llvm.aarch64.neon.fmaxnm.v2f64 (FMAXNM).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnm.v2f64")]
        fn vmaxnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vmaxnmq_f64_(a, b)
}
10737
/// Floating-point maximum number across vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    // Horizontal reduction of a 2-element vector to a scalar. For two
    // elements a single pairwise op suffices, hence the codegen check is
    // FMAXNMP rather than FMAXNMV (cf. `vmaxnmvq_f32` below).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32")]
        fn vmaxnmv_f32_(a: float32x2_t) -> f32;
    }
    vmaxnmv_f32_(a)
}

/// Floating-point maximum number across vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    // 2-element f64 reduction: emitted as a single FMAXNMP pairwise op.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64")]
        fn vmaxnmvq_f64_(a: float64x2_t) -> f64;
    }
    vmaxnmvq_f64_(a)
}

/// Floating-point maximum number across vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    // 4-element reduction: emitted as the across-vector FMAXNMV instruction.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32")]
        fn vmaxnmvq_f32_(a: float32x4_t) -> f32;
    }
    vmaxnmvq_f32_(a)
}
10785
/// Floating-point Maximum Number Pairwise (vector).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Forwards to llvm.aarch64.neon.fmaxnmp.v2f32 (FMAXNMP): pairwise
    // "maximum number" over adjacent element pairs of the concatenated
    // operands.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v2f32")]
        fn vpmaxnm_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    vpmaxnm_f32_(a, b)
}

/// Floating-point Maximum Number Pairwise (vector).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Forwards to llvm.aarch64.neon.fmaxnmp.v2f64 (FMAXNMP).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v2f64")]
        fn vpmaxnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vpmaxnmq_f64_(a, b)
}

/// Floating-point Maximum Number Pairwise (vector).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Forwards to llvm.aarch64.neon.fmaxnmp.v4f32 (FMAXNMP).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmp.v4f32")]
        fn vpmaxnmq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    vpmaxnmq_f32_(a, b)
}
10833
/// Floating-point maximum number pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxnms_f32(a: float32x2_t) -> f32 {
    // Pairwise max of a two-lane vector is identical to an across-vector
    // reduction, so this binds the `fmaxnmv` intrinsic even though the
    // instruction checked for is `fmaxnmp`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32")]
        fn vpmaxnms_f32_(a: float32x2_t) -> f32;
    }
    vpmaxnms_f32_(a)
}

/// Floating-point maximum number pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
    // Same reduction-as-pairwise trick, f64 variant.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64")]
        fn vpmaxnmqd_f64_(a: float64x2_t) -> f64;
    }
    vpmaxnmqd_f64_(a)
}
10865
/// Floating-point maximum pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxs_f32(a: float32x2_t) -> f32 {
    // IEEE-max (not maxnm) pairwise-to-scalar; two-lane pairwise max is
    // expressed via the `fmaxv` across-vector reduction intrinsic.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32")]
        fn vpmaxs_f32_(a: float32x2_t) -> f32;
    }
    vpmaxs_f32_(a)
}

/// Floating-point maximum pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxqd_f64(a: float64x2_t) -> f64 {
    // Same pattern, f64 variant.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64")]
        fn vpmaxqd_f64_(a: float64x2_t) -> f64;
    }
    vpmaxqd_f64_(a)
}
10897
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmin))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise IEEE minimum; direct binding to LLVM's `fmin.v1f64`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmin.v1f64")]
        fn vmin_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    vmin_f64_(a, b)
}

/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmin))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Same binding, full 128-bit (2 x f64) variant.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fmin.v2f64")]
        fn vminq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vminq_f64_(a, b)
}
10929
/// Floating-point Minimum Number (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // "Minimum number" (fminnm) semantics: binds LLVM's `fminnm.v1f64`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnm.v1f64")]
        fn vminnm_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    vminnm_f64_(a, b)
}

/// Floating-point Minimum Number (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnm))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Same binding, 2 x f64 variant.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnm.v2f64")]
        fn vminnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vminnmq_f64_(a, b)
}
10961
/// Floating-point minimum number across vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminnmv_f32(a: float32x2_t) -> f32 {
    // Across-vector reduction; a two-lane reduction is expected to lower
    // to a single pairwise `fminnmp` (see `assert_instr`).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32")]
        fn vminnmv_f32_(a: float32x2_t) -> f32;
    }
    vminnmv_f32_(a)
}

/// Floating-point minimum number across vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminnmvq_f64(a: float64x2_t) -> f64 {
    // Same two-lane reduction, f64 variant.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64")]
        fn vminnmvq_f64_(a: float64x2_t) -> f64;
    }
    vminnmvq_f64_(a)
}

/// Floating-point minimum number across vector
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 {
    // Four-lane reduction; lowers to a true `fminnmv`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32")]
        fn vminnmvq_f32_(a: float32x4_t) -> f32;
    }
    vminnmvq_f32_(a)
}
11009
/// Vector move
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
    // Select the high half (lanes 8..15), then sign-extend each lane to
    // twice its width via the low-half widening intrinsic.
    let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    vmovl_s8(a)
}

/// Vector move
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
    // High half (lanes 4..7), sign-extended to i32.
    let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    vmovl_s16(a)
}

/// Vector move
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
    // High half (lanes 2..3), sign-extended to i64.
    let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
    vmovl_s32(a)
}

/// Vector move
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
    // High half (lanes 8..15), zero-extended to u16.
    let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    vmovl_u8(a)
}

/// Vector move
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
    // High half (lanes 4..7), zero-extended to u32.
    let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    vmovl_u16(a)
}

/// Vector move
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
    // High half (lanes 2..3), zero-extended to u64.
    let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
    vmovl_u32(a)
}
11081
/// Floating-point add pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Pairwise add across the concatenation of `a` and `b`; direct
    // binding to LLVM's `faddp.v4f32` via the "unadjusted" NEON ABI.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddp.v4f32")]
        fn vpaddq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    vpaddq_f32_(a, b)
}

/// Floating-point add pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(faddp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Same binding, 2 x f64 variant.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.faddp.v2f64")]
        fn vpaddq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vpaddq_f64_(a, b)
}
11113
/// Floating-point add pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpadds_f32(a: float32x2_t) -> f32 {
    // Plain scalar sum of the two lanes; no dedicated NEON instruction
    // is expected in the generated code (`assert_instr(nop)`).
    let a1: f32 = simd_extract!(a, 0);
    let a2: f32 = simd_extract!(a, 1);
    a1 + a2
}

/// Floating-point add pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddd_f64(a: float64x2_t) -> f64 {
    // Same lane-extract-and-add pattern, f64 variant.
    let a1: f64 = simd_extract!(a, 0);
    let a2: f64 = simd_extract!(a, 1);
    a1 + a2
}
11139
/// Floating-point Minimum Number Pairwise (vector).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // FFI binding to LLVM's pairwise `fminnmp`; "unadjusted" keeps the
    // NEON vector ABI exactly as the LLVM intrinsic expects.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v2f32")]
        fn vpminnm_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    vpminnm_f32_(a, b)
}

/// Floating-point Minimum Number Pairwise (vector).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Same binding pattern, 2 x f64 variant.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v2f64")]
        fn vpminnmq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vpminnmq_f64_(a, b)
}

/// Floating-point Minimum Number Pairwise (vector).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Same binding pattern, 4 x f32 variant.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmp.v4f32")]
        fn vpminnmq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    vpminnmq_f32_(a, b)
}
11187
/// Floating-point minimum number pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminnms_f32(a: float32x2_t) -> f32 {
    // Pairwise min of a two-lane vector equals an across-vector
    // reduction, hence the `fminnmv` intrinsic while the checked
    // instruction is `fminnmp`.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32")]
        fn vpminnms_f32_(a: float32x2_t) -> f32;
    }
    vpminnms_f32_(a)
}

/// Floating-point minimum number pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminnmqd_f64(a: float64x2_t) -> f64 {
    // Same reduction-as-pairwise trick, f64 variant.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64")]
        fn vpminnmqd_f64_(a: float64x2_t) -> f64;
    }
    vpminnmqd_f64_(a)
}
11219
/// Floating-point minimum pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmins_f32(a: float32x2_t) -> f32 {
    // IEEE-min (not minnm) pairwise-to-scalar; two-lane pairwise min is
    // expressed via the `fminv` across-vector reduction intrinsic.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f32.v2f32")]
        fn vpmins_f32_(a: float32x2_t) -> f32;
    }
    vpmins_f32_(a)
}

/// Floating-point minimum pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminqd_f64(a: float64x2_t) -> f64 {
    // Same pattern, f64 variant.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fminv.f64.v2f64")]
        fn vpminqd_f64_(a: float64x2_t) -> f64;
    }
    vpminqd_f64_(a)
}
11251
/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    // No scalar i16 form of sqdmull exists, so splat both scalars into
    // vectors, run the vector multiply, and take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    simd_extract!(vqdmull_s16(a, b), 0)
}

/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    // The i32 case does have a dedicated scalar LLVM intrinsic.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqdmulls.scalar")]
        fn vqdmulls_s32_(a: i32, b: i32) -> i64;
    }
    vqdmulls_s32_(a, b)
}
11280
/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // Take the high halves (lanes 4..7) of both inputs, then widen-multiply.
    let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    vqdmull_s16(a, b)
}

/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // High halves (lanes 2..3) of both inputs, then widen-multiply.
    let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
    let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
    vqdmull_s32(a, b)
}

/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    // High half of `a` times the scalar `b` broadcast to every lane.
    let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    let b: int16x4_t = vdup_n_s16(b);
    vqdmull_s16(a, b)
}

/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    // Same broadcast pattern, i32 variant.
    let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
    let b: int32x2_t = vdup_n_s32(b);
    vqdmull_s32(a, b)
}
11332
/// Vector saturating doubling long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    // N indexes a lane of the 8-lane q register, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    // Splat lane N of `b` across a 4-lane vector, then widen-multiply.
    let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
    vqdmull_s16(a, b)
}

/// Vector saturating doubling long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    // N indexes a lane of the 4-lane q register: 2 bits.
    static_assert_uimm_bits!(N, 2);
    let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
    vqdmull_s32(a, b)
}
11360
/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    // N indexes a 4-lane d register: 2 bits. Extract the lane and fall
    // back to the scalar form.
    static_assert_uimm_bits!(N, 2);
    let b: i16 = simd_extract!(b, N as u32);
    vqdmullh_s16(a, b)
}

/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    // 8-lane q register: N fits in 3 bits.
    static_assert_uimm_bits!(N, 3);
    let b: i16 = simd_extract!(b, N as u32);
    vqdmullh_s16(a, b)
}

/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    // 2-lane d register: N fits in 1 bit.
    static_assert_uimm_bits!(N, 1);
    let b: i32 = simd_extract!(b, N as u32);
    vqdmulls_s32(a, b)
}

/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    // 4-lane q register: N fits in 2 bits.
    static_assert_uimm_bits!(N, 2);
    let b: i32 = simd_extract!(b, N as u32);
    vqdmulls_s32(a, b)
}
11416
/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // N indexes the 4-lane `b`: 2 bits. Take the high half of `a`,
    // splat lane N of `b`, then widen-multiply.
    static_assert_uimm_bits!(N, 2);
    let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
    vqdmull_s16(a, b)
}

/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // 2-lane `b`: N fits in 1 bit.
    static_assert_uimm_bits!(N, 1);
    let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
    let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
    vqdmull_s32(a, b)
}

/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // 8-lane `b`: N fits in 3 bits.
    static_assert_uimm_bits!(N, 3);
    let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
    vqdmull_s16(a, b)
}

/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // 4-lane `b`: N fits in 2 bits.
    static_assert_uimm_bits!(N, 2);
    let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
    let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
    vqdmull_s32(a, b)
}
11476
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Composed from the widening multiply of the high halves followed by
    // a saturating accumulate into `a`.
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}

/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Same composition, i32 -> i64 variant.
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}

/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Scalar `c` is broadcast inside vqdmull_high_n_s16.
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}

/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Same broadcast composition, i32 variant.
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
11520
/// Vector widening saturating doubling multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N selects the multiplier lane of `c` (int16x8_t: 8 lanes, 3 index bits).
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
11533
/// Vector widening saturating doubling multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N selects the multiplier lane of `c` (int32x4_t: 4 lanes, 2 index bits).
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
11546
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N selects the multiplier lane of `c` (int16x4_t: 4 lanes, 2 index bits).
    static_assert_uimm_bits!(N, 2);
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
11559
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N selects the multiplier lane of `c` (int16x8_t: 8 lanes, 3 index bits).
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
11572
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N selects the multiplier lane of `c` (int32x2_t: 2 lanes, 1 index bit).
    static_assert_uimm_bits!(N, 1);
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
11585
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N selects the multiplier lane of `c` (int32x4_t: 4 lanes, 2 index bits).
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
11598
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Scalar form built from the vector intrinsic: broadcast `b` and `c`,
    // widening doubling multiply, then saturating-add lane 0 to `a`.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    vqadds_s32(a, simd_extract!(x, 0))
}
11610
11611/// Signed saturating doubling multiply-add long
11612///
11613/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)
11614#[inline]
11615#[target_feature(enable = "neon")]
11616#[cfg_attr(test, assert_instr(sqdmlal))]
11617#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11618pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
11619    let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
11620    x as i64
11621}
11622
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // Extract lane LANE of `c` (4 lanes, 2 index bits) and defer to the
    // scalar multiply-add.
    static_assert_uimm_bits!(LANE, 2);
    vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32))
}
11635
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // Extract lane LANE of `c` (8 lanes, 3 index bits) and defer to the
    // scalar multiply-add.
    static_assert_uimm_bits!(LANE, 3);
    vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32))
}
11648
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // Extract lane LANE of `c` (2 lanes, 1 index bit) and defer to the
    // scalar multiply-add.
    static_assert_uimm_bits!(LANE, 1);
    vqdmlals_s32(a, b, simd_extract!(c, LANE as u32))
}
11661
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // Extract lane LANE of `c` (4 lanes, 2 index bits) and defer to the
    // scalar multiply-add.
    static_assert_uimm_bits!(LANE, 2);
    vqdmlals_s32(a, b, simd_extract!(c, LANE as u32))
}
11674
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Saturating doubling widening multiply of `b` and `c` (the `_high`
    // variant, mapping to SQDMLSL2), then saturating subtract from `a`.
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
11685
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Saturating doubling widening multiply of `b` and `c` (the `_high`
    // variant, mapping to SQDMLSL2), then saturating subtract from `a`.
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
11696
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // `_n_` form: the scalar `c` multiplies every lane in the widening
    // doubling multiply; the result is saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
11707
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // `_n_` form: the scalar `c` multiplies every lane in the widening
    // doubling multiply; the result is saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
11718
/// Vector widening saturating doubling multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N selects the multiplier lane of `c` (int16x8_t: 8 lanes, 3 index bits).
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
11731
/// Vector widening saturating doubling multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N selects the multiplier lane of `c` (int32x4_t: 4 lanes, 2 index bits).
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
11744
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N selects the multiplier lane of `c` (int16x4_t: 4 lanes, 2 index bits).
    static_assert_uimm_bits!(N, 2);
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
11757
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N selects the multiplier lane of `c` (int16x8_t: 8 lanes, 3 index bits).
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
11770
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N selects the multiplier lane of `c` (int32x2_t: 2 lanes, 1 index bit).
    static_assert_uimm_bits!(N, 1);
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
11783
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N selects the multiplier lane of `c` (int32x4_t: 4 lanes, 2 index bits).
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
11796
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Scalar form built from the vector intrinsic: broadcast `b` and `c`,
    // widening doubling multiply, then saturating-subtract lane 0 from `a`.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    vqsubs_s32(a, simd_extract!(x, 0))
}
11808
11809/// Signed saturating doubling multiply-subtract long
11810///
11811/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)
11812#[inline]
11813#[target_feature(enable = "neon")]
11814#[cfg_attr(test, assert_instr(sqdmlsl))]
11815#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11816pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
11817    let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c));
11818    x as i64
11819}
11820
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // Extract lane LANE of `c` (4 lanes, 2 index bits) and defer to the
    // scalar multiply-subtract.
    static_assert_uimm_bits!(LANE, 2);
    vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32))
}
11833
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // Extract lane LANE of `c` (8 lanes, 3 index bits) and defer to the
    // scalar multiply-subtract.
    static_assert_uimm_bits!(LANE, 3);
    vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32))
}
11846
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // Extract lane LANE of `c` (2 lanes, 1 index bit) and defer to the
    // scalar multiply-subtract.
    static_assert_uimm_bits!(LANE, 1);
    vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32))
}
11859
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // Extract lane LANE of `c` (4 lanes, 2 index bits) and defer to the
    // scalar multiply-subtract.
    static_assert_uimm_bits!(LANE, 2);
    vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32))
}
11872
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    // Scalar form via the vector intrinsic: broadcast both operands,
    // saturating doubling multiply-high, then extract lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    simd_extract!(vqdmulh_s16(a, b), 0)
}
11885
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    // Scalar form via the vector intrinsic: broadcast both operands,
    // saturating doubling multiply-high, then extract lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    simd_extract!(vqdmulh_s32(a, b), 0)
}
11898
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    // N selects the operand lane of `b` (4 lanes, 2 index bits); defer to
    // the scalar form.
    static_assert_uimm_bits!(N, 2);
    let b: i16 = simd_extract!(b, N as u32);
    vqdmulhh_s16(a, b)
}
11912
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    // N selects the operand lane of `b` (8 lanes, 3 index bits); defer to
    // the scalar form.
    static_assert_uimm_bits!(N, 3);
    let b: i16 = simd_extract!(b, N as u32);
    vqdmulhh_s16(a, b)
}
11926
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    // N selects the operand lane of `b` (2 lanes, 1 index bit); defer to
    // the scalar form.
    static_assert_uimm_bits!(N, 1);
    let b: i32 = simd_extract!(b, N as u32);
    vqdmulhs_s32(a, b)
}
11940
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    // N selects the operand lane of `b` (4 lanes, 2 index bits); defer to
    // the scalar form.
    static_assert_uimm_bits!(N, 2);
    let b: i32 = simd_extract!(b, N as u32);
    vqdmulhs_s32(a, b)
}
11954
/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Broadcast lane LANE of `b` (4 lanes, 2 index bits) and do a
    // full-vector saturating doubling multiply-high.
    static_assert_uimm_bits!(LANE, 2);
    vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32)))
}
11967
/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    // Broadcast lane LANE of `b` (4 lanes, 2 index bits) to a full q-vector
    // and do a saturating doubling multiply-high.
    static_assert_uimm_bits!(LANE, 2);
    vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32)))
}
11980
/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Broadcast lane LANE of `b` (2 lanes, 1 index bit) and do a
    // full-vector saturating doubling multiply-high.
    static_assert_uimm_bits!(LANE, 1);
    vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32)))
}
11993
/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    // Broadcast lane LANE of `b` (2 lanes, 1 index bit) to a full q-vector
    // and do a saturating doubling multiply-high.
    static_assert_uimm_bits!(LANE, 1);
    vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32)))
}
12006
/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovnh_s16(a: i16) -> i8 {
    // Scalar narrow via the vector intrinsic: broadcast, saturating-narrow,
    // take lane 0.
    simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0)
}
12017
/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovns_s32(a: i32) -> i16 {
    // Scalar narrow via the vector intrinsic: broadcast, saturating-narrow,
    // take lane 0.
    simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0)
}
12028
/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovnh_u16(a: u16) -> u8 {
    // Scalar narrow via the vector intrinsic: broadcast, saturating-narrow,
    // take lane 0.
    simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0)
}
12039
/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovns_u32(a: u32) -> u16 {
    // Scalar narrow via the vector intrinsic: broadcast, saturating-narrow,
    // take lane 0.
    simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0)
}
12050
/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovnd_s64(a: i64) -> i32 {
    // Unlike the 16/32-bit scalars above, a dedicated scalar LLVM intrinsic
    // exists for the 64->32 narrow, so call it directly.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64")]
        fn vqmovnd_s64_(a: i64) -> i32;
    }
    vqmovnd_s64_(a)
}
12066
/// Saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovnd_u64(a: u64) -> u32 {
    // Unlike the 16/32-bit scalars above, a dedicated scalar LLVM intrinsic
    // exists for the 64->32 narrow, so call it directly.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64")]
        fn vqmovnd_u64_(a: u64) -> u32;
    }
    vqmovnd_u64_(a)
}
12082
/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Concatenate `a` (lanes 0-7) with the saturating-narrowed `b`
    // (lanes 8-15) via an identity shuffle over both halves.
    simd_shuffle!(a, vqmovn_s16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
12093
/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Concatenate `a` (lanes 0-3) with the saturating-narrowed `b`
    // (lanes 4-7) via an identity shuffle over both halves.
    simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
12104
/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Concatenate `a` (lanes 0-1) with the saturating-narrowed `b`
    // (lanes 2-3) via an identity shuffle over both halves.
    simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3])
}
12115
/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Concatenate `a` (lanes 0-7) with the saturating-narrowed `b`
    // (lanes 8-15) via an identity shuffle over both halves.
    simd_shuffle!(a, vqmovn_u16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
12126
/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Concatenate `a` (lanes 0-3) with the saturating-narrowed `b`
    // (lanes 4-7) via an identity shuffle over both halves.
    simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
12137
/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Concatenate `a` (lanes 0-1) with the saturating-narrowed `b`
    // (lanes 2-3) via an identity shuffle over both halves.
    simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3])
}
12148
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovunh_s16(a: i16) -> u8 {
    // Scalar signed-to-unsigned narrow via the vector intrinsic: broadcast,
    // saturating-narrow, take lane 0.
    simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0)
}
12159
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovuns_s32(a: i32) -> u16 {
    // Scalar signed-to-unsigned narrow via the vector intrinsic: broadcast,
    // saturating-narrow, take lane 0.
    simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0)
}
12170
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovund_s64(a: i64) -> u32 {
    // Scalar signed-to-unsigned narrow via the vector intrinsic: broadcast,
    // saturating-narrow, take lane 0.
    simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0)
}
12181
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Concatenate `a` (lanes 0-7) with the unsigned-saturating-narrowed `b`
    // (lanes 8-15) via an identity shuffle over both halves.
    simd_shuffle!(a, vqmovun_s16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
12192
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Concatenate `a` (lanes 0-3) with the unsigned-saturating-narrowed `b`
    // (lanes 4-7) via an identity shuffle over both halves.
    simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
12203
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Concatenate `a` (lanes 0-1) with the unsigned-saturating-narrowed `b`
    // (lanes 2-3) via an identity shuffle over both halves.
    simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3])
}
12214
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    // Scalar form via the vector intrinsic: broadcast both operands,
    // rounding doubling multiply-high, then extract lane 0.
    simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0)
}
12225
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    // Scalar form via the vector intrinsic: broadcast both operands,
    // rounding doubling multiply-high, then extract lane 0.
    simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0)
}
12236
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    // Extract lane LANE of `b` (4 lanes, 2 index bits) and defer to the
    // scalar form.
    static_assert_uimm_bits!(LANE, 2);
    vqrdmulhh_s16(a, simd_extract!(b, LANE as u32))
}
12249
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    // `b` has 8 lanes, so LANE must fit in 3 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 3);
    // Multiply `a` by the selected lane of `b` via the scalar intrinsic.
    vqrdmulhh_s16(a, simd_extract!(b, LANE as u32))
}
12262
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (compile-time check).
    static_assert_uimm_bits!(LANE, 1);
    // Multiply `a` by the selected lane of `b` via the scalar intrinsic.
    vqrdmulhs_s32(a, simd_extract!(b, LANE as u32))
}
12275
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Multiply `a` by the selected lane of `b` via the scalar intrinsic.
    vqrdmulhs_s32(a, simd_extract!(b, LANE as u32))
}
12288
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the LLVM AArch64 SQRDMLAH intrinsic (4 x i16).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v4i16")]
        fn vqrdmlah_s16_(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    vqrdmlah_s16_(a, b, c)
}
12304
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the LLVM AArch64 SQRDMLAH intrinsic (8 x i16).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v8i16")]
        fn vqrdmlahq_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    vqrdmlahq_s16_(a, b, c)
}
12320
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the LLVM AArch64 SQRDMLAH intrinsic (2 x i32).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v2i32")]
        fn vqrdmlah_s32_(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    vqrdmlah_s32_(a, b, c)
}
12336
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the LLVM AArch64 SQRDMLAH intrinsic (4 x i32).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlah.v4i32")]
        fn vqrdmlahq_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    vqrdmlahq_s32_(a, b, c)
}
12352
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form: splat each operand, run the vector intrinsic, take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    simd_extract!(vqrdmlah_s16(a, b, c), 0)
}
12366
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Scalar form: splat each operand, run the vector intrinsic, take lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    simd_extract!(vqrdmlah_s32(a, b, c), 0)
}
12380
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the chosen lane of `c` to all lanes, then multiply-accumulate.
    let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlah_s16(a, b, c)
}
12394
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the chosen lane of `c` into a 4-lane vector, then multiply-accumulate.
    let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlah_s16(a, b, c)
}
12408
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the chosen lane of `c` into an 8-lane vector, then multiply-accumulate.
    let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlahq_s16(a, b, c)
}
12422
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the chosen lane of `c` to all lanes, then multiply-accumulate.
    let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlahq_s16(a, b, c)
}
12436
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (compile-time check).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the chosen lane of `c` to both lanes, then multiply-accumulate.
    let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    vqrdmlah_s32(a, b, c)
}
12450
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the chosen lane of `c` into a 2-lane vector, then multiply-accumulate.
    let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    vqrdmlah_s32(a, b, c)
}
12464
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (compile-time check).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the chosen lane of `c` into a 4-lane vector, then multiply-accumulate.
    let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlahq_s32(a, b, c)
}
12478
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the chosen lane of `c` to all lanes, then multiply-accumulate.
    let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlahq_s32(a, b, c)
}
12492
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Pass the selected lane of `c` to the scalar intrinsic.
    vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32))
}
12505
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // `c` has 8 lanes, so LANE must fit in 3 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 3);
    // Pass the selected lane of `c` to the scalar intrinsic.
    vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32))
}
12518
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // `c` has 2 lanes, so LANE must fit in 1 bit (compile-time check).
    static_assert_uimm_bits!(LANE, 1);
    // Pass the selected lane of `c` to the scalar intrinsic.
    vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32))
}
12531
/// Signed saturating rounding doubling multiply accumulate returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Pass the selected lane of `c` to the scalar intrinsic.
    vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32))
}
12544
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the LLVM AArch64 SQRDMLSH intrinsic (4 x i16).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16")]
        fn vqrdmlsh_s16_(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    vqrdmlsh_s16_(a, b, c)
}
12560
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the LLVM AArch64 SQRDMLSH intrinsic (8 x i16).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16")]
        fn vqrdmlshq_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    vqrdmlshq_s16_(a, b, c)
}
12576
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the LLVM AArch64 SQRDMLSH intrinsic (2 x i32).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32")]
        fn vqrdmlsh_s32_(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    vqrdmlsh_s32_(a, b, c)
}
12592
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the LLVM AArch64 SQRDMLSH intrinsic (4 x i32).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32")]
        fn vqrdmlshq_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    vqrdmlshq_s32_(a, b, c)
}
12608
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form: splat each operand, run the vector intrinsic, take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    simd_extract!(vqrdmlsh_s16(a, b, c), 0)
}
12622
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Scalar form: splat each operand, run the vector intrinsic, take lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    simd_extract!(vqrdmlsh_s32(a, b, c), 0)
}
12636
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the chosen lane of `c` to all lanes, then multiply-subtract.
    let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlsh_s16(a, b, c)
}
12650
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the chosen lane of `c` into a 4-lane vector, then multiply-subtract.
    let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlsh_s16(a, b, c)
}
12664
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the chosen lane of `c` into an 8-lane vector, then multiply-subtract.
    let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlshq_s16(a, b, c)
}
12678
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the chosen lane of `c` to all lanes, then multiply-subtract.
    let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlshq_s16(a, b, c)
}
12692
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (compile-time check).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the chosen lane of `c` to both lanes, then multiply-subtract.
    let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    vqrdmlsh_s32(a, b, c)
}
12706
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the chosen lane of `c` into a 2-lane vector, then multiply-subtract.
    let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
    vqrdmlsh_s32(a, b, c)
}
12720
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (compile-time check).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the chosen lane of `c` into a 4-lane vector, then multiply-subtract.
    let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlshq_s32(a, b, c)
}
12734
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the chosen lane of `c` to all lanes, then multiply-subtract.
    let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
    vqrdmlshq_s32(a, b, c)
}
12748
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Pass the selected lane of `c` to the scalar intrinsic.
    vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32))
}
12761
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // `c` has 8 lanes, so LANE must fit in 3 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 3);
    // Pass the selected lane of `c` to the scalar intrinsic.
    vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32))
}
12774
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // `c` has 2 lanes, so LANE must fit in 1 bit (compile-time check).
    static_assert_uimm_bits!(LANE, 1);
    // Pass the selected lane of `c` to the scalar intrinsic.
    vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32))
}
12787
/// Signed saturating rounding doubling multiply subtract returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (compile-time check).
    static_assert_uimm_bits!(LANE, 2);
    // Pass the selected lane of `c` to the scalar intrinsic.
    vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32))
}
12800
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshls_s32(a: i32, b: i32) -> i32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the scalar LLVM AArch64 SQRSHL intrinsic (i32).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshl.i32")]
        fn vqrshls_s32_(a: i32, b: i32) -> i32;
    }
    vqrshls_s32_(a, b)
}
12816
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshld_s64(a: i64, b: i64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the scalar LLVM AArch64 SQRSHL intrinsic (i64).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshl.i64")]
        fn vqrshld_s64_(a: i64, b: i64) -> i64;
    }
    vqrshld_s64_(a, b)
}
12832
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: splat both operands, run the vector intrinsic, take lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    simd_extract!(vqrshl_s8(a, b), 0)
}
12845
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both operands, run the vector intrinsic, take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    simd_extract!(vqrshl_s16(a, b), 0)
}
12858
/// Unsigned saturating rounding shift left (signed shift amount)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the scalar LLVM AArch64 UQRSHL intrinsic (i32).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.i32")]
        fn vqrshls_u32_(a: u32, b: i32) -> u32;
    }
    vqrshls_u32_(a, b)
}
12874
/// Unsigned saturating rounding shift left (signed shift amount)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the scalar LLVM AArch64 UQRSHL intrinsic (i64).
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.i64")]
        fn vqrshld_u64_(a: u64, b: i64) -> u64;
    }
    vqrshld_u64_(a, b)
}
12890
/// Unsigned saturating rounding shift left (signed shift amount)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    // Scalar form: splat both operands (value unsigned, shift amount signed),
    // run the vector intrinsic, take lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    simd_extract!(vqrshl_u8(a, b), 0)
}
12903
/// Unsigned signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    // Implemented via the vector form: splat both scalars into 64-bit
    // vectors, apply the vector intrinsic, then extract lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    simd_extract!(vqrshl_u16(a, b), 0)
}
12916
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8); // shift limited to the result element width (8 bits)
    // Splat into a 128-bit vector, narrow via the vector intrinsic, take lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    simd_extract!(vqrshrn_n_s16::<N>(a), 0)
}
12930
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16); // shift limited to the result element width (16 bits)
    // Splat into a 128-bit vector, narrow via the vector intrinsic, take lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    simd_extract!(vqrshrn_n_s32::<N>(a), 0)
}
12944
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32); // shift limited to the result element width (32 bits)
    // Splat into a 128-bit vector, narrow via the vector intrinsic, take lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    simd_extract!(vqrshrn_n_s64::<N>(a), 0)
}
12958
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8); // shift limited to the narrowed element width (8 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqrshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
12971
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16); // shift limited to the narrowed element width (16 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
12984
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32); // shift limited to the narrowed element width (32 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3])
}
12997
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8); // shift limited to the result element width (8 bits)
    // Splat into a 128-bit vector, narrow via the vector intrinsic, take lane 0.
    let a: uint16x8_t = vdupq_n_u16(a);
    simd_extract!(vqrshrn_n_u16::<N>(a), 0)
}
13011
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16); // shift limited to the result element width (16 bits)
    // Splat into a 128-bit vector, narrow via the vector intrinsic, take lane 0.
    let a: uint32x4_t = vdupq_n_u32(a);
    simd_extract!(vqrshrn_n_u32::<N>(a), 0)
}
13025
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32); // shift limited to the result element width (32 bits)
    // Splat into a 128-bit vector, narrow via the vector intrinsic, take lane 0.
    let a: uint64x2_t = vdupq_n_u64(a);
    simd_extract!(vqrshrn_n_u64::<N>(a), 0)
}
13039
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8); // shift limited to the narrowed element width (8 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqrshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
13052
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16); // shift limited to the narrowed element width (16 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
13065
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32); // shift limited to the narrowed element width (32 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3])
}
13078
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8); // shift limited to the result element width (8 bits)
    // Splat into a 128-bit vector, narrow via the vector intrinsic, take lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    simd_extract!(vqrshrun_n_s16::<N>(a), 0)
}
13092
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16); // shift limited to the result element width (16 bits)
    // Splat into a 128-bit vector, narrow via the vector intrinsic, take lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    simd_extract!(vqrshrun_n_s32::<N>(a), 0)
}
13106
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32); // shift limited to the result element width (32 bits)
    // Splat into a 128-bit vector, narrow via the vector intrinsic, take lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    simd_extract!(vqrshrun_n_s64::<N>(a), 0)
}
13120
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8); // shift limited to the narrowed element width (8 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqrshrun_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
13133
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16); // shift limited to the narrowed element width (16 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
13146
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32); // shift limited to the narrowed element width (32 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3])
}
13159
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshld_s64(a: i64, b: i64) -> i64 {
    // This width maps directly onto a scalar LLVM intrinsic (no
    // vector round-trip).
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshl.i64")]
        fn vqshld_s64_(a: i64, b: i64) -> i64;
    }
    vqshld_s64_(a, b)
}
13175
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlb_s8(a: i8, b: i8) -> i8 {
    // Splat both scalars, apply the 64-bit vector intrinsic, take lane 0.
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    simd_extract!(c, 0)
}
13187
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlh_s16(a: i16, b: i16) -> i16 {
    // Splat both scalars, apply the 64-bit vector intrinsic, take lane 0.
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    simd_extract!(c, 0)
}
13199
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshls_s32(a: i32, b: i32) -> i32 {
    // Splat both scalars, apply the 64-bit vector intrinsic, take lane 0.
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    simd_extract!(c, 0)
}
13211
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 {
    // This width maps directly onto a scalar LLVM intrinsic (no
    // vector round-trip): unsigned value, signed shift amount.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.i64")]
        fn vqshld_u64_(a: u64, b: i64) -> u64;
    }
    vqshld_u64_(a, b)
}
13227
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlb_u8(a: u8, b: i8) -> u8 {
    // Splat both scalars, apply the 64-bit vector intrinsic, take lane 0.
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    simd_extract!(c, 0)
}
13239
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlh_u16(a: u16, b: i16) -> u16 {
    // Splat both scalars, apply the 64-bit vector intrinsic, take lane 0.
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    simd_extract!(c, 0)
}
13251
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 {
    // Splat both scalars, apply the 64-bit vector intrinsic, take lane 0.
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    simd_extract!(c, 0)
}
13263
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    static_assert_uimm_bits!(N, 3); // N must fit in 3 bits (0..=7)
    // Splat, shift via the vector intrinsic, take lane 0.
    simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0)
}
13276
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    static_assert_uimm_bits!(N, 4); // N must fit in 4 bits (0..=15)
    // Splat, shift via the vector intrinsic, take lane 0.
    simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0)
}
13289
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    static_assert_uimm_bits!(N, 5); // N must fit in 5 bits (0..=31)
    // Splat, shift via the vector intrinsic, take lane 0.
    simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0)
}
13302
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert_uimm_bits!(N, 6); // N must fit in 6 bits (0..=63)
    // Splat, shift via the vector intrinsic, take lane 0.
    simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0)
}
13315
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    static_assert_uimm_bits!(N, 3); // N must fit in 3 bits (0..=7)
    // Splat, shift via the vector intrinsic, take lane 0.
    simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0)
}
13328
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    static_assert_uimm_bits!(N, 4); // N must fit in 4 bits (0..=15)
    // Splat, shift via the vector intrinsic, take lane 0.
    simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0)
}
13341
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    static_assert_uimm_bits!(N, 5); // N must fit in 5 bits (0..=31)
    // Splat, shift via the vector intrinsic, take lane 0.
    simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0)
}
13354
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert_uimm_bits!(N, 6); // N must fit in 6 bits (0..=63)
    // Splat, shift via the vector intrinsic, take lane 0.
    simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0)
}
13367
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    static_assert_uimm_bits!(N, 3); // N must fit in 3 bits (0..=7)
    // Splat, shift via the vector intrinsic (signed in, unsigned out), take lane 0.
    simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0)
}
13380
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    static_assert_uimm_bits!(N, 4); // N must fit in 4 bits (0..=15)
    // Splat, shift via the vector intrinsic (signed in, unsigned out), take lane 0.
    simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0)
}
13393
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    static_assert_uimm_bits!(N, 5); // N must fit in 5 bits (0..=31)
    // Splat, shift via the vector intrinsic (signed in, unsigned out), take lane 0.
    simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0)
}
13406
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    static_assert_uimm_bits!(N, 6); // N must fit in 6 bits (0..=63)
    // Splat, shift via the vector intrinsic (signed in, unsigned out), take lane 0.
    simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0)
}
13419
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32); // shift limited to the result element width (32 bits)
    // Scalar LLVM intrinsic exists for this width; the const N is
    // forwarded to it as a runtime operand.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrn.i32")]
        fn vqshrnd_n_s64_(a: i64, n: i32) -> i32;
    }
    vqshrnd_n_s64_(a, N)
}
13437
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8); // shift limited to the result element width (8 bits)
    // Splat, narrow via the vector intrinsic, take lane 0.
    simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0)
}
13450
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16); // shift limited to the result element width (16 bits)
    // Splat, narrow via the vector intrinsic, take lane 0.
    simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0)
}
13463
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8); // shift limited to the narrowed element width (8 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
13476
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16); // shift limited to the narrowed element width (16 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
13489
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32); // shift limited to the narrowed element width (32 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3])
}
13502
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32); // shift limited to the result element width (32 bits)
    // Scalar LLVM intrinsic exists for this width; the const N is
    // forwarded to it as a runtime operand.
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.i32")]
        fn vqshrnd_n_u64_(a: u64, n: i32) -> u32;
    }
    vqshrnd_n_u64_(a, N)
}
13520
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8); // shift limited to the result element width (8 bits)
    // Splat, narrow via the vector intrinsic, take lane 0.
    simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0)
}
13533
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16); // shift limited to the result element width (16 bits)
    // Splat, narrow via the vector intrinsic, take lane 0.
    simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0)
}
13546
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8); // shift limited to the narrowed element width (8 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
13559
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16); // shift limited to the narrowed element width (16 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
13572
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32); // shift limited to the narrowed element width (32 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3])
}
13585
/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8); // shift limited to the result element width (8 bits)
    // Splat, narrow via the vector intrinsic (signed in, unsigned out), take lane 0.
    simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0)
}
13598
/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16); // shift limited to the result element width (16 bits)
    // Splat, narrow via the vector intrinsic (signed in, unsigned out), take lane 0.
    simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0)
}
13611
/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32); // shift limited to the result element width (32 bits)
    // Splat, narrow via the vector intrinsic (signed in, unsigned out), take lane 0.
    simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0)
}
13624
/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8); // shift limited to the narrowed element width (8 bits)
    // Narrow `b`, then concatenate: `a` fills the low half, narrowed `b` the high half.
    simd_shuffle!(a, vqshrun_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
13637
13638/// Signed saturating shift right unsigned narrow
13639///
13640/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)
13641#[inline]
13642#[target_feature(enable = "neon")]
13643#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
13644#[rustc_legacy_const_generics(2)]
13645#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13646pub unsafe fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
13647    static_assert!(N >= 1 && N <= 16);
13648    simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
13649}
13650
13651/// Signed saturating shift right unsigned narrow
13652///
13653/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)
13654#[inline]
13655#[target_feature(enable = "neon")]
13656#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
13657#[rustc_legacy_const_generics(2)]
13658#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13659pub unsafe fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
13660    static_assert!(N >= 1 && N <= 32);
13661    simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3])
13662}
13663
// Scalar USQADD wrappers for the 8/16-bit element sizes: broadcast both
// scalars into vectors, use the vector form, and extract lane 0.

/// Unsigned saturating accumulate of signed value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqaddb_u8(a: u8, b: i8) -> u8 {
    simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0)
}

/// Unsigned saturating accumulate of signed value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqaddh_u16(a: u16, b: i16) -> u16 {
    simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0)
}
13685
// Scalar USQADD for 32/64-bit elements: unlike the 8/16-bit variants above,
// LLVM provides scalar (`i32`/`i64`) intrinsics, so these bind directly.

/// Unsigned saturating accumulate of signed value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.i32")]
        fn vsqadds_u32_(a: u32, b: i32) -> u32;
    }
    vsqadds_u32_(a, b)
}

/// Unsigned saturating accumulate of signed value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.i64")]
        fn vsqaddd_u64_(a: u64, b: i64) -> u64;
    }
    vsqaddd_u64_(a, b)
}
13717
// Vector square root: lowered through the generic `simd_fsqrt` platform
// intrinsic, which compiles to FSQRT on AArch64 (checked by `assert_instr`).

/// Calculates the square root of each lane.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    simd_fsqrt(a)
}

/// Calculates the square root of each lane.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    simd_fsqrt(a)
}

/// Calculates the square root of each lane.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    simd_fsqrt(a)
}

/// Calculates the square root of each lane.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    simd_fsqrt(a)
}
13761
// FRSQRTE bindings: each function links directly against the matching
// `llvm.aarch64.neon.frsqrte.*` intrinsic for its vector/scalar type.
// The `link_name` suffix (`v1f64`, `v2f64`, `f32`, `f64`) must match the
// Rust signature exactly.

/// Reciprocal square-root estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.v1f64")]
        fn vrsqrte_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrsqrte_f64_(a)
}

/// Reciprocal square-root estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.v2f64")]
        fn vrsqrteq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrsqrteq_f64_(a)
}

/// Reciprocal square-root estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrtes_f32(a: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.f32")]
        fn vrsqrtes_f32_(a: f32) -> f32;
    }
    vrsqrtes_f32_(a)
}

/// Reciprocal square-root estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrted_f64(a: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrte.f64")]
        fn vrsqrted_f64_(a: f64) -> f64;
    }
    vrsqrted_f64_(a)
}
13825
// FRSQRTS (Newton-Raphson reciprocal square-root step) bindings against
// `llvm.aarch64.neon.frsqrts.*`, one per vector/scalar type.

/// Floating-point reciprocal square root step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.v1f64")]
        fn vrsqrts_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    vrsqrts_f64_(a, b)
}

/// Floating-point reciprocal square root step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.v2f64")]
        fn vrsqrtsq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vrsqrtsq_f64_(a, b)
}

/// Floating-point reciprocal square root step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.f32")]
        fn vrsqrtss_f32_(a: f32, b: f32) -> f32;
    }
    vrsqrtss_f32_(a, b)
}

/// Floating-point reciprocal square root step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frsqrts.f64")]
        fn vrsqrtsd_f64_(a: f64, b: f64) -> f64;
    }
    vrsqrtsd_f64_(a, b)
}
13889
// FRECPE (reciprocal estimate) bindings against `llvm.aarch64.neon.frecpe.*`,
// one per vector/scalar type.

/// Reciprocal estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.v1f64")]
        fn vrecpe_f64_(a: float64x1_t) -> float64x1_t;
    }
    vrecpe_f64_(a)
}

/// Reciprocal estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.v2f64")]
        fn vrecpeq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrecpeq_f64_(a)
}

/// Reciprocal estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpes_f32(a: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.f32")]
        fn vrecpes_f32_(a: f32) -> f32;
    }
    vrecpes_f32_(a)
}

/// Reciprocal estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecped_f64(a: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpe.f64")]
        fn vrecped_f64_(a: f64) -> f64;
    }
    vrecped_f64_(a)
}
13953
// FRECPS (Newton-Raphson reciprocal step) bindings against
// `llvm.aarch64.neon.frecps.*`, one per vector/scalar type.

/// Floating-point reciprocal step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.v1f64")]
        fn vrecps_f64_(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    vrecps_f64_(a, b)
}

/// Floating-point reciprocal step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.v2f64")]
        fn vrecpsq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    vrecpsq_f64_(a, b)
}

/// Floating-point reciprocal step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpss_f32(a: f32, b: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.f32")]
        fn vrecpss_f32_(a: f32, b: f32) -> f32;
    }
    vrecpss_f32_(a, b)
}

/// Floating-point reciprocal step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecps.f64")]
        fn vrecpsd_f64_(a: f64, b: f64) -> f64;
    }
    vrecpsd_f64_(a, b)
}
14017
// FRECPX (reciprocal exponent) bindings against `llvm.aarch64.neon.frecpx.*`;
// scalar-only forms.

/// Floating-point reciprocal exponent
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpxs_f32(a: f32) -> f32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpx.f32")]
        fn vrecpxs_f32_(a: f32) -> f32;
    }
    vrecpxs_f32_(a)
}

/// Floating-point reciprocal exponent
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrecpxd_f64(a: f64) -> f64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frecpx.f64")]
        fn vrecpxd_f64_(a: f64) -> f64;
    }
    vrecpxd_f64_(a)
}
14049
// poly64 <-> int64/uint64 reinterpret casts: each is a bit-preserving
// `transmute` between vector types of identical size, generating no
// instruction (checked by `assert_instr(nop)`).

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    transmute(a)
}
14137
// float64 -> signed-integer reinterpret casts: bit-preserving `transmute`s
// between equally-sized vector types; no instruction is emitted
// (`assert_instr(nop)`).

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    transmute(a)
}
14225
// float64 -> unsigned-integer reinterpret casts: bit-preserving `transmute`s
// between equally-sized vector types; no instruction is emitted
// (`assert_instr(nop)`).

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    transmute(a)
}
14313
// float -> polynomial reinterpret casts (including the 128-bit `p128` view of
// a full q-register): bit-preserving `transmute`s between equally-sized
// types; no instruction is emitted (`assert_instr(nop)`).

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    transmute(a)
}
14412
/// Vector reinterpret cast operation
///
/// No-op bit cast from `int8x8_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `int16x4_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `int32x2_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `int64x1_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `int8x16_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `int16x8_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `int32x4_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `int64x2_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    transmute(a)
}
14500
/// Vector reinterpret cast operation
///
/// No-op bit cast from `poly8x8_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `uint16x4_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `uint32x2_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `uint64x1_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `poly8x16_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `uint16x8_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `uint32x4_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `uint64x2_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    transmute(a)
}
14588
/// Vector reinterpret cast operation
///
/// No-op bit cast from `uint8x8_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `poly16x4_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `poly64x1_t` to `float64x1_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `poly64x1_t` to `float32x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `uint8x16_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `poly16x8_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `poly64x2_t` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `poly64x2_t` to `float32x4_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from the 128-bit scalar `p128` to `float64x2_t`; the underlying bits are unchanged.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    transmute(a)
}
14687
/// Vector reinterpret cast operation
///
/// No-op bit cast from `float32x2_t` to `float64x1_t`; the underlying bits are unchanged
/// (this is not a numeric conversion).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `float64x1_t` to `float32x2_t`; the underlying bits are unchanged
/// (this is not a numeric conversion).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `float32x4_t` to `float64x2_t`; the underlying bits are unchanged
/// (this is not a numeric conversion).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    transmute(a)
}

/// Vector reinterpret cast operation
///
/// No-op bit cast from `float64x2_t` to `float32x4_t`; the underlying bits are unchanged
/// (this is not a numeric conversion).
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    transmute(a)
}
14731
/// Signed rounding shift left
///
/// Shifts `a` by `b` via the LLVM `llvm.aarch64.neon.srshl.i64` intrinsic (SRSHL).
/// A negative `b` performs a rounding shift right — `vrshrd_n_s64` is built on
/// this function by passing `-N`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshld_s64(a: i64, b: i64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM intrinsic that lowers to SRSHL.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3s.v16i8")]
        fn vrshld_s64_(a: i64, b: i64) -> i64;
    }
    vrshld_s64_(a, b)
}

/// Unsigned rounding shift left
///
/// Shifts `a` by `b` via the LLVM `llvm.aarch64.neon.urshl.i64` intrinsic (URSHL).
/// The shift amount `b` is signed; `vrshrd_n_u64` passes `-N` here to shift right
/// with rounding.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // FFI binding to the LLVM intrinsic that lowers to URSHL.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.i64")]
        fn vrshld_u64_(a: u64, b: i64) -> u64;
    }
    vrshld_u64_(a, b)
}
14763
/// Signed rounding shift right
///
/// `N` must be in `1..=64`. Implemented as a rounding shift left by `-N` via
/// `vrshld_s64`, whose negative shift amounts shift right with rounding.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    // Unary `-` binds tighter than `as`, so this is `(-N) as i64`.
    vrshld_s64(a, -N as i64)
}

/// Unsigned rounding shift right
///
/// `N` must be in `1..=64`. Implemented as a rounding shift left by `-N` via
/// `vrshld_u64`, whose negative shift amounts shift right with rounding.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    // Unary `-` binds tighter than `as`, so this is `(-N) as i64`.
    vrshld_u64(a, -N as i64)
}
14789
/// Rounding shift right narrow
///
/// `N` must be in `1..=8`. The low half of the result is `a`; the high half is
/// the narrowed `vrshrn_n_s16::<N>(b)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vrshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Rounding shift right narrow
///
/// `N` must be in `1..=16`. The low half of the result is `a`; the high half is
/// the narrowed `vrshrn_n_s32::<N>(b)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Rounding shift right narrow
///
/// `N` must be in `1..=32`. The low half of the result is `a`; the high half is
/// the narrowed `vrshrn_n_s64::<N>(b)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3])
}

/// Rounding shift right narrow
///
/// `N` must be in `1..=8`. The low half of the result is `a`; the high half is
/// the narrowed `vrshrn_n_u16::<N>(b)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    simd_shuffle!(a, vrshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Rounding shift right narrow
///
/// `N` must be in `1..=16`. The low half of the result is `a`; the high half is
/// the narrowed `vrshrn_n_u32::<N>(b)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Rounding shift right narrow
///
/// `N` must be in `1..=32`. The low half of the result is `a`; the high half is
/// the narrowed `vrshrn_n_u64::<N>(b)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3])
}
14867
/// Signed rounding shift right and accumulate.
///
/// `N` must be in `1..=64`. Adds `vrshrd_n_s64::<N>(b)` to `a` using wrapping
/// (two's-complement) addition.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    let b: i64 = vrshrd_n_s64::<N>(b);
    a.wrapping_add(b)
}

/// Unsigned rounding shift right and accumulate.
///
/// `N` must be in `1..=64`. Adds `vrshrd_n_u64::<N>(b)` to `a` using wrapping
/// (modular) addition.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
14895
/// Rounding subtract returning high narrow
///
/// The low half of the result is `a`; the high half is `vrsubhn_s16(b, c)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    let x: int8x8_t = vrsubhn_s16(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Rounding subtract returning high narrow
///
/// The low half of the result is `a`; the high half is `vrsubhn_s32(b, c)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    let x: int16x4_t = vrsubhn_s32(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Rounding subtract returning high narrow
///
/// The low half of the result is `a`; the high half is `vrsubhn_s64(b, c)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    let x: int32x2_t = vrsubhn_s64(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3])
}

/// Rounding subtract returning high narrow
///
/// The low half of the result is `a`; the high half is `vrsubhn_u16(b, c)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    let x: uint8x8_t = vrsubhn_u16(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}

/// Rounding subtract returning high narrow
///
/// The low half of the result is `a`; the high half is `vrsubhn_u32(b, c)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    let x: uint16x4_t = vrsubhn_u32(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7])
}

/// Rounding subtract returning high narrow
///
/// The low half of the result is `a`; the high half is `vrsubhn_u64(b, c)`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    let x: uint32x2_t = vrsubhn_u64(b, c);
    simd_shuffle!(a, x, [0, 1, 2, 3])
}
14967
/// Insert vector element from another vector element
///
/// Returns `b` with lane `LANE` replaced by `a`. `LANE` must be `0`, since
/// `float64x1_t` has a single lane.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    simd_insert!(b, LANE as u32, a)
}

/// Insert vector element from another vector element
///
/// Returns `b` with lane `LANE` replaced by `a`. `LANE` must fit in one unsigned
/// bit, i.e. be `0` or `1`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    simd_insert!(b, LANE as u32, a)
}
14993
/// Signed Shift left
///
/// Scalar shift implemented by transmuting `a` and `b` into one-lane vectors and
/// delegating to the vector form `vshl_s64`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshld_s64(a: i64, b: i64) -> i64 {
    transmute(vshl_s64(transmute(a), transmute(b)))
}

/// Unsigned Shift left
///
/// Scalar shift implemented by transmuting `a` and `b` into one-lane vectors and
/// delegating to the vector form `vshl_u64`. The shift amount `b` is signed.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 {
    transmute(vshl_u64(transmute(a), transmute(b)))
}
15015
/// Signed shift left long
///
/// `N` must be in `0..=8`. Extracts the high half of `a` (lanes `8..16`), widens
/// each lane, and shifts it left by `N` via `vshll_n_s8`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    static_assert!(N >= 0 && N <= 8);
    let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    vshll_n_s8::<N>(b)
}

/// Signed shift left long
///
/// `N` must be in `0..=16`. Extracts the high half of `a` (lanes `4..8`), widens
/// each lane, and shifts it left by `N` via `vshll_n_s16`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 16);
    let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    vshll_n_s16::<N>(b)
}

/// Signed shift left long
///
/// `N` must be in `0..=32`. Extracts the high half of `a` (lanes `2..4`), widens
/// each lane, and shifts it left by `N` via `vshll_n_s32`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 32);
    let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
    vshll_n_s32::<N>(b)
}

/// Unsigned shift left long
///
/// `N` must be in `0..=8`. Extracts the high half of `a` (lanes `8..16`), widens
/// each lane, and shifts it left by `N` via `vshll_n_u8`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    static_assert!(N >= 0 && N <= 8);
    let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
    vshll_n_u8::<N>(b)
}

/// Unsigned shift left long
///
/// `N` must be in `0..=16`. Extracts the high half of `a` (lanes `4..8`), widens
/// each lane, and shifts it left by `N` via `vshll_n_u16`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 16);
    let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
    vshll_n_u16::<N>(b)
}

/// Unsigned shift left long
///
/// `N` must be in `0..=32`. Extracts the high half of `a` (lanes `2..4`), widens
/// each lane, and shifts it left by `N` via `vshll_n_u32`.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 32);
    let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
    vshll_n_u32::<N>(b)
}
15099
15100/// Shift right narrow
15101///
15102/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)
15103#[inline]
15104#[target_feature(enable = "neon")]
15105#[cfg_attr(test, assert_instr(shrn2, N = 2))]
15106#[rustc_legacy_const_generics(2)]
15107#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15108pub unsafe fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
15109    static_assert!(N >= 1 && N <= 8);
15110    simd_shuffle!(a, vshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
15111}
15112
15113/// Shift right narrow
15114///
15115/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)
15116#[inline]
15117#[target_feature(enable = "neon")]
15118#[cfg_attr(test, assert_instr(shrn2, N = 2))]
15119#[rustc_legacy_const_generics(2)]
15120#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15121pub unsafe fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
15122    static_assert!(N >= 1 && N <= 16);
15123    simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
15124}
15125
15126/// Shift right narrow
15127///
15128/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)
15129#[inline]
15130#[target_feature(enable = "neon")]
15131#[cfg_attr(test, assert_instr(shrn2, N = 2))]
15132#[rustc_legacy_const_generics(2)]
15133#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15134pub unsafe fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
15135    static_assert!(N >= 1 && N <= 32);
15136    simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3])
15137}
15138
15139/// Shift right narrow
15140///
15141/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)
15142#[inline]
15143#[target_feature(enable = "neon")]
15144#[cfg_attr(test, assert_instr(shrn2, N = 2))]
15145#[rustc_legacy_const_generics(2)]
15146#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15147pub unsafe fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
15148    static_assert!(N >= 1 && N <= 8);
15149    simd_shuffle!(a, vshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
15150}
15151
/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // SHRN2 accepts shift amounts of 1..=16 for 32-bit source elements.
    static_assert!(N >= 1 && N <= 16);
    // Lanes 0-3 keep `a`; lanes 4-7 (shuffle indices 4-7 select the second
    // operand) are `b >> N` narrowed to 16 bits via `vshrn_n_u32`.
    simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
15164
/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // SHRN2 accepts shift amounts of 1..=32 for 64-bit source elements.
    static_assert!(N >= 1 && N <= 32);
    // Lanes 0-1 keep `a`; lanes 2-3 (shuffle indices 2-3 select the second
    // operand) are `b >> N` narrowed to 32 bits via `vshrn_n_u64`.
    simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3])
}
15177
/// SM3PARTW1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Thin binding to the LLVM intrinsic; the operation is performed
        // entirely by the single SM3PARTW1 instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3partw1")]
        fn vsm3partw1q_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    vsm3partw1q_u32_(a, b, c)
}
15193
/// SM3PARTW2
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Thin binding to the LLVM intrinsic; the operation is performed
        // entirely by the single SM3PARTW2 instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3partw2")]
        fn vsm3partw2q_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    vsm3partw2q_u32_(a, b, c)
}
15209
/// SM3SS1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Thin binding to the LLVM intrinsic; the operation is performed
        // entirely by the single SM3SS1 instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3ss1")]
        fn vsm3ss1q_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    vsm3ss1q_u32_(a, b, c)
}
15225
/// SM4 key
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Thin binding to the LLVM intrinsic; the operation is performed
        // entirely by the single SM4EKEY instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm4ekey")]
        fn vsm4ekeyq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    vsm4ekeyq_u32_(a, b)
}
15241
/// SM4 encode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Thin binding to the LLVM intrinsic; the operation is performed
        // entirely by the single SM4E instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm4e")]
        fn vsm4eq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    vsm4eq_u32_(a, b)
}
15257
/// Rotate and exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Thin binding to the LLVM intrinsic; the operation is performed
        // entirely by the single RAX1 instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.rax1")]
        fn vrax1q_u64_(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    vrax1q_u64_(a, b)
}
15273
/// SHA512 hash update part 1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Thin binding to the LLVM intrinsic; the operation is performed
        // entirely by the single SHA512H instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512h")]
        fn vsha512hq_u64_(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    vsha512hq_u64_(a, b, c)
}
15289
/// SHA512 hash update part 2
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Thin binding to the LLVM intrinsic; the operation is performed
        // entirely by the single SHA512H2 instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512h2")]
        fn vsha512h2q_u64_(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    vsha512h2q_u64_(a, b, c)
}
15305
/// SHA512 schedule update 0
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Thin binding to the LLVM intrinsic; the operation is performed
        // entirely by the single SHA512SU0 instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512su0")]
        fn vsha512su0q_u64_(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    vsha512su0q_u64_(a, b)
}
15321
/// SHA512 schedule update 1
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Thin binding to the LLVM intrinsic; the operation is performed
        // entirely by the single SHA512SU1 instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512su1")]
        fn vsha512su1q_u64_(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    vsha512su1q_u64_(a, b, c)
}
15337
/// Floating-point round to 32-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v2f32 LLVM intrinsic; one FRINT32X instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v2f32")]
        fn vrnd32x_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnd32x_f32_(a)
}
15353
/// Floating-point round to 32-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v4f32 LLVM intrinsic; one FRINT32X instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v4f32")]
        fn vrnd32xq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrnd32xq_f32_(a)
}
15369
/// Floating-point round to 32-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v2f64 LLVM intrinsic; one FRINT32X instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32x.v2f64")]
        fn vrnd32xq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrnd32xq_f64_(a)
}
15385
/// Floating-point round to 32-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Note the non-`neon` scalar `f64` link name: the 1-lane vector is
        // handled through the scalar form of the intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint32x.f64")]
        fn vrnd32x_f64_(a: f64) -> f64;
    }
    // Extract the single lane, round it as a scalar, then transmute the `f64`
    // result back to the same-sized `float64x1_t`.
    transmute(vrnd32x_f64_(simd_extract!(a, 0)))
}
15401
/// Floating-point round to 32-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v2f32 LLVM intrinsic; one FRINT32Z instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v2f32")]
        fn vrnd32z_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnd32z_f32_(a)
}
15417
/// Floating-point round to 32-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v4f32 LLVM intrinsic; one FRINT32Z instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v4f32")]
        fn vrnd32zq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrnd32zq_f32_(a)
}
15433
/// Floating-point round to 32-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v2f64 LLVM intrinsic; one FRINT32Z instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint32z.v2f64")]
        fn vrnd32zq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrnd32zq_f64_(a)
}
15449
/// Floating-point round to 32-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Note the non-`neon` scalar `f64` link name: the 1-lane vector is
        // handled through the scalar form of the intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint32z.f64")]
        fn vrnd32z_f64_(a: f64) -> f64;
    }
    // Extract the single lane, round it as a scalar, then transmute the `f64`
    // result back to the same-sized `float64x1_t`.
    transmute(vrnd32z_f64_(simd_extract!(a, 0)))
}
15465
/// Floating-point round to 64-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v2f32 LLVM intrinsic; one FRINT64X instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v2f32")]
        fn vrnd64x_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnd64x_f32_(a)
}
15481
/// Floating-point round to 64-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v4f32 LLVM intrinsic; one FRINT64X instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v4f32")]
        fn vrnd64xq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrnd64xq_f32_(a)
}
15497
/// Floating-point round to 64-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v2f64 LLVM intrinsic; one FRINT64X instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64x.v2f64")]
        fn vrnd64xq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrnd64xq_f64_(a)
}
15513
/// Floating-point round to 64-bit integer, using current rounding mode
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Note the non-`neon` scalar `f64` link name: the 1-lane vector is
        // handled through the scalar form of the intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint64x.f64")]
        fn vrnd64x_f64_(a: f64) -> f64;
    }
    // Extract the single lane, round it as a scalar, then transmute the `f64`
    // result back to the same-sized `float64x1_t`.
    transmute(vrnd64x_f64_(simd_extract!(a, 0)))
}
15529
/// Floating-point round to 64-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v2f32 LLVM intrinsic; one FRINT64Z instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v2f32")]
        fn vrnd64z_f32_(a: float32x2_t) -> float32x2_t;
    }
    vrnd64z_f32_(a)
}
15545
/// Floating-point round to 64-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v4f32 LLVM intrinsic; one FRINT64Z instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v4f32")]
        fn vrnd64zq_f32_(a: float32x4_t) -> float32x4_t;
    }
    vrnd64zq_f32_(a)
}
15561
/// Floating-point round to 64-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the v2f64 LLVM intrinsic; one FRINT64Z instruction.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.frint64z.v2f64")]
        fn vrnd64zq_f64_(a: float64x2_t) -> float64x2_t;
    }
    vrnd64zq_f64_(a)
}
15577
/// Floating-point round to 64-bit integer toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)
#[inline]
#[target_feature(enable = "neon,frintts")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Note the non-`neon` scalar `f64` link name: the 1-lane vector is
        // handled through the scalar form of the intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.frint64z.f64")]
        fn vrnd64z_f64_(a: f64) -> f64;
    }
    // Extract the single lane, round it as a scalar, then transmute the `f64`
    // result back to the same-sized `float64x1_t`.
    transmute(vrnd64z_f64_(simd_extract!(a, 0)))
}
15593
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 8 select from `b`).
    simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
15604
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 16 select from `b`).
    simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
}
15615
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 4 select from `b`).
    simd_shuffle!(a, b, [0, 4, 2, 6])
}
15626
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 8 select from `b`).
    simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
15637
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 4 select from `b`).
    simd_shuffle!(a, b, [0, 4, 2, 6])
}
15648
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 8 select from `b`).
    simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
15659
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 16 select from `b`).
    simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
}
15670
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 4 select from `b`).
    simd_shuffle!(a, b, [0, 4, 2, 6])
}
15681
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 8 select from `b`).
    simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
15692
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 4 select from `b`).
    simd_shuffle!(a, b, [0, 4, 2, 6])
}
15703
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 8 select from `b`).
    simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
15714
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 16 select from `b`).
    simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
}
15725
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 4 select from `b`).
    simd_shuffle!(a, b, [0, 4, 2, 6])
}
15736
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 8 select from `b`).
    simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
15747
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // With only two lanes, TRN1 selects lane 0 of each input (index 2 is
    // lane 0 of `b`), which matches ZIP1 — hence the zip1 assert above.
    simd_shuffle!(a, b, [0, 2])
}
15758
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // With only two lanes, TRN1 selects lane 0 of each input (index 2 is
    // lane 0 of `b`), which matches ZIP1 — hence the zip1 assert above.
    simd_shuffle!(a, b, [0, 2])
}
15769
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // With only two lanes, TRN1 selects lane 0 of each input (index 2 is
    // lane 0 of `b`), which matches ZIP1 — hence the zip1 assert above.
    simd_shuffle!(a, b, [0, 2])
}
15780
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // With only two lanes, TRN1 selects lane 0 of each input (index 2 is
    // lane 0 of `b`), which matches ZIP1 — hence the zip1 assert above.
    simd_shuffle!(a, b, [0, 2])
}
15791
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // With only two lanes, TRN1 selects lane 0 of each input (index 2 is
    // lane 0 of `b`), which matches ZIP1 — hence the zip1 assert above.
    simd_shuffle!(a, b, [0, 2])
}
15802
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // TRN1: interleave the even-numbered lanes of `a` with the even-numbered
    // lanes of `b` (shuffle indices >= 4 select from `b`).
    simd_shuffle!(a, b, [0, 4, 2, 6])
}
15813
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // With only two lanes, TRN1 selects lane 0 of each input (index 2 is
    // lane 0 of `b`), which matches ZIP1 — hence the zip1 assert above.
    simd_shuffle!(a, b, [0, 2])
}
15824
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // With only two lanes, TRN1 selects lane 0 of each input (index 2 is
    // lane 0 of `b`), which matches ZIP1 — hence the zip1 assert above.
    simd_shuffle!(a, b, [0, 2])
}
15835
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // TRN2: interleave the odd-numbered lanes of `a` with the odd-numbered
    // lanes of `b` (shuffle indices >= 8 select from `b`).
    simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
}
15846
/// Transpose vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // TRN2: interleave the odd-numbered lanes of `a` with the odd-numbered
    // lanes of `b` (shuffle indices >= 16 select from `b`).
    simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
}
15857
15858/// Transpose vectors
15859///
15860/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)
15861#[inline]
15862#[target_feature(enable = "neon")]
15863#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15864#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15865pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
15866    simd_shuffle!(a, b, [1, 5, 3, 7])
15867}
15868
15869/// Transpose vectors
15870///
15871/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)
15872#[inline]
15873#[target_feature(enable = "neon")]
15874#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15875#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15876pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
15877    simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
15878}
15879
15880/// Transpose vectors
15881///
15882/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)
15883#[inline]
15884#[target_feature(enable = "neon")]
15885#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15886#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15887pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
15888    simd_shuffle!(a, b, [1, 5, 3, 7])
15889}
15890
15891/// Transpose vectors
15892///
15893/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)
15894#[inline]
15895#[target_feature(enable = "neon")]
15896#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15897#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15898pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
15899    simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
15900}
15901
15902/// Transpose vectors
15903///
15904/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)
15905#[inline]
15906#[target_feature(enable = "neon")]
15907#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15908#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15909pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
15910    simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
15911}
15912
15913/// Transpose vectors
15914///
15915/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)
15916#[inline]
15917#[target_feature(enable = "neon")]
15918#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15919#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15920pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
15921    simd_shuffle!(a, b, [1, 5, 3, 7])
15922}
15923
15924/// Transpose vectors
15925///
15926/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)
15927#[inline]
15928#[target_feature(enable = "neon")]
15929#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15930#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15931pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
15932    simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
15933}
15934
15935/// Transpose vectors
15936///
15937/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)
15938#[inline]
15939#[target_feature(enable = "neon")]
15940#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15941#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15942pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
15943    simd_shuffle!(a, b, [1, 5, 3, 7])
15944}
15945
15946/// Transpose vectors
15947///
15948/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)
15949#[inline]
15950#[target_feature(enable = "neon")]
15951#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15952#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15953pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
15954    simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
15955}
15956
15957/// Transpose vectors
15958///
15959/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)
15960#[inline]
15961#[target_feature(enable = "neon")]
15962#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15963#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15964pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
15965    simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
15966}
15967
15968/// Transpose vectors
15969///
15970/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)
15971#[inline]
15972#[target_feature(enable = "neon")]
15973#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15974#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15975pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
15976    simd_shuffle!(a, b, [1, 5, 3, 7])
15977}
15978
15979/// Transpose vectors
15980///
15981/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)
15982#[inline]
15983#[target_feature(enable = "neon")]
15984#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
15985#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15986pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
15987    simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
15988}
15989
15990/// Transpose vectors
15991///
15992/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)
15993#[inline]
15994#[target_feature(enable = "neon")]
15995#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
15996#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15997pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
15998    simd_shuffle!(a, b, [1, 3])
15999}
16000
16001/// Transpose vectors
16002///
16003/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)
16004#[inline]
16005#[target_feature(enable = "neon")]
16006#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
16007#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16008pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
16009    simd_shuffle!(a, b, [1, 3])
16010}
16011
16012/// Transpose vectors
16013///
16014/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)
16015#[inline]
16016#[target_feature(enable = "neon")]
16017#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
16018#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16019pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
16020    simd_shuffle!(a, b, [1, 3])
16021}
16022
16023/// Transpose vectors
16024///
16025/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)
16026#[inline]
16027#[target_feature(enable = "neon")]
16028#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
16029#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16030pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
16031    simd_shuffle!(a, b, [1, 3])
16032}
16033
16034/// Transpose vectors
16035///
16036/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)
16037#[inline]
16038#[target_feature(enable = "neon")]
16039#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
16040#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16041pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
16042    simd_shuffle!(a, b, [1, 3])
16043}
16044
16045/// Transpose vectors
16046///
16047/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)
16048#[inline]
16049#[target_feature(enable = "neon")]
16050#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
16051#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16052pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16053    simd_shuffle!(a, b, [1, 5, 3, 7])
16054}
16055
16056/// Transpose vectors
16057///
16058/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)
16059#[inline]
16060#[target_feature(enable = "neon")]
16061#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
16062#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16063pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
16064    simd_shuffle!(a, b, [1, 3])
16065}
16066
16067/// Transpose vectors
16068///
16069/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)
16070#[inline]
16071#[target_feature(enable = "neon")]
16072#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
16073#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16074pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16075    simd_shuffle!(a, b, [1, 3])
16076}
16077
// NOTE(review): this file is machine-generated — lasting changes belong in
// `crates/stdarch-gen-arm/neon.spec`, not here.
//
// ZIP1 interleaves the *low* halves of `a` and `b`:
// result[2*i] = a[i], result[2*i+1] = b[i] for i in 0..LEN/2. In the
// `simd_shuffle!` index arrays, values >= LEN select lanes of `b`
// (e.g. for an 8-lane vector, index 8 means b[0]).

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    simd_shuffle!(a, b, [0, 4, 1, 5])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    simd_shuffle!(a, b, [0, 2])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    simd_shuffle!(a, b, [0, 4, 1, 5])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    simd_shuffle!(a, b, [0, 2])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    simd_shuffle!(a, b, [0, 4, 1, 5])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    simd_shuffle!(a, b, [0, 2])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    simd_shuffle!(a, b, [0, 4, 1, 5])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    simd_shuffle!(a, b, [0, 2])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    simd_shuffle!(a, b, [0, 4, 1, 5])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    simd_shuffle!(a, b, [0, 2])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    simd_shuffle!(a, b, [0, 2])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    simd_shuffle!(a, b, [0, 4, 1, 5])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    simd_shuffle!(a, b, [0, 2])
}
16319
// NOTE(review): this file is machine-generated — lasting changes belong in
// `crates/stdarch-gen-arm/neon.spec`, not here.
//
// ZIP2 interleaves the *high* halves of `a` and `b`:
// result[2*i] = a[LEN/2 + i], result[2*i+1] = b[LEN/2 + i] for i in
// 0..LEN/2. In the `simd_shuffle!` index arrays, values >= LEN select
// lanes of `b` (e.g. for an 8-lane vector, index 12 means b[4]).

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    simd_shuffle!(a, b, [2, 6, 3, 7])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    simd_shuffle!(a, b, [2, 6, 3, 7])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    simd_shuffle!(a, b, [2, 6, 3, 7])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    simd_shuffle!(a, b, [2, 6, 3, 7])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    simd_shuffle!(a, b, [2, 6, 3, 7])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    simd_shuffle!(a, b, [2, 6, 3, 7])
}

/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    simd_shuffle!(a, b, [1, 3])
}
16561
// NOTE(review): this file is machine-generated — lasting changes belong in
// `crates/stdarch-gen-arm/neon.spec`, not here.
//
// UZP1 takes the even-numbered lanes of the concatenation a:b:
// result[i] = a[2*i] for the low half of the result, then b[2*i] for the
// high half — hence the index arrays [0, 2, 4, ...] running past LEN into
// `b`'s lanes (values >= LEN select lanes of `b`).

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    simd_shuffle!(a, b, [0, 2, 4, 6])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    simd_shuffle!(a, b, [0, 2, 4, 6])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    simd_shuffle!(a, b, [0, 2, 4, 6])
}
16649
16650/// Unzip vectors
16651///
16652/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)
16653#[inline]
16654#[target_feature(enable = "neon")]
16655#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
16656#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16657pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
16658    simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
16659}
16660
16661/// Unzip vectors
16662///
16663/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)
16664#[inline]
16665#[target_feature(enable = "neon")]
16666#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
16667#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16668pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16669    simd_shuffle!(a, b, [0, 2, 4, 6])
16670}
16671
16672/// Unzip vectors
16673///
16674/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)
16675#[inline]
16676#[target_feature(enable = "neon")]
16677#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
16678#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16679pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
16680    simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
16681}
16682
16683/// Unzip vectors
16684///
16685/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)
16686#[inline]
16687#[target_feature(enable = "neon")]
16688#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
16689#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16690pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
16691    simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
16692}
16693
16694/// Unzip vectors
16695///
16696/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)
16697#[inline]
16698#[target_feature(enable = "neon")]
16699#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
16700#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16701pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
16702    simd_shuffle!(a, b, [0, 2, 4, 6])
16703}
16704
16705/// Unzip vectors
16706///
16707/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)
16708#[inline]
16709#[target_feature(enable = "neon")]
16710#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
16711#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16712pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
16713    simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
16714}
16715
16716/// Unzip vectors
16717///
16718/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)
16719#[inline]
16720#[target_feature(enable = "neon")]
16721#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
16722#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16723pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
16724    simd_shuffle!(a, b, [0, 2])
16725}
16726
16727/// Unzip vectors
16728///
16729/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)
16730#[inline]
16731#[target_feature(enable = "neon")]
16732#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
16733#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16734pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
16735    simd_shuffle!(a, b, [0, 2])
16736}
16737
16738/// Unzip vectors
16739///
16740/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)
16741#[inline]
16742#[target_feature(enable = "neon")]
16743#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
16744#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16745pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
16746    simd_shuffle!(a, b, [0, 2])
16747}
16748
16749/// Unzip vectors
16750///
16751/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)
16752#[inline]
16753#[target_feature(enable = "neon")]
16754#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
16755#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16756pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
16757    simd_shuffle!(a, b, [0, 2])
16758}
16759
16760/// Unzip vectors
16761///
16762/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)
16763#[inline]
16764#[target_feature(enable = "neon")]
16765#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
16766#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16767pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
16768    simd_shuffle!(a, b, [0, 2])
16769}
16770
16771/// Unzip vectors
16772///
16773/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)
16774#[inline]
16775#[target_feature(enable = "neon")]
16776#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
16777#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16778pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16779    simd_shuffle!(a, b, [0, 2, 4, 6])
16780}
16781
16782/// Unzip vectors
16783///
16784/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)
16785#[inline]
16786#[target_feature(enable = "neon")]
16787#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
16788#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16789pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
16790    simd_shuffle!(a, b, [0, 2])
16791}
16792
16793/// Unzip vectors
16794///
16795/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)
16796#[inline]
16797#[target_feature(enable = "neon")]
16798#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
16799#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16800pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16801    simd_shuffle!(a, b, [0, 2])
16802}
16803
// UZP2 ("unzip, secondary"): de-interleave, keeping the odd-indexed lanes of
// the 2*N-lane concatenation [a, b]. Every shuffle index list below is the
// odd sequence [1, 3, 5, ...], which is exactly what the UZP2 instruction
// computes.

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    simd_shuffle!(a, b, [1, 3, 5, 7])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    simd_shuffle!(a, b, [1, 3, 5, 7])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    simd_shuffle!(a, b, [1, 3, 5, 7])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    simd_shuffle!(a, b, [1, 3, 5, 7])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    simd_shuffle!(a, b, [1, 3, 5, 7])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}

// For two-lane vectors, keeping the odd lanes of [a, b] yields [a[1], b[1]],
// which is identical to interleaving the high halves; the compiler therefore
// emits ZIP2 instead of UZP2 for these variants (hence the zip2 asserts).

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    simd_shuffle!(a, b, [1, 3, 5, 7])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    simd_shuffle!(a, b, [1, 3])
}

/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    simd_shuffle!(a, b, [1, 3])
}
17045
// UABAL2/SABAL2 family: absolute difference of the *high* halves of `b` and
// `c`, widened to the element size of `a` and accumulated into `a`. Each
// function extracts the upper half with a shuffle, computes the lane-wise
// absolute difference via the narrow `vabd_*` intrinsic, widens the result
// with `simd_cast`, and adds it to the accumulator.

/// Unsigned Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    // High 8 lanes of each 16-lane operand.
    let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
    let f: uint8x8_t = vabd_u8(d, e);
    simd_add(a, simd_cast(f))
}

/// Unsigned Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
    let f: uint16x4_t = vabd_u16(d, e);
    simd_add(a, simd_cast(f))
}

/// Unsigned Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
    let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
    let f: uint32x2_t = vabd_u32(d, e);
    simd_add(a, simd_cast(f))
}

/// Signed Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
    let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
    let f: int8x8_t = vabd_s8(d, e);
    // The absolute difference is non-negative, so it is reinterpreted as
    // unsigned here; the subsequent widening cast then zero-extends rather
    // than sign-extending.
    let f: uint8x8_t = simd_cast(f);
    simd_add(a, simd_cast(f))
}

/// Signed Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
    let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
    let f: int16x4_t = vabd_s16(d, e);
    // Reinterpret as unsigned so the widening cast zero-extends.
    let f: uint16x4_t = simd_cast(f);
    simd_add(a, simd_cast(f))
}

/// Signed Absolute difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
    let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
    let f: int32x2_t = vabd_s32(d, e);
    // Reinterpret as unsigned so the widening cast zero-extends.
    let f: uint32x2_t = simd_cast(f);
    simd_add(a, simd_cast(f))
}
17132
/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the LLVM intrinsic that lowers to SQABS on the
        // 1 x i64 vector form.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v1i64")]
        fn vqabs_s64_(a: int64x1_t) -> int64x1_t;
    }
    vqabs_s64_(a)
}

/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Direct binding to the LLVM intrinsic that lowers to SQABS on the
        // 2 x i64 vector form.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.v2i64")]
        fn vqabsq_s64_(a: int64x2_t) -> int64x2_t;
    }
    vqabsq_s64_(a)
}
17164
// Scalar saturating absolute value. The 8/16-bit forms splat the scalar into
// a vector, run the vector SQABS, and extract lane 0; the 32/64-bit forms
// bind the dedicated scalar LLVM intrinsics directly.

/// Signed saturating absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabsb_s8(a: i8) -> i8 {
    // Splat -> vector sqabs -> take lane 0.
    simd_extract!(vqabs_s8(vdup_n_s8(a)), 0)
}

/// Signed saturating absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabsh_s16(a: i16) -> i16 {
    // Splat -> vector sqabs -> take lane 0.
    simd_extract!(vqabs_s16(vdup_n_s16(a)), 0)
}

/// Signed saturating absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabss_s32(a: i32) -> i32 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Scalar i32 form of the SQABS LLVM intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.i32")]
        fn vqabss_s32_(a: i32) -> i32;
    }
    vqabss_s32_(a)
}

/// Signed saturating absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqabsd_s64(a: i64) -> i64 {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        // Scalar i64 form of the SQABS LLVM intrinsic.
        #[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqabs.i64")]
        fn vqabsd_s64_(a: i64) -> i64;
    }
    vqabsd_s64_(a)
}
17218
// Scalar SLI/SRI (shift and insert). Each scalar form reuses the existing
// one-lane vector intrinsic: the i64/u64 value is transmuted into a 1-lane
// vector, shifted-and-inserted, and transmuted back. Note the differing
// valid shift ranges asserted below: SLI accepts 0..=63, SRI requires 1..=64.

/// Shift left and insert
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // Left shifts of a 64-bit element are encodable for 0..=63.
    static_assert!(N >= 0 && N <= 63);
    transmute(vsli_n_s64::<N>(transmute(a), transmute(b)))
}

/// Shift left and insert
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 0 && N <= 63);
    transmute(vsli_n_u64::<N>(transmute(a), transmute(b)))
}

/// Shift right and insert
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // Right shifts of a 64-bit element are encodable for 1..=64.
    static_assert!(N >= 1 && N <= 64);
    transmute(vsri_n_s64::<N>(transmute(a), transmute(b)))
}

/// Shift right and insert
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    transmute(vsri_n_u64::<N>(transmute(a), transmute(b)))
}
17270
17271#[cfg(test)]
17272mod test {
17273    use super::*;
17274    use crate::core_arch::simd::*;
17275    use std::mem::transmute;
17276    use stdarch_test::simd_test;
17277
17278    #[simd_test(enable = "neon,sha3")]
17279    unsafe fn test_veor3q_s8() {
17280        let a: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
17281        let b: i8x16 = i8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17282        let c: i8x16 = i8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17283        let e: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
17284        let r: i8x16 = transmute(veor3q_s8(transmute(a), transmute(b), transmute(c)));
17285        assert_eq!(r, e);
17286    }
17287
17288    #[simd_test(enable = "neon,sha3")]
17289    unsafe fn test_veor3q_s16() {
17290        let a: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
17291        let b: i16x8 = i16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17292        let c: i16x8 = i16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
17293        let e: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
17294        let r: i16x8 = transmute(veor3q_s16(transmute(a), transmute(b), transmute(c)));
17295        assert_eq!(r, e);
17296    }
17297
17298    #[simd_test(enable = "neon,sha3")]
17299    unsafe fn test_veor3q_s32() {
17300        let a: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
17301        let b: i32x4 = i32x4::new(0x00, 0x00, 0x00, 0x00);
17302        let c: i32x4 = i32x4::new(0x00, 0x00, 0x00, 0x00);
17303        let e: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
17304        let r: i32x4 = transmute(veor3q_s32(transmute(a), transmute(b), transmute(c)));
17305        assert_eq!(r, e);
17306    }
17307
17308    #[simd_test(enable = "neon,sha3")]
17309    unsafe fn test_veor3q_s64() {
17310        let a: i64x2 = i64x2::new(0x00, 0x01);
17311        let b: i64x2 = i64x2::new(0x00, 0x00);
17312        let c: i64x2 = i64x2::new(0x00, 0x00);
17313        let e: i64x2 = i64x2::new(0x00, 0x01);
17314        let r: i64x2 = transmute(veor3q_s64(transmute(a), transmute(b), transmute(c)));
17315        assert_eq!(r, e);
17316    }
17317
    // veor3q_u8: a ^ 0 ^ 0 must return a unchanged (three-way XOR identity).
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_veor3q_u8() {
        let a: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
        let b: u8x16 = u8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
        let c: u8x16 = u8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
        let e: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
        let r: u8x16 = transmute(veor3q_u8(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
17327
    // veor3q_u16: a ^ 0 ^ 0 must return a unchanged (three-way XOR identity).
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_veor3q_u16() {
        let a: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
        let b: u16x8 = u16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
        let c: u16x8 = u16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
        let e: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
        let r: u16x8 = transmute(veor3q_u16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
17337
    // veor3q_u32: a ^ 0 ^ 0 must return a unchanged (three-way XOR identity).
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_veor3q_u32() {
        let a: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
        let b: u32x4 = u32x4::new(0x00, 0x00, 0x00, 0x00);
        let c: u32x4 = u32x4::new(0x00, 0x00, 0x00, 0x00);
        let e: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
        let r: u32x4 = transmute(veor3q_u32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
17347
    // veor3q_u64: a ^ 0 ^ 0 must return a unchanged (three-way XOR identity).
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_veor3q_u64() {
        let a: u64x2 = u64x2::new(0x00, 0x01);
        let b: u64x2 = u64x2::new(0x00, 0x00);
        let c: u64x2 = u64x2::new(0x00, 0x00);
        let e: u64x2 = u64x2::new(0x00, 0x01);
        let r: u64x2 = transmute(veor3q_u64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
17357
    // vabd_f64 (single-lane): absolute difference |1.0 - 9.0| == 8.0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabd_f64() {
        let a: f64 = 1.0;
        let b: f64 = 9.0;
        let e: f64 = 8.0;
        let r: f64 = transmute(vabd_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17366
    // vabdq_f64: lane-wise absolute difference, |1-9|=8 and |2-3|=1.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabdq_f64() {
        let a: f64x2 = f64x2::new(1.0, 2.0);
        let b: f64x2 = f64x2::new(9.0, 3.0);
        let e: f64x2 = f64x2::new(8.0, 1.0);
        let r: f64x2 = transmute(vabdq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17375
    // vabds_f32 (scalar form): |1.0 - 9.0| == 8.0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabds_f32() {
        let a: f32 = 1.0;
        let b: f32 = 9.0;
        let e: f32 = 8.0;
        let r: f32 = vabds_f32(a, b);
        assert_eq!(r, e);
    }
17384
    // vabdd_f64 (scalar form): |1.0 - 9.0| == 8.0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabdd_f64() {
        let a: f64 = 1.0;
        let b: f64 = 9.0;
        let e: f64 = 8.0;
        let r: f64 = vabdd_f64(a, b);
        assert_eq!(r, e);
    }
17393
    // vabdl_high_u8: widening |diff| of the HIGH 8 lanes only (9..=16 vs 10).
    #[simd_test(enable = "neon")]
    unsafe fn test_vabdl_high_u8() {
        let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
        let b: u8x16 = u8x16::new(10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10);
        let e: u16x8 = u16x8::new(1, 0, 1, 2, 3, 4, 5, 6);
        let r: u16x8 = transmute(vabdl_high_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17402
    // vabdl_high_u16: widening |diff| of the HIGH 4 lanes (8,9,11,12 vs 10).
    #[simd_test(enable = "neon")]
    unsafe fn test_vabdl_high_u16() {
        let a: u16x8 = u16x8::new(1, 2, 3, 4, 8, 9, 11, 12);
        let b: u16x8 = u16x8::new(10, 10, 10, 10, 10, 10, 10, 10);
        let e: u32x4 = u32x4::new(2, 1, 1, 2);
        let r: u32x4 = transmute(vabdl_high_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17411
    // vabdl_high_u32: widening |diff| of the HIGH 2 lanes (3,4 vs 10 -> 7,6).
    #[simd_test(enable = "neon")]
    unsafe fn test_vabdl_high_u32() {
        let a: u32x4 = u32x4::new(1, 2, 3, 4);
        let b: u32x4 = u32x4::new(10, 10, 10, 10);
        let e: u64x2 = u64x2::new(7, 6);
        let r: u64x2 = transmute(vabdl_high_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17420
    // vabdl_high_s8: signed widening |diff| of the HIGH 8 lanes (9..=16 vs 10).
    #[simd_test(enable = "neon")]
    unsafe fn test_vabdl_high_s8() {
        let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
        let b: i8x16 = i8x16::new(10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10);
        let e: i16x8 = i16x8::new(1, 0, 1, 2, 3, 4, 5, 6);
        let r: i16x8 = transmute(vabdl_high_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17429
    // vabdl_high_s16: signed widening |diff| of the HIGH 4 lanes (9..=12 vs 10).
    #[simd_test(enable = "neon")]
    unsafe fn test_vabdl_high_s16() {
        let a: i16x8 = i16x8::new(1, 2, 3, 4, 9, 10, 11, 12);
        let b: i16x8 = i16x8::new(10, 10, 10, 10, 10, 10, 10, 10);
        let e: i32x4 = i32x4::new(1, 0, 1, 2);
        let r: i32x4 = transmute(vabdl_high_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17438
    // vabdl_high_s32: signed widening |diff| of the HIGH 2 lanes (3,4 vs 10 -> 7,6).
    #[simd_test(enable = "neon")]
    unsafe fn test_vabdl_high_s32() {
        let a: i32x4 = i32x4::new(1, 2, 3, 4);
        let b: i32x4 = i32x4::new(10, 10, 10, 10);
        let e: i64x2 = i64x2::new(7, 6);
        let r: i64x2 = transmute(vabdl_high_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17447
    // vceq_u64: equal lanes produce an all-ones mask. (Both generated cases
    // happen to be identical for this single-lane type.)
    #[simd_test(enable = "neon")]
    unsafe fn test_vceq_u64() {
        let a: u64x1 = u64x1::new(0);
        let b: u64x1 = u64x1::new(0);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vceq_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);

        let a: u64x1 = u64x1::new(0);
        let b: u64x1 = u64x1::new(0);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vceq_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17462
    // vceqq_u64: equal lanes -> all-ones mask, unequal lanes -> zero mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqq_u64() {
        let a: u64x2 = u64x2::new(0, 0x01);
        let b: u64x2 = u64x2::new(0, 0x01);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vceqq_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);

        let a: u64x2 = u64x2::new(0, 0);
        let b: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
        let r: u64x2 = transmute(vceqq_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17477
    // vceq_s64: i64::MIN == i64::MIN -> all-ones mask. (Both generated cases
    // happen to be identical for this single-lane type.)
    #[simd_test(enable = "neon")]
    unsafe fn test_vceq_s64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let b: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vceq_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);

        let a: i64x1 = i64x1::new(-9223372036854775808);
        let b: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vceq_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17492
    // vceqq_s64: equal lanes -> all-ones mask; i64::MIN vs i64::MAX -> zero mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqq_s64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, 0x01);
        let b: i64x2 = i64x2::new(-9223372036854775808, 0x01);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vceqq_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);

        let a: i64x2 = i64x2::new(-9223372036854775808, -9223372036854775808);
        let b: i64x2 = i64x2::new(-9223372036854775808, 0x7F_FF_FF_FF_FF_FF_FF_FF);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
        let r: u64x2 = transmute(vceqq_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17507
    // vceq_p64: polynomial equality, same bit pattern -> all-ones mask. (Both
    // generated cases happen to be identical for this single-lane type.)
    #[simd_test(enable = "neon")]
    unsafe fn test_vceq_p64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let b: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vceq_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);

        let a: i64x1 = i64x1::new(-9223372036854775808);
        let b: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vceq_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17522
    // vceqq_p64: equal lanes -> all-ones mask; differing lanes -> zero mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqq_p64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, 0x01);
        let b: i64x2 = i64x2::new(-9223372036854775808, 0x01);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vceqq_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);

        let a: i64x2 = i64x2::new(-9223372036854775808, -9223372036854775808);
        let b: i64x2 = i64x2::new(-9223372036854775808, 0x7F_FF_FF_FF_FF_FF_FF_FF);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
        let r: u64x2 = transmute(vceqq_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17537
    // vceq_f64: equal floats compare equal, yielding an all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceq_f64() {
        let a: f64 = 1.2;
        let b: f64 = 1.2;
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vceq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17546
    // vceqq_f64: lane-wise float equality, both lanes equal -> all-ones masks.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqq_f64() {
        let a: f64x2 = f64x2::new(1.2, 3.4);
        let b: f64x2 = f64x2::new(1.2, 3.4);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vceqq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17555
    // vceqd_s64 (scalar): 1 != 2 -> 0 (false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqd_s64() {
        let a: i64 = 1;
        let b: i64 = 2;
        let e: u64 = 0;
        let r: u64 = vceqd_s64(a, b);
        assert_eq!(r, e);
    }
17564
    // vceqd_u64 (scalar): 1 != 2 -> 0 (false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqd_u64() {
        let a: u64 = 1;
        let b: u64 = 2;
        let e: u64 = 0;
        let r: u64 = vceqd_u64(a, b);
        assert_eq!(r, e);
    }
17573
    // vceqs_f32 (scalar): 1.0 != 2.0 -> 0 (false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqs_f32() {
        let a: f32 = 1.;
        let b: f32 = 2.;
        let e: u32 = 0;
        let r: u32 = vceqs_f32(a, b);
        assert_eq!(r, e);
    }
17582
    // vceqd_f64 (scalar): 1.0 != 2.0 -> 0 (false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqd_f64() {
        let a: f64 = 1.;
        let b: f64 = 2.;
        let e: u64 = 0;
        let r: u64 = vceqd_f64(a, b);
        assert_eq!(r, e);
    }
17591
    // vceqz_s8: only the zero lane (index 1) yields an all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_s8() {
        let a: i8x8 = i8x8::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
        let e: u8x8 = u8x8::new(0, 0xFF, 0, 0, 0, 0, 0, 0);
        let r: u8x8 = transmute(vceqz_s8(transmute(a)));
        assert_eq!(r, e);
    }
17599
    // vceqzq_s8: only the zero lane (index 1) yields an all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_s8() {
        let a: i8x16 = i8x16::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x7F);
        let e: u8x16 = u8x16::new(0, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
        let r: u8x16 = transmute(vceqzq_s8(transmute(a)));
        assert_eq!(r, e);
    }
17607
    // vceqz_s16: only the zero lane (index 1) yields an all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_s16() {
        let a: i16x4 = i16x4::new(-32768, 0x00, 0x01, 0x02);
        let e: u16x4 = u16x4::new(0, 0xFF_FF, 0, 0);
        let r: u16x4 = transmute(vceqz_s16(transmute(a)));
        assert_eq!(r, e);
    }
17615
    // vceqzq_s16: only the zero lane (index 1) yields an all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_s16() {
        let a: i16x8 = i16x8::new(-32768, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
        let e: u16x8 = u16x8::new(0, 0xFF_FF, 0, 0, 0, 0, 0, 0);
        let r: u16x8 = transmute(vceqzq_s16(transmute(a)));
        assert_eq!(r, e);
    }
17623
    // vceqz_s32: i32::MIN is nonzero -> 0; zero lane -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_s32() {
        let a: i32x2 = i32x2::new(-2147483648, 0x00);
        let e: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
        let r: u32x2 = transmute(vceqz_s32(transmute(a)));
        assert_eq!(r, e);
    }
17631
    // vceqzq_s32: only the zero lane (index 1) yields an all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_s32() {
        let a: i32x4 = i32x4::new(-2147483648, 0x00, 0x01, 0x02);
        let e: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0, 0);
        let r: u32x4 = transmute(vceqzq_s32(transmute(a)));
        assert_eq!(r, e);
    }
17639
    // vceqz_s64: i64::MIN is nonzero, so the comparison-with-zero mask is 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_s64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0);
        let r: u64x1 = transmute(vceqz_s64(transmute(a)));
        assert_eq!(r, e);
    }
17647
    // vceqzq_s64: nonzero lane -> 0; zero lane -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_s64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, 0x00);
        let e: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vceqzq_s64(transmute(a)));
        assert_eq!(r, e);
    }
17655
    // vceqz_p8: polynomial compare-to-zero; only lane 1 is zero -> all-ones.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_p8() {
        let a: i8x8 = i8x8::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
        let e: u8x8 = u8x8::new(0, 0xFF, 0, 0, 0, 0, 0, 0);
        let r: u8x8 = transmute(vceqz_p8(transmute(a)));
        assert_eq!(r, e);
    }
17663
    // vceqzq_p8: polynomial compare-to-zero; only lane 1 is zero -> all-ones.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_p8() {
        let a: i8x16 = i8x16::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x7F);
        let e: u8x16 = u8x16::new(0, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
        let r: u8x16 = transmute(vceqzq_p8(transmute(a)));
        assert_eq!(r, e);
    }
17671
    // vceqz_p64: the nonzero (MIN bit pattern) lane yields a zero mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_p64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0);
        let r: u64x1 = transmute(vceqz_p64(transmute(a)));
        assert_eq!(r, e);
    }
17679
    // vceqzq_p64: nonzero lane -> 0; zero lane -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_p64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, 0x00);
        let e: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vceqzq_p64(transmute(a)));
        assert_eq!(r, e);
    }
17687
    // vceqz_u8: lanes 0 and 1 are zero -> all-ones masks; the rest -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_u8() {
        let a: u8x8 = u8x8::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
        let e: u8x8 = u8x8::new(0xFF, 0xFF, 0, 0, 0, 0, 0, 0);
        let r: u8x8 = transmute(vceqz_u8(transmute(a)));
        assert_eq!(r, e);
    }
17695
    // vceqzq_u8: lanes 0 and 1 are zero -> all-ones masks; the rest -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_u8() {
        let a: u8x16 = u8x16::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0xFF);
        let e: u8x16 = u8x16::new(0xFF, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
        let r: u8x16 = transmute(vceqzq_u8(transmute(a)));
        assert_eq!(r, e);
    }
17703
    // vceqz_u16: lanes 0 and 1 are zero -> all-ones masks; the rest -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_u16() {
        let a: u16x4 = u16x4::new(0, 0x00, 0x01, 0x02);
        let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0, 0);
        let r: u16x4 = transmute(vceqz_u16(transmute(a)));
        assert_eq!(r, e);
    }
17711
    // vceqzq_u16: lanes 0 and 1 are zero -> all-ones masks; the rest -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_u16() {
        let a: u16x8 = u16x8::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
        let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0, 0, 0, 0, 0, 0);
        let r: u16x8 = transmute(vceqzq_u16(transmute(a)));
        assert_eq!(r, e);
    }
17719
    // vceqz_u32: both lanes zero -> both masks all-ones.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_u32() {
        let a: u32x2 = u32x2::new(0, 0x00);
        let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
        let r: u32x2 = transmute(vceqz_u32(transmute(a)));
        assert_eq!(r, e);
    }
17727
    // vceqzq_u32: lanes 0 and 1 are zero -> all-ones masks; the rest -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_u32() {
        let a: u32x4 = u32x4::new(0, 0x00, 0x01, 0x02);
        let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0, 0);
        let r: u32x4 = transmute(vceqzq_u32(transmute(a)));
        assert_eq!(r, e);
    }
17735
    // vceqz_u64: zero lane -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_u64() {
        let a: u64x1 = u64x1::new(0);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vceqz_u64(transmute(a)));
        assert_eq!(r, e);
    }
17743
    // vceqzq_u64: both lanes zero -> both masks all-ones.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_u64() {
        let a: u64x2 = u64x2::new(0, 0x00);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vceqzq_u64(transmute(a)));
        assert_eq!(r, e);
    }
17751
    // vceqz_f32: 0.0 compares equal to zero -> all-ones; 1.2 -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_f32() {
        let a: f32x2 = f32x2::new(0.0, 1.2);
        let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
        let r: u32x2 = transmute(vceqz_f32(transmute(a)));
        assert_eq!(r, e);
    }
17759
    // vceqzq_f32: only the 0.0 lane yields an all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_f32() {
        let a: f32x4 = f32x4::new(0.0, 1.2, 3.4, 5.6);
        let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 0, 0);
        let r: u32x4 = transmute(vceqzq_f32(transmute(a)));
        assert_eq!(r, e);
    }
17767
    // vceqz_f64: 0.0 compares equal to zero -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqz_f64() {
        let a: f64 = 0.0;
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vceqz_f64(transmute(a)));
        assert_eq!(r, e);
    }
17775
    // vceqzq_f64: 0.0 lane -> all-ones mask; 1.2 lane -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzq_f64() {
        let a: f64x2 = f64x2::new(0.0, 1.2);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
        let r: u64x2 = transmute(vceqzq_f64(transmute(a)));
        assert_eq!(r, e);
    }
17783
    // vceqzd_s64 (scalar): 1 != 0 -> 0 (false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzd_s64() {
        let a: i64 = 1;
        let e: u64 = 0;
        let r: u64 = vceqzd_s64(a);
        assert_eq!(r, e);
    }
17791
    // vceqzd_u64 (scalar): 1 != 0 -> 0 (false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzd_u64() {
        let a: u64 = 1;
        let e: u64 = 0;
        let r: u64 = vceqzd_u64(a);
        assert_eq!(r, e);
    }
17799
    // vceqzs_f32 (scalar): 1.0 != 0.0 -> 0 (false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzs_f32() {
        let a: f32 = 1.;
        let e: u32 = 0;
        let r: u32 = vceqzs_f32(a);
        assert_eq!(r, e);
    }
17807
    // vceqzd_f64 (scalar): 1.0 != 0.0 -> 0 (false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vceqzd_f64() {
        let a: f64 = 1.;
        let e: u64 = 0;
        let r: u64 = vceqzd_f64(a);
        assert_eq!(r, e);
    }
17815
    // vtst_s64: (a & b) != 0 for MIN & MIN (sign bit set) -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtst_s64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let b: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vtst_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17824
    // vtstq_s64: lane 0 shares the sign bit -> all-ones; lane 1 is 0 & 0 -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtstq_s64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, 0x00);
        let b: i64x2 = i64x2::new(-9223372036854775808, 0x00);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
        let r: u64x2 = transmute(vtstq_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17833
    // vtst_p64: (a & b) != 0 for the shared sign bit -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtst_p64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let b: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vtst_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17842
    // vtstq_p64: lane 0 shares the sign bit -> all-ones; lane 1 is 0 & 0 -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtstq_p64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, 0x00);
        let b: i64x2 = i64x2::new(-9223372036854775808, 0x00);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
        let r: u64x2 = transmute(vtstq_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17851
    // vtst_u64: 0 & 0 == 0, so the test mask is 0 (only the false case here).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtst_u64() {
        let a: u64x1 = u64x1::new(0);
        let b: u64x1 = u64x1::new(0);
        let e: u64x1 = u64x1::new(0);
        let r: u64x1 = transmute(vtst_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17860
    // vtstq_u64: 0 & 0 == 0 in both lanes -> both masks 0 (only the false case).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtstq_u64() {
        let a: u64x2 = u64x2::new(0, 0x00);
        let b: u64x2 = u64x2::new(0, 0x00);
        let e: u64x2 = u64x2::new(0, 0);
        let r: u64x2 = transmute(vtstq_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17869
    // vtstd_s64 (scalar): 0 & 0 == 0 -> 0 (false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtstd_s64() {
        let a: i64 = 0;
        let b: i64 = 0;
        let e: u64 = 0;
        let r: u64 = vtstd_s64(a, b);
        assert_eq!(r, e);
    }
17878
    // vtstd_u64 (scalar): 0 & 0 == 0 -> 0 (false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtstd_u64() {
        let a: u64 = 0;
        let b: u64 = 0;
        let e: u64 = 0;
        let r: u64 = vtstd_u64(a, b);
        assert_eq!(r, e);
    }
17887
    // vuqadds_s32: signed saturating add of an unsigned operand, 1 + 1 == 2.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqadds_s32() {
        let a: i32 = 1;
        let b: u32 = 1;
        let e: i32 = 2;
        let r: i32 = vuqadds_s32(a, b);
        assert_eq!(r, e);
    }
17896
    // vuqaddd_s64: signed saturating add of an unsigned operand, 1 + 1 == 2.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqaddd_s64() {
        let a: i64 = 1;
        let b: u64 = 1;
        let e: i64 = 2;
        let r: i64 = vuqaddd_s64(a, b);
        assert_eq!(r, e);
    }
17905
    // vuqaddb_s8: signed saturating add of an unsigned operand, 1 + 2 == 3.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqaddb_s8() {
        let a: i8 = 1;
        let b: u8 = 2;
        let e: i8 = 3;
        let r: i8 = vuqaddb_s8(a, b);
        assert_eq!(r, e);
    }
17914
    // vuqaddh_s16: signed saturating add of an unsigned operand, 1 + 2 == 3.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqaddh_s16() {
        let a: i16 = 1;
        let b: u16 = 2;
        let e: i16 = 3;
        let r: i16 = vuqaddh_s16(a, b);
        assert_eq!(r, e);
    }
17923
    // vabs_f64: |-0.1| == 0.1 (exact: only the sign bit flips).
    #[simd_test(enable = "neon")]
    unsafe fn test_vabs_f64() {
        let a: f64 = -0.1;
        let e: f64 = 0.1;
        let r: f64 = transmute(vabs_f64(transmute(a)));
        assert_eq!(r, e);
    }
17931
    // vabsq_f64: lane-wise absolute value of negative inputs.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabsq_f64() {
        let a: f64x2 = f64x2::new(-0.1, -2.2);
        let e: f64x2 = f64x2::new(0.1, 2.2);
        let r: f64x2 = transmute(vabsq_f64(transmute(a)));
        assert_eq!(r, e);
    }
17939
    // vcgt_s64: 1 > 0 -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgt_s64() {
        let a: i64x1 = i64x1::new(1);
        let b: i64x1 = i64x1::new(0);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcgt_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17948
    // vcgtq_s64: both lanes strictly greater -> all-ones masks.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtq_s64() {
        let a: i64x2 = i64x2::new(1, 2);
        let b: i64x2 = i64x2::new(0, 1);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcgtq_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17957
    // vcgt_u64: 1 > 0 (unsigned) -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgt_u64() {
        let a: u64x1 = u64x1::new(1);
        let b: u64x1 = u64x1::new(0);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcgt_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17966
    // vcgtq_u64: both lanes strictly greater (unsigned) -> all-ones masks.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtq_u64() {
        let a: u64x2 = u64x2::new(1, 2);
        let b: u64x2 = u64x2::new(0, 1);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcgtq_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17975
    // vcgt_f64: 1.2 > 0.1 -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgt_f64() {
        let a: f64 = 1.2;
        let b: f64 = 0.1;
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcgt_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17984
    // vcgtq_f64: both float lanes strictly greater -> all-ones masks.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtq_f64() {
        let a: f64x2 = f64x2::new(1.2, 2.3);
        let b: f64x2 = f64x2::new(0.1, 1.2);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcgtq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
17993
    // vcgtd_s64 (scalar): 1 > 2 is false -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtd_s64() {
        let a: i64 = 1;
        let b: i64 = 2;
        let e: u64 = 0;
        let r: u64 = vcgtd_s64(a, b);
        assert_eq!(r, e);
    }
18002
    // vcgtd_u64 (scalar): 1 > 2 is false -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtd_u64() {
        let a: u64 = 1;
        let b: u64 = 2;
        let e: u64 = 0;
        let r: u64 = vcgtd_u64(a, b);
        assert_eq!(r, e);
    }
18011
    // vcgts_f32 (scalar): 1.0 > 2.0 is false -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgts_f32() {
        let a: f32 = 1.;
        let b: f32 = 2.;
        let e: u32 = 0;
        let r: u32 = vcgts_f32(a, b);
        assert_eq!(r, e);
    }
18020
    // vcgtd_f64 (scalar): 1.0 > 2.0 is false -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtd_f64() {
        let a: f64 = 1.;
        let b: f64 = 2.;
        let e: u64 = 0;
        let r: u64 = vcgtd_f64(a, b);
        assert_eq!(r, e);
    }
18029
    // vclt_s64: 0 < 1 -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vclt_s64() {
        let a: i64x1 = i64x1::new(0);
        let b: i64x1 = i64x1::new(1);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vclt_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
18038
    // vcltq_s64: both lanes strictly less -> all-ones masks.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcltq_s64() {
        let a: i64x2 = i64x2::new(0, 1);
        let b: i64x2 = i64x2::new(1, 2);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcltq_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
18047
    // vclt_u64: 0 < 1 (unsigned) -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vclt_u64() {
        let a: u64x1 = u64x1::new(0);
        let b: u64x1 = u64x1::new(1);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vclt_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
18056
    // vcltq_u64: both lanes strictly less (unsigned) -> all-ones masks.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcltq_u64() {
        let a: u64x2 = u64x2::new(0, 1);
        let b: u64x2 = u64x2::new(1, 2);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcltq_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
18065
    // vclt_f64: 0.1 < 1.2 -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vclt_f64() {
        let a: f64 = 0.1;
        let b: f64 = 1.2;
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vclt_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
18074
    // vcltq_f64: both float lanes strictly less -> all-ones masks.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcltq_f64() {
        let a: f64x2 = f64x2::new(0.1, 1.2);
        let b: f64x2 = f64x2::new(1.2, 2.3);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcltq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
18083
    // vcltd_s64 (scalar): 2 < 1 is false -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcltd_s64() {
        let a: i64 = 2;
        let b: i64 = 1;
        let e: u64 = 0;
        let r: u64 = vcltd_s64(a, b);
        assert_eq!(r, e);
    }
18092
    // vcltd_u64 (scalar): 2 < 1 is false -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcltd_u64() {
        let a: u64 = 2;
        let b: u64 = 1;
        let e: u64 = 0;
        let r: u64 = vcltd_u64(a, b);
        assert_eq!(r, e);
    }
18101
    // vclts_f32 (scalar): 2.0 < 1.0 is false -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vclts_f32() {
        let a: f32 = 2.;
        let b: f32 = 1.;
        let e: u32 = 0;
        let r: u32 = vclts_f32(a, b);
        assert_eq!(r, e);
    }
18110
    // vcltd_f64 (scalar): 2.0 < 1.0 is false -> 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcltd_f64() {
        let a: f64 = 2.;
        let b: f64 = 1.;
        let e: u64 = 0;
        let r: u64 = vcltd_f64(a, b);
        assert_eq!(r, e);
    }
18119
    // vcle_s64: 0 <= 1 -> all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcle_s64() {
        let a: i64x1 = i64x1::new(0);
        let b: i64x1 = i64x1::new(1);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcle_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
18128
    // vcleq_s64: both lanes less-or-equal -> all-ones masks.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcleq_s64() {
        let a: i64x2 = i64x2::new(0, 1);
        let b: i64x2 = i64x2::new(1, 2);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcleq_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
18137
    // Scalar compare-greater-than-or-equal tests: vcge{d,s}_{s64,u64,f32,f64}.
    // In each case a < b, so "a >= b" is false and the expected mask is 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcged_s64() {
        let a: i64 = 1;
        let b: i64 = 2;
        let e: u64 = 0;
        let r: u64 = vcged_s64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcged_u64() {
        let a: u64 = 1;
        let b: u64 = 2;
        let e: u64 = 0;
        let r: u64 = vcged_u64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcges_f32() {
        let a: f32 = 1.;
        let b: f32 = 2.;
        let e: u32 = 0;
        let r: u32 = vcges_f32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcged_f64() {
        let a: f64 = 1.;
        let b: f64 = 2.;
        let e: u64 = 0;
        let r: u64 = vcged_f64(a, b);
        assert_eq!(r, e);
    }
18173
    // Compare-less-than-or-equal: remaining vector forms (u64, f64) where every
    // lane satisfies a <= b (expect all-ones masks), followed by the scalar
    // vcle{d,s} forms where a > b (expect 0).
    #[simd_test(enable = "neon")]
    unsafe fn test_vcle_u64() {
        let a: u64x1 = u64x1::new(0);
        let b: u64x1 = u64x1::new(1);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcle_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcleq_u64() {
        let a: u64x2 = u64x2::new(0, 1);
        let b: u64x2 = u64x2::new(1, 2);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcleq_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcle_f64() {
        let a: f64 = 0.1;
        let b: f64 = 1.2;
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcle_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcleq_f64() {
        let a: f64x2 = f64x2::new(0.1, 1.2);
        let b: f64x2 = f64x2::new(1.2, 2.3);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcleq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcled_s64() {
        let a: i64 = 2;
        let b: i64 = 1;
        let e: u64 = 0;
        let r: u64 = vcled_s64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcled_u64() {
        let a: u64 = 2;
        let b: u64 = 1;
        let e: u64 = 0;
        let r: u64 = vcled_u64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcles_f32() {
        let a: f32 = 2.;
        let b: f32 = 1.;
        let e: u32 = 0;
        let r: u32 = vcles_f32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcled_f64() {
        let a: f64 = 2.;
        let b: f64 = 1.;
        let e: u64 = 0;
        let r: u64 = vcled_f64(a, b);
        assert_eq!(r, e);
    }
18245
    // Vector compare-greater-than-or-equal (s64/u64/f64, 64- and 128-bit).
    // Every lane has a >= b, so every result lane is the all-ones mask.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcge_s64() {
        let a: i64x1 = i64x1::new(1);
        let b: i64x1 = i64x1::new(0);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcge_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgeq_s64() {
        let a: i64x2 = i64x2::new(1, 2);
        let b: i64x2 = i64x2::new(0, 1);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcgeq_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcge_u64() {
        let a: u64x1 = u64x1::new(1);
        let b: u64x1 = u64x1::new(0);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcge_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgeq_u64() {
        let a: u64x2 = u64x2::new(1, 2);
        let b: u64x2 = u64x2::new(0, 1);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcgeq_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcge_f64() {
        let a: f64 = 1.2;
        let b: f64 = 0.1;
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcge_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgeq_f64() {
        let a: f64x2 = f64x2::new(1.2, 2.3);
        let b: f64x2 = f64x2::new(0.1, 1.2);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcgeq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
18299
    // Compare-greater-than-or-equal-to-zero (vcgez) family.
    // Per the expectations below: negative lanes -> 0, and 0 or positive
    // lanes -> all-ones (see the 0.0 lane in test_vcgez_f32, which maps to
    // 0xFF_FF_FF_FF).
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgez_s8() {
        let a: i8x8 = i8x8::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
        let e: u8x8 = u8x8::new(0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
        let r: u8x8 = transmute(vcgez_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgezq_s8() {
        let a: i8x16 = i8x16::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x7F);
        let e: u8x16 = u8x16::new(0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
        let r: u8x16 = transmute(vcgezq_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgez_s16() {
        let a: i16x4 = i16x4::new(-32768, -1, 0x00, 0x01);
        let e: u16x4 = u16x4::new(0, 0, 0xFF_FF, 0xFF_FF);
        let r: u16x4 = transmute(vcgez_s16(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgezq_s16() {
        let a: i16x8 = i16x8::new(-32768, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
        let e: u16x8 = u16x8::new(0, 0, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
        let r: u16x8 = transmute(vcgezq_s16(transmute(a)));
        assert_eq!(r, e);
    }

    // Narrow vectors only fit the negative inputs, so these expect all-zero masks.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgez_s32() {
        let a: i32x2 = i32x2::new(-2147483648, -1);
        let e: u32x2 = u32x2::new(0, 0);
        let r: u32x2 = transmute(vcgez_s32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgezq_s32() {
        let a: i32x4 = i32x4::new(-2147483648, -1, 0x00, 0x01);
        let e: u32x4 = u32x4::new(0, 0, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
        let r: u32x4 = transmute(vcgezq_s32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgez_s64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0);
        let r: u64x1 = transmute(vcgez_s64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgezq_s64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, -1);
        let e: u64x2 = u64x2::new(0, 0);
        let r: u64x2 = transmute(vcgezq_s64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgez_f32() {
        let a: f32x2 = f32x2::new(-1.2, 0.0);
        let e: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
        let r: u32x2 = transmute(vcgez_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgezq_f32() {
        let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
        let e: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
        let r: u32x4 = transmute(vcgezq_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgez_f64() {
        let a: f64 = -1.2;
        let e: u64x1 = u64x1::new(0);
        let r: u64x1 = transmute(vcgez_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgezq_f64() {
        let a: f64x2 = f64x2::new(-1.2, 0.0);
        let e: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcgezq_f64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar forms: negative input -> 0 ("a >= 0" false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgezd_s64() {
        let a: i64 = -1;
        let e: u64 = 0;
        let r: u64 = vcgezd_s64(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgezs_f32() {
        let a: f32 = -1.;
        let e: u32 = 0;
        let r: u32 = vcgezs_f32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgezd_f64() {
        let a: f64 = -1.;
        let e: u64 = 0;
        let r: u64 = vcgezd_f64(a);
        assert_eq!(r, e);
    }
18419
    // Compare-greater-than-zero (vcgtz) family. Unlike vcgez above, the zero
    // lane itself maps to 0 here (strict inequality): see the third lane of
    // test_vcgtz_s8 (input 0x00 -> 0) versus the 0xFF lanes for positives.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtz_s8() {
        let a: i8x8 = i8x8::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
        let e: u8x8 = u8x8::new(0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
        let r: u8x8 = transmute(vcgtz_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtzq_s8() {
        let a: i8x16 = i8x16::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x7F);
        let e: u8x16 = u8x16::new(0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
        let r: u8x16 = transmute(vcgtzq_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtz_s16() {
        let a: i16x4 = i16x4::new(-32768, -1, 0x00, 0x01);
        let e: u16x4 = u16x4::new(0, 0, 0, 0xFF_FF);
        let r: u16x4 = transmute(vcgtz_s16(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtzq_s16() {
        let a: i16x8 = i16x8::new(-32768, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
        let e: u16x8 = u16x8::new(0, 0, 0, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
        let r: u16x8 = transmute(vcgtzq_s16(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtz_s32() {
        let a: i32x2 = i32x2::new(-2147483648, -1);
        let e: u32x2 = u32x2::new(0, 0);
        let r: u32x2 = transmute(vcgtz_s32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtzq_s32() {
        let a: i32x4 = i32x4::new(-2147483648, -1, 0x00, 0x01);
        let e: u32x4 = u32x4::new(0, 0, 0, 0xFF_FF_FF_FF);
        let r: u32x4 = transmute(vcgtzq_s32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtz_s64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0);
        let r: u64x1 = transmute(vcgtz_s64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtzq_s64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, -1);
        let e: u64x2 = u64x2::new(0, 0);
        let r: u64x2 = transmute(vcgtzq_s64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtz_f32() {
        let a: f32x2 = f32x2::new(-1.2, 0.0);
        let e: u32x2 = u32x2::new(0, 0);
        let r: u32x2 = transmute(vcgtz_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtzq_f32() {
        let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
        let e: u32x4 = u32x4::new(0, 0, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
        let r: u32x4 = transmute(vcgtzq_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtz_f64() {
        let a: f64 = -1.2;
        let e: u64x1 = u64x1::new(0);
        let r: u64x1 = transmute(vcgtz_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtzq_f64() {
        let a: f64x2 = f64x2::new(-1.2, 0.0);
        let e: u64x2 = u64x2::new(0, 0);
        let r: u64x2 = transmute(vcgtzq_f64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar forms: negative input -> 0 ("a > 0" false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtzd_s64() {
        let a: i64 = -1;
        let e: u64 = 0;
        let r: u64 = vcgtzd_s64(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtzs_f32() {
        let a: f32 = -1.;
        let e: u32 = 0;
        let r: u32 = vcgtzs_f32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcgtzd_f64() {
        let a: f64 = -1.;
        let e: u64 = 0;
        let r: u64 = vcgtzd_f64(a);
        assert_eq!(r, e);
    }
18539
    // Compare-less-than-or-equal-to-zero (vclez) family: negative and zero
    // lanes map to all-ones, strictly positive lanes to 0 (the complement of
    // vcgtz above on the same inputs).
    #[simd_test(enable = "neon")]
    unsafe fn test_vclez_s8() {
        let a: i8x8 = i8x8::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
        let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0, 0, 0, 0, 0);
        let r: u8x8 = transmute(vclez_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclezq_s8() {
        let a: i8x16 = i8x16::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x7F);
        let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
        let r: u8x16 = transmute(vclezq_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclez_s16() {
        let a: i16x4 = i16x4::new(-32768, -1, 0x00, 0x01);
        let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0);
        let r: u16x4 = transmute(vclez_s16(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclezq_s16() {
        let a: i16x8 = i16x8::new(-32768, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
        let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0, 0, 0, 0, 0);
        let r: u16x8 = transmute(vclezq_s16(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclez_s32() {
        let a: i32x2 = i32x2::new(-2147483648, -1);
        let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
        let r: u32x2 = transmute(vclez_s32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclezq_s32() {
        let a: i32x4 = i32x4::new(-2147483648, -1, 0x00, 0x01);
        let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0);
        let r: u32x4 = transmute(vclezq_s32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclez_s64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vclez_s64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclezq_s64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, -1);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vclezq_s64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclez_f32() {
        let a: f32x2 = f32x2::new(-1.2, 0.0);
        let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
        let r: u32x2 = transmute(vclez_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclezq_f32() {
        let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
        let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0, 0);
        let r: u32x4 = transmute(vclezq_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclez_f64() {
        let a: f64 = -1.2;
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vclez_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclezq_f64() {
        let a: f64x2 = f64x2::new(-1.2, 0.0);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vclezq_f64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar forms: positive input -> 0 ("a <= 0" false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vclezd_s64() {
        let a: i64 = 2;
        let e: u64 = 0;
        let r: u64 = vclezd_s64(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclezs_f32() {
        let a: f32 = 2.;
        let e: u32 = 0;
        let r: u32 = vclezs_f32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vclezd_f64() {
        let a: f64 = 2.;
        let e: u64 = 0;
        let r: u64 = vclezd_f64(a);
        assert_eq!(r, e);
    }
18659
    // Compare-less-than-zero (vcltz) family: strictly negative lanes map to
    // all-ones; zero and positive lanes to 0 (note the 0.0 lane -> 0 in
    // test_vcltz_f32, distinguishing this from vclez above).
    #[simd_test(enable = "neon")]
    unsafe fn test_vcltz_s8() {
        let a: i8x8 = i8x8::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
        let e: u8x8 = u8x8::new(0xFF, 0xFF, 0, 0, 0, 0, 0, 0);
        let r: u8x8 = transmute(vcltz_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltzq_s8() {
        let a: i8x16 = i8x16::new(-128, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x7F);
        let e: u8x16 = u8x16::new(0xFF, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
        let r: u8x16 = transmute(vcltzq_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltz_s16() {
        let a: i16x4 = i16x4::new(-32768, -1, 0x00, 0x01);
        let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0, 0);
        let r: u16x4 = transmute(vcltz_s16(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltzq_s16() {
        let a: i16x8 = i16x8::new(-32768, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);
        let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0, 0, 0, 0, 0, 0);
        let r: u16x8 = transmute(vcltzq_s16(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltz_s32() {
        let a: i32x2 = i32x2::new(-2147483648, -1);
        let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
        let r: u32x2 = transmute(vcltz_s32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltzq_s32() {
        let a: i32x4 = i32x4::new(-2147483648, -1, 0x00, 0x01);
        let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0, 0);
        let r: u32x4 = transmute(vcltzq_s32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltz_s64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcltz_s64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltzq_s64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, -1);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcltzq_s64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltz_f32() {
        let a: f32x2 = f32x2::new(-1.2, 0.0);
        let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
        let r: u32x2 = transmute(vcltz_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltzq_f32() {
        let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
        let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 0, 0);
        let r: u32x4 = transmute(vcltzq_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltz_f64() {
        let a: f64 = -1.2;
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcltz_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltzq_f64() {
        let a: f64x2 = f64x2::new(-1.2, 0.0);
        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
        let r: u64x2 = transmute(vcltzq_f64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar forms: positive input -> 0 ("a < 0" false).
    #[simd_test(enable = "neon")]
    unsafe fn test_vcltzd_s64() {
        let a: i64 = 2;
        let e: u64 = 0;
        let r: u64 = vcltzd_s64(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltzs_f32() {
        let a: f32 = 2.;
        let e: u32 = 0;
        let r: u32 = vcltzs_f32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcltzd_f64() {
        let a: f64 = 2.;
        let e: u64 = 0;
        let r: u64 = vcltzd_f64(a);
        assert_eq!(r, e);
    }
18779
    // Absolute-value compares vcagt (|a| > |b|) and vcage (|a| >= |b|).
    // The inputs make the absolute semantics observable: with a = -1.2 and
    // b = -1.1, a > b is false but |a| > |b| is true, and the expected mask
    // is !0 (all-ones). The |0.0| lane differs between the two: vcagtq's
    // second lane expects 0 (strict), vcageq's expects all-ones (inclusive).
    #[simd_test(enable = "neon")]
    unsafe fn test_vcagt_f64() {
        let a: f64 = -1.2;
        let b: f64 = -1.1;
        let e: u64x1 = u64x1::new(!0);
        let r: u64x1 = transmute(vcagt_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcagtq_f64() {
        let a: f64x2 = f64x2::new(-1.2, 0.0);
        let b: f64x2 = f64x2::new(-1.1, 0.0);
        let e: u64x2 = u64x2::new(!0, 0);
        let r: u64x2 = transmute(vcagtq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcagts_f32() {
        let a: f32 = -1.2;
        let b: f32 = -1.1;
        let e: u32 = !0;
        let r: u32 = vcagts_f32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcagtd_f64() {
        let a: f64 = -1.2;
        let b: f64 = -1.1;
        let e: u64 = !0;
        let r: u64 = vcagtd_f64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcage_f64() {
        let a: f64 = -1.2;
        let b: f64 = -1.1;
        let e: u64x1 = u64x1::new(!0);
        let r: u64x1 = transmute(vcage_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcageq_f64() {
        let a: f64x2 = f64x2::new(-1.2, 0.0);
        let b: f64x2 = f64x2::new(-1.1, 0.0);
        let e: u64x2 = u64x2::new(!0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcageq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcages_f32() {
        let a: f32 = -1.2;
        let b: f32 = -1.1;
        let e: u32 = !0;
        let r: u32 = vcages_f32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcaged_f64() {
        let a: f64 = -1.2;
        let b: f64 = -1.1;
        let e: u64 = !0;
        let r: u64 = vcaged_f64(a, b);
        assert_eq!(r, e);
    }
18851
    // Absolute-value compares vcalt (|a| < |b|) and vcale (|a| <= |b|) —
    // mirror images of vcagt/vcage above on the same inputs. With |a| = 1.2
    // and |b| = 1.1, |a| < |b| and |a| <= |b| are both false (expect 0);
    // only vcaleq's |0.0| <= |0.0| lane expects all-ones.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcalt_f64() {
        let a: f64 = -1.2;
        let b: f64 = -1.1;
        let e: u64x1 = u64x1::new(0);
        let r: u64x1 = transmute(vcalt_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcaltq_f64() {
        let a: f64x2 = f64x2::new(-1.2, 0.0);
        let b: f64x2 = f64x2::new(-1.1, 0.0);
        let e: u64x2 = u64x2::new(0, 0);
        let r: u64x2 = transmute(vcaltq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcalts_f32() {
        let a: f32 = -1.2;
        let b: f32 = -1.1;
        let e: u32 = 0;
        let r: u32 = vcalts_f32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcaltd_f64() {
        let a: f64 = -1.2;
        let b: f64 = -1.1;
        let e: u64 = 0;
        let r: u64 = vcaltd_f64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcale_f64() {
        let a: f64 = -1.2;
        let b: f64 = -1.1;
        let e: u64x1 = u64x1::new(0);
        let r: u64x1 = transmute(vcale_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcaleq_f64() {
        let a: f64x2 = f64x2::new(-1.2, 0.0);
        let b: f64x2 = f64x2::new(-1.1, 0.0);
        let e: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x2 = transmute(vcaleq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcales_f32() {
        let a: f32 = -1.2;
        let b: f32 = -1.1;
        let e: u32 = 0;
        let r: u32 = vcales_f32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcaled_f64() {
        let a: f64 = -1.2;
        let b: f64 = -1.1;
        let e: u64 = 0;
        let r: u64 = vcaled_f64(a, b);
        assert_eq!(r, e);
    }
18923
    // Lane-copy tests: vcopy_lane / vcopyq_laneq with const generics <0, 1>,
    // i.e. destination lane 0 of `a` is replaced by source lane 1 of `b`.
    // Each test places the type's max value (or all-ones for unsigned) in
    // b's lane 1 and checks it lands in result lane 0 while all other lanes
    // of `a` pass through unchanged.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_lane_s8() {
        let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
        let b: i8x8 = i8x8::new(0, 0x7F, 0, 0, 0, 0, 0, 0);
        let e: i8x8 = i8x8::new(0x7F, 2, 3, 4, 5, 6, 7, 8);
        let r: i8x8 = transmute(vcopy_lane_s8::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopyq_laneq_s8() {
        let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
        let b: i8x16 = i8x16::new(0, 0x7F, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
        let e: i8x16 = i8x16::new(0x7F, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
        let r: i8x16 = transmute(vcopyq_laneq_s8::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_lane_s16() {
        let a: i16x4 = i16x4::new(1, 2, 3, 4);
        let b: i16x4 = i16x4::new(0, 0x7F_FF, 0, 0);
        let e: i16x4 = i16x4::new(0x7F_FF, 2, 3, 4);
        let r: i16x4 = transmute(vcopy_lane_s16::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopyq_laneq_s16() {
        let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
        let b: i16x8 = i16x8::new(0, 0x7F_FF, 0, 0, 0, 0, 0, 0);
        let e: i16x8 = i16x8::new(0x7F_FF, 2, 3, 4, 5, 6, 7, 8);
        let r: i16x8 = transmute(vcopyq_laneq_s16::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_lane_s32() {
        let a: i32x2 = i32x2::new(1, 2);
        let b: i32x2 = i32x2::new(0, 0x7F_FF_FF_FF);
        let e: i32x2 = i32x2::new(0x7F_FF_FF_FF, 2);
        let r: i32x2 = transmute(vcopy_lane_s32::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopyq_laneq_s32() {
        let a: i32x4 = i32x4::new(1, 2, 3, 4);
        let b: i32x4 = i32x4::new(0, 0x7F_FF_FF_FF, 0, 0);
        let e: i32x4 = i32x4::new(0x7F_FF_FF_FF, 2, 3, 4);
        let r: i32x4 = transmute(vcopyq_laneq_s32::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopyq_laneq_s64() {
        let a: i64x2 = i64x2::new(1, 2);
        let b: i64x2 = i64x2::new(0, 0x7F_FF_FF_FF_FF_FF_FF_FF);
        let e: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 2);
        let r: i64x2 = transmute(vcopyq_laneq_s64::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_lane_u8() {
        let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
        let b: u8x8 = u8x8::new(0, 0xFF, 0, 0, 0, 0, 0, 0);
        let e: u8x8 = u8x8::new(0xFF, 2, 3, 4, 5, 6, 7, 8);
        let r: u8x8 = transmute(vcopy_lane_u8::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopyq_laneq_u8() {
        let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
        let b: u8x16 = u8x16::new(0, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
        let e: u8x16 = u8x16::new(0xFF, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
        let r: u8x16 = transmute(vcopyq_laneq_u8::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_lane_u16() {
        let a: u16x4 = u16x4::new(1, 2, 3, 4);
        let b: u16x4 = u16x4::new(0, 0xFF_FF, 0, 0);
        let e: u16x4 = u16x4::new(0xFF_FF, 2, 3, 4);
        let r: u16x4 = transmute(vcopy_lane_u16::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopyq_laneq_u16() {
        let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
        let b: u16x8 = u16x8::new(0, 0xFF_FF, 0, 0, 0, 0, 0, 0);
        let e: u16x8 = u16x8::new(0xFF_FF, 2, 3, 4, 5, 6, 7, 8);
        let r: u16x8 = transmute(vcopyq_laneq_u16::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
19021    }
19022
19023    #[simd_test(enable = "neon")]
19024    unsafe fn test_vcopy_lane_u32() {
19025        let a: u32x2 = u32x2::new(1, 2);
19026        let b: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
19027        let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 2);
19028        let r: u32x2 = transmute(vcopy_lane_u32::<0, 1>(transmute(a), transmute(b)));
19029        assert_eq!(r, e);
19030    }
19031
19032    #[simd_test(enable = "neon")]
19033    unsafe fn test_vcopyq_laneq_u32() {
19034        let a: u32x4 = u32x4::new(1, 2, 3, 4);
19035        let b: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0, 0);
19036        let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 2, 3, 4);
19037        let r: u32x4 = transmute(vcopyq_laneq_u32::<0, 1>(transmute(a), transmute(b)));
19038        assert_eq!(r, e);
19039    }
19040
19041    #[simd_test(enable = "neon")]
19042    unsafe fn test_vcopyq_laneq_u64() {
19043        let a: u64x2 = u64x2::new(1, 2);
19044        let b: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
19045        let e: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 2);
19046        let r: u64x2 = transmute(vcopyq_laneq_u64::<0, 1>(transmute(a), transmute(b)));
19047        assert_eq!(r, e);
19048    }
19049
19050    #[simd_test(enable = "neon")]
19051    unsafe fn test_vcopy_lane_p8() {
19052        let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19053        let b: i8x8 = i8x8::new(0, 0x7F, 0, 0, 0, 0, 0, 0);
19054        let e: i8x8 = i8x8::new(0x7F, 2, 3, 4, 5, 6, 7, 8);
19055        let r: i8x8 = transmute(vcopy_lane_p8::<0, 1>(transmute(a), transmute(b)));
19056        assert_eq!(r, e);
19057    }
19058
19059    #[simd_test(enable = "neon")]
19060    unsafe fn test_vcopyq_laneq_p8() {
19061        let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19062        let b: i8x16 = i8x16::new(0, 0x7F, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
19063        let e: i8x16 = i8x16::new(0x7F, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19064        let r: i8x16 = transmute(vcopyq_laneq_p8::<0, 1>(transmute(a), transmute(b)));
19065        assert_eq!(r, e);
19066    }
19067
19068    #[simd_test(enable = "neon")]
19069    unsafe fn test_vcopy_lane_p16() {
19070        let a: i16x4 = i16x4::new(1, 2, 3, 4);
19071        let b: i16x4 = i16x4::new(0, 0x7F_FF, 0, 0);
19072        let e: i16x4 = i16x4::new(0x7F_FF, 2, 3, 4);
19073        let r: i16x4 = transmute(vcopy_lane_p16::<0, 1>(transmute(a), transmute(b)));
19074        assert_eq!(r, e);
19075    }
19076
19077    #[simd_test(enable = "neon")]
19078    unsafe fn test_vcopyq_laneq_p16() {
19079        let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19080        let b: i16x8 = i16x8::new(0, 0x7F_FF, 0, 0, 0, 0, 0, 0);
19081        let e: i16x8 = i16x8::new(0x7F_FF, 2, 3, 4, 5, 6, 7, 8);
19082        let r: i16x8 = transmute(vcopyq_laneq_p16::<0, 1>(transmute(a), transmute(b)));
19083        assert_eq!(r, e);
19084    }
19085
19086    #[simd_test(enable = "neon")]
19087    unsafe fn test_vcopyq_laneq_p64() {
19088        let a: i64x2 = i64x2::new(1, 2);
19089        let b: i64x2 = i64x2::new(0, 0x7F_FF_FF_FF_FF_FF_FF_FF);
19090        let e: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 2);
19091        let r: i64x2 = transmute(vcopyq_laneq_p64::<0, 1>(transmute(a), transmute(b)));
19092        assert_eq!(r, e);
19093    }
19094
19095    #[simd_test(enable = "neon")]
19096    unsafe fn test_vcopy_lane_f32() {
19097        let a: f32x2 = f32x2::new(1., 2.);
19098        let b: f32x2 = f32x2::new(0., 0.5);
19099        let e: f32x2 = f32x2::new(0.5, 2.);
19100        let r: f32x2 = transmute(vcopy_lane_f32::<0, 1>(transmute(a), transmute(b)));
19101        assert_eq!(r, e);
19102    }
19103
19104    #[simd_test(enable = "neon")]
19105    unsafe fn test_vcopyq_laneq_f32() {
19106        let a: f32x4 = f32x4::new(1., 2., 3., 4.);
19107        let b: f32x4 = f32x4::new(0., 0.5, 0., 0.);
19108        let e: f32x4 = f32x4::new(0.5, 2., 3., 4.);
19109        let r: f32x4 = transmute(vcopyq_laneq_f32::<0, 1>(transmute(a), transmute(b)));
19110        assert_eq!(r, e);
19111    }
19112
19113    #[simd_test(enable = "neon")]
19114    unsafe fn test_vcopyq_laneq_f64() {
19115        let a: f64x2 = f64x2::new(1., 2.);
19116        let b: f64x2 = f64x2::new(0., 0.5);
19117        let e: f64x2 = f64x2::new(0.5, 2.);
19118        let r: f64x2 = transmute(vcopyq_laneq_f64::<0, 1>(transmute(a), transmute(b)));
19119        assert_eq!(r, e);
19120    }
19121
19122    #[simd_test(enable = "neon")]
19123    unsafe fn test_vcopy_laneq_s8() {
19124        let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19125        let b: i8x16 = i8x16::new(0, 0x7F, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
19126        let e: i8x8 = i8x8::new(0x7F, 2, 3, 4, 5, 6, 7, 8);
19127        let r: i8x8 = transmute(vcopy_laneq_s8::<0, 1>(transmute(a), transmute(b)));
19128        assert_eq!(r, e);
19129    }
19130
19131    #[simd_test(enable = "neon")]
19132    unsafe fn test_vcopy_laneq_s16() {
19133        let a: i16x4 = i16x4::new(1, 2, 3, 4);
19134        let b: i16x8 = i16x8::new(0, 0x7F_FF, 0, 0, 0, 0, 0, 0);
19135        let e: i16x4 = i16x4::new(0x7F_FF, 2, 3, 4);
19136        let r: i16x4 = transmute(vcopy_laneq_s16::<0, 1>(transmute(a), transmute(b)));
19137        assert_eq!(r, e);
19138    }
19139
19140    #[simd_test(enable = "neon")]
19141    unsafe fn test_vcopy_laneq_s32() {
19142        let a: i32x2 = i32x2::new(1, 2);
19143        let b: i32x4 = i32x4::new(0, 0x7F_FF_FF_FF, 0, 0);
19144        let e: i32x2 = i32x2::new(0x7F_FF_FF_FF, 2);
19145        let r: i32x2 = transmute(vcopy_laneq_s32::<0, 1>(transmute(a), transmute(b)));
19146        assert_eq!(r, e);
19147    }
19148
19149    #[simd_test(enable = "neon")]
19150    unsafe fn test_vcopy_laneq_u8() {
19151        let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19152        let b: u8x16 = u8x16::new(0, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
19153        let e: u8x8 = u8x8::new(0xFF, 2, 3, 4, 5, 6, 7, 8);
19154        let r: u8x8 = transmute(vcopy_laneq_u8::<0, 1>(transmute(a), transmute(b)));
19155        assert_eq!(r, e);
19156    }
19157
19158    #[simd_test(enable = "neon")]
19159    unsafe fn test_vcopy_laneq_u16() {
19160        let a: u16x4 = u16x4::new(1, 2, 3, 4);
19161        let b: u16x8 = u16x8::new(0, 0xFF_FF, 0, 0, 0, 0, 0, 0);
19162        let e: u16x4 = u16x4::new(0xFF_FF, 2, 3, 4);
19163        let r: u16x4 = transmute(vcopy_laneq_u16::<0, 1>(transmute(a), transmute(b)));
19164        assert_eq!(r, e);
19165    }
19166
19167    #[simd_test(enable = "neon")]
19168    unsafe fn test_vcopy_laneq_u32() {
19169        let a: u32x2 = u32x2::new(1, 2);
19170        let b: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0, 0);
19171        let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 2);
19172        let r: u32x2 = transmute(vcopy_laneq_u32::<0, 1>(transmute(a), transmute(b)));
19173        assert_eq!(r, e);
19174    }
19175
19176    #[simd_test(enable = "neon")]
19177    unsafe fn test_vcopy_laneq_p8() {
19178        let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19179        let b: i8x16 = i8x16::new(0, 0x7F, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
19180        let e: i8x8 = i8x8::new(0x7F, 2, 3, 4, 5, 6, 7, 8);
19181        let r: i8x8 = transmute(vcopy_laneq_p8::<0, 1>(transmute(a), transmute(b)));
19182        assert_eq!(r, e);
19183    }
19184
19185    #[simd_test(enable = "neon")]
19186    unsafe fn test_vcopy_laneq_p16() {
19187        let a: i16x4 = i16x4::new(1, 2, 3, 4);
19188        let b: i16x8 = i16x8::new(0, 0x7F_FF, 0, 0, 0, 0, 0, 0);
19189        let e: i16x4 = i16x4::new(0x7F_FF, 2, 3, 4);
19190        let r: i16x4 = transmute(vcopy_laneq_p16::<0, 1>(transmute(a), transmute(b)));
19191        assert_eq!(r, e);
19192    }
19193
19194    #[simd_test(enable = "neon")]
19195    unsafe fn test_vcopy_laneq_f32() {
19196        let a: f32x2 = f32x2::new(1., 2.);
19197        let b: f32x4 = f32x4::new(0., 0.5, 0., 0.);
19198        let e: f32x2 = f32x2::new(0.5, 2.);
19199        let r: f32x2 = transmute(vcopy_laneq_f32::<0, 1>(transmute(a), transmute(b)));
19200        assert_eq!(r, e);
19201    }
19202
19203    #[simd_test(enable = "neon")]
19204    unsafe fn test_vcopyq_lane_s8() {
19205        let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19206        let b: i8x8 = i8x8::new(0, 0x7F, 0, 0, 0, 0, 0, 0);
19207        let e: i8x16 = i8x16::new(0x7F, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19208        let r: i8x16 = transmute(vcopyq_lane_s8::<0, 1>(transmute(a), transmute(b)));
19209        assert_eq!(r, e);
19210    }
19211
19212    #[simd_test(enable = "neon")]
19213    unsafe fn test_vcopyq_lane_s16() {
19214        let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19215        let b: i16x4 = i16x4::new(0, 0x7F_FF, 0, 0);
19216        let e: i16x8 = i16x8::new(0x7F_FF, 2, 3, 4, 5, 6, 7, 8);
19217        let r: i16x8 = transmute(vcopyq_lane_s16::<0, 1>(transmute(a), transmute(b)));
19218        assert_eq!(r, e);
19219    }
19220
19221    #[simd_test(enable = "neon")]
19222    unsafe fn test_vcopyq_lane_s32() {
19223        let a: i32x4 = i32x4::new(1, 2, 3, 4);
19224        let b: i32x2 = i32x2::new(0, 0x7F_FF_FF_FF);
19225        let e: i32x4 = i32x4::new(0x7F_FF_FF_FF, 2, 3, 4);
19226        let r: i32x4 = transmute(vcopyq_lane_s32::<0, 1>(transmute(a), transmute(b)));
19227        assert_eq!(r, e);
19228    }
19229
19230    #[simd_test(enable = "neon")]
19231    unsafe fn test_vcopyq_lane_u8() {
19232        let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19233        let b: u8x8 = u8x8::new(0, 0xFF, 0, 0, 0, 0, 0, 0);
19234        let e: u8x16 = u8x16::new(0xFF, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19235        let r: u8x16 = transmute(vcopyq_lane_u8::<0, 1>(transmute(a), transmute(b)));
19236        assert_eq!(r, e);
19237    }
19238
19239    #[simd_test(enable = "neon")]
19240    unsafe fn test_vcopyq_lane_u16() {
19241        let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19242        let b: u16x4 = u16x4::new(0, 0xFF_FF, 0, 0);
19243        let e: u16x8 = u16x8::new(0xFF_FF, 2, 3, 4, 5, 6, 7, 8);
19244        let r: u16x8 = transmute(vcopyq_lane_u16::<0, 1>(transmute(a), transmute(b)));
19245        assert_eq!(r, e);
19246    }
19247
19248    #[simd_test(enable = "neon")]
19249    unsafe fn test_vcopyq_lane_u32() {
19250        let a: u32x4 = u32x4::new(1, 2, 3, 4);
19251        let b: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
19252        let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 2, 3, 4);
19253        let r: u32x4 = transmute(vcopyq_lane_u32::<0, 1>(transmute(a), transmute(b)));
19254        assert_eq!(r, e);
19255    }
19256
19257    #[simd_test(enable = "neon")]
19258    unsafe fn test_vcopyq_lane_p8() {
19259        let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19260        let b: i8x8 = i8x8::new(0, 0x7F, 0, 0, 0, 0, 0, 0);
19261        let e: i8x16 = i8x16::new(0x7F, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
19262        let r: i8x16 = transmute(vcopyq_lane_p8::<0, 1>(transmute(a), transmute(b)));
19263        assert_eq!(r, e);
19264    }
19265
19266    #[simd_test(enable = "neon")]
19267    unsafe fn test_vcopyq_lane_p16() {
19268        let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
19269        let b: i16x4 = i16x4::new(0, 0x7F_FF, 0, 0);
19270        let e: i16x8 = i16x8::new(0x7F_FF, 2, 3, 4, 5, 6, 7, 8);
19271        let r: i16x8 = transmute(vcopyq_lane_p16::<0, 1>(transmute(a), transmute(b)));
19272        assert_eq!(r, e);
19273    }
19274
19275    #[simd_test(enable = "neon")]
19276    unsafe fn test_vcopyq_lane_s64() {
19277        let a: i64x2 = i64x2::new(1, 2);
19278        let b: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
19279        let e: i64x2 = i64x2::new(1, 0x7F_FF_FF_FF_FF_FF_FF_FF);
19280        let r: i64x2 = transmute(vcopyq_lane_s64::<1, 0>(transmute(a), transmute(b)));
19281        assert_eq!(r, e);
19282    }
19283
19284    #[simd_test(enable = "neon")]
19285    unsafe fn test_vcopyq_lane_u64() {
19286        let a: u64x2 = u64x2::new(1, 2);
19287        let b: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
19288        let e: u64x2 = u64x2::new(1, 0xFF_FF_FF_FF_FF_FF_FF_FF);
19289        let r: u64x2 = transmute(vcopyq_lane_u64::<1, 0>(transmute(a), transmute(b)));
19290        assert_eq!(r, e);
19291    }
19292
19293    #[simd_test(enable = "neon")]
19294    unsafe fn test_vcopyq_lane_p64() {
19295        let a: i64x2 = i64x2::new(1, 2);
19296        let b: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
19297        let e: i64x2 = i64x2::new(1, 0x7F_FF_FF_FF_FF_FF_FF_FF);
19298        let r: i64x2 = transmute(vcopyq_lane_p64::<1, 0>(transmute(a), transmute(b)));
19299        assert_eq!(r, e);
19300    }
19301
19302    #[simd_test(enable = "neon")]
19303    unsafe fn test_vcopyq_lane_f32() {
19304        let a: f32x4 = f32x4::new(1., 2., 3., 4.);
19305        let b: f32x2 = f32x2::new(0.5, 0.);
19306        let e: f32x4 = f32x4::new(1., 0.5, 3., 4.);
19307        let r: f32x4 = transmute(vcopyq_lane_f32::<1, 0>(transmute(a), transmute(b)));
19308        assert_eq!(r, e);
19309    }
19310
19311    #[simd_test(enable = "neon")]
19312    unsafe fn test_vcopyq_lane_f64() {
19313        let a: f64x2 = f64x2::new(1., 2.);
19314        let b: f64 = 0.5;
19315        let e: f64x2 = f64x2::new(1., 0.5);
19316        let r: f64x2 = transmute(vcopyq_lane_f64::<1, 0>(transmute(a), transmute(b)));
19317        assert_eq!(r, e);
19318    }
19319
19320    #[simd_test(enable = "neon")]
19321    unsafe fn test_vcreate_f64() {
19322        let a: u64 = 0;
19323        let e: f64 = 0.;
19324        let r: f64 = transmute(vcreate_f64(a));
19325        assert_eq!(r, e);
19326    }
19327
19328    #[simd_test(enable = "neon")]
19329    unsafe fn test_vcvt_f64_s64() {
19330        let a: i64x1 = i64x1::new(1);
19331        let e: f64 = 1.;
19332        let r: f64 = transmute(vcvt_f64_s64(transmute(a)));
19333        assert_eq!(r, e);
19334    }
19335
19336    #[simd_test(enable = "neon")]
19337    unsafe fn test_vcvtq_f64_s64() {
19338        let a: i64x2 = i64x2::new(1, 2);
19339        let e: f64x2 = f64x2::new(1., 2.);
19340        let r: f64x2 = transmute(vcvtq_f64_s64(transmute(a)));
19341        assert_eq!(r, e);
19342    }
19343
19344    #[simd_test(enable = "neon")]
19345    unsafe fn test_vcvt_f64_u64() {
19346        let a: u64x1 = u64x1::new(1);
19347        let e: f64 = 1.;
19348        let r: f64 = transmute(vcvt_f64_u64(transmute(a)));
19349        assert_eq!(r, e);
19350    }
19351
19352    #[simd_test(enable = "neon")]
19353    unsafe fn test_vcvtq_f64_u64() {
19354        let a: u64x2 = u64x2::new(1, 2);
19355        let e: f64x2 = f64x2::new(1., 2.);
19356        let r: f64x2 = transmute(vcvtq_f64_u64(transmute(a)));
19357        assert_eq!(r, e);
19358    }
19359
19360    #[simd_test(enable = "neon")]
19361    unsafe fn test_vcvt_f64_f32() {
19362        let a: f32x2 = f32x2::new(-1.2, 1.2);
19363        let e: f64x2 = f64x2::new(-1.2f32 as f64, 1.2f32 as f64);
19364        let r: f64x2 = transmute(vcvt_f64_f32(transmute(a)));
19365        assert_eq!(r, e);
19366    }
19367
19368    #[simd_test(enable = "neon")]
19369    unsafe fn test_vcvt_high_f64_f32() {
19370        let a: f32x4 = f32x4::new(-1.2, 1.2, 2.3, 3.4);
19371        let e: f64x2 = f64x2::new(2.3f32 as f64, 3.4f32 as f64);
19372        let r: f64x2 = transmute(vcvt_high_f64_f32(transmute(a)));
19373        assert_eq!(r, e);
19374    }
19375
19376    #[simd_test(enable = "neon")]
19377    unsafe fn test_vcvt_f32_f64() {
19378        let a: f64x2 = f64x2::new(-1.2, 1.2);
19379        let e: f32x2 = f32x2::new(-1.2f64 as f32, 1.2f64 as f32);
19380        let r: f32x2 = transmute(vcvt_f32_f64(transmute(a)));
19381        assert_eq!(r, e);
19382    }
19383
19384    #[simd_test(enable = "neon")]
19385    unsafe fn test_vcvt_high_f32_f64() {
19386        let a: f32x2 = f32x2::new(-1.2, 1.2);
19387        let b: f64x2 = f64x2::new(-2.3, 3.4);
19388        let e: f32x4 = f32x4::new(-1.2, 1.2, -2.3f64 as f32, 3.4f64 as f32);
19389        let r: f32x4 = transmute(vcvt_high_f32_f64(transmute(a), transmute(b)));
19390        assert_eq!(r, e);
19391    }
19392
19393    #[simd_test(enable = "neon")]
19394    unsafe fn test_vcvtx_f32_f64() {
19395        let a: f64x2 = f64x2::new(-1.0, 2.0);
19396        let e: f32x2 = f32x2::new(-1.0, 2.0);
19397        let r: f32x2 = transmute(vcvtx_f32_f64(transmute(a)));
19398        assert_eq!(r, e);
19399    }
19400
19401    #[simd_test(enable = "neon")]
19402    unsafe fn test_vcvtxd_f32_f64() {
19403        let a: f64 = -1.0;
19404        let e: f32 = -1.0;
19405        let r: f32 = vcvtxd_f32_f64(a);
19406        assert_eq!(r, e);
19407    }
19408
19409    #[simd_test(enable = "neon")]
19410    unsafe fn test_vcvtx_high_f32_f64() {
19411        let a: f32x2 = f32x2::new(-1.0, 2.0);
19412        let b: f64x2 = f64x2::new(-3.0, 4.0);
19413        let e: f32x4 = f32x4::new(-1.0, 2.0, -3.0, 4.0);
19414        let r: f32x4 = transmute(vcvtx_high_f32_f64(transmute(a), transmute(b)));
19415        assert_eq!(r, e);
19416    }
19417
19418    #[simd_test(enable = "neon")]
19419    unsafe fn test_vcvt_n_f64_s64() {
19420        let a: i64x1 = i64x1::new(1);
19421        let e: f64 = 0.25;
19422        let r: f64 = transmute(vcvt_n_f64_s64::<2>(transmute(a)));
19423        assert_eq!(r, e);
19424    }
19425
19426    #[simd_test(enable = "neon")]
19427    unsafe fn test_vcvtq_n_f64_s64() {
19428        let a: i64x2 = i64x2::new(1, 2);
19429        let e: f64x2 = f64x2::new(0.25, 0.5);
19430        let r: f64x2 = transmute(vcvtq_n_f64_s64::<2>(transmute(a)));
19431        assert_eq!(r, e);
19432    }
19433
19434    #[simd_test(enable = "neon")]
19435    unsafe fn test_vcvts_n_f32_s32() {
19436        let a: i32 = 1;
19437        let e: f32 = 0.25;
19438        let r: f32 = vcvts_n_f32_s32::<2>(a);
19439        assert_eq!(r, e);
19440    }
19441
19442    #[simd_test(enable = "neon")]
19443    unsafe fn test_vcvtd_n_f64_s64() {
19444        let a: i64 = 1;
19445        let e: f64 = 0.25;
19446        let r: f64 = vcvtd_n_f64_s64::<2>(a);
19447        assert_eq!(r, e);
19448    }
19449
19450    #[simd_test(enable = "neon")]
19451    unsafe fn test_vcvt_n_f64_u64() {
19452        let a: u64x1 = u64x1::new(1);
19453        let e: f64 = 0.25;
19454        let r: f64 = transmute(vcvt_n_f64_u64::<2>(transmute(a)));
19455        assert_eq!(r, e);
19456    }
19457
19458    #[simd_test(enable = "neon")]
19459    unsafe fn test_vcvtq_n_f64_u64() {
19460        let a: u64x2 = u64x2::new(1, 2);
19461        let e: f64x2 = f64x2::new(0.25, 0.5);
19462        let r: f64x2 = transmute(vcvtq_n_f64_u64::<2>(transmute(a)));
19463        assert_eq!(r, e);
19464    }
19465
19466    #[simd_test(enable = "neon")]
19467    unsafe fn test_vcvts_n_f32_u32() {
19468        let a: u32 = 1;
19469        let e: f32 = 0.25;
19470        let r: f32 = vcvts_n_f32_u32::<2>(a);
19471        assert_eq!(r, e);
19472    }
19473
19474    #[simd_test(enable = "neon")]
19475    unsafe fn test_vcvtd_n_f64_u64() {
19476        let a: u64 = 1;
19477        let e: f64 = 0.25;
19478        let r: f64 = vcvtd_n_f64_u64::<2>(a);
19479        assert_eq!(r, e);
19480    }
19481
19482    #[simd_test(enable = "neon")]
19483    unsafe fn test_vcvt_n_s64_f64() {
19484        let a: f64 = 0.25;
19485        let e: i64x1 = i64x1::new(1);
19486        let r: i64x1 = transmute(vcvt_n_s64_f64::<2>(transmute(a)));
19487        assert_eq!(r, e);
19488    }
19489
19490    #[simd_test(enable = "neon")]
19491    unsafe fn test_vcvtq_n_s64_f64() {
19492        let a: f64x2 = f64x2::new(0.25, 0.5);
19493        let e: i64x2 = i64x2::new(1, 2);
19494        let r: i64x2 = transmute(vcvtq_n_s64_f64::<2>(transmute(a)));
19495        assert_eq!(r, e);
19496    }
19497
19498    #[simd_test(enable = "neon")]
19499    unsafe fn test_vcvts_n_s32_f32() {
19500        let a: f32 = 0.25;
19501        let e: i32 = 1;
19502        let r: i32 = vcvts_n_s32_f32::<2>(a);
19503        assert_eq!(r, e);
19504    }
19505
19506    #[simd_test(enable = "neon")]
19507    unsafe fn test_vcvtd_n_s64_f64() {
19508        let a: f64 = 0.25;
19509        let e: i64 = 1;
19510        let r: i64 = vcvtd_n_s64_f64::<2>(a);
19511        assert_eq!(r, e);
19512    }
19513
19514    #[simd_test(enable = "neon")]
19515    unsafe fn test_vcvt_n_u64_f64() {
19516        let a: f64 = 0.25;
19517        let e: u64x1 = u64x1::new(1);
19518        let r: u64x1 = transmute(vcvt_n_u64_f64::<2>(transmute(a)));
19519        assert_eq!(r, e);
19520    }
19521
19522    #[simd_test(enable = "neon")]
19523    unsafe fn test_vcvtq_n_u64_f64() {
19524        let a: f64x2 = f64x2::new(0.25, 0.5);
19525        let e: u64x2 = u64x2::new(1, 2);
19526        let r: u64x2 = transmute(vcvtq_n_u64_f64::<2>(transmute(a)));
19527        assert_eq!(r, e);
19528    }
19529
19530    #[simd_test(enable = "neon")]
19531    unsafe fn test_vcvts_n_u32_f32() {
19532        let a: f32 = 0.25;
19533        let e: u32 = 1;
19534        let r: u32 = vcvts_n_u32_f32::<2>(a);
19535        assert_eq!(r, e);
19536    }
19537
19538    #[simd_test(enable = "neon")]
19539    unsafe fn test_vcvtd_n_u64_f64() {
19540        let a: f64 = 0.25;
19541        let e: u64 = 1;
19542        let r: u64 = vcvtd_n_u64_f64::<2>(a);
19543        assert_eq!(r, e);
19544    }
19545
19546    #[simd_test(enable = "neon")]
19547    unsafe fn test_vcvts_f32_s32() {
19548        let a: i32 = 1;
19549        let e: f32 = 1.;
19550        let r: f32 = vcvts_f32_s32(a);
19551        assert_eq!(r, e);
19552    }
19553
19554    #[simd_test(enable = "neon")]
19555    unsafe fn test_vcvtd_f64_s64() {
19556        let a: i64 = 1;
19557        let e: f64 = 1.;
19558        let r: f64 = vcvtd_f64_s64(a);
19559        assert_eq!(r, e);
19560    }
19561
19562    #[simd_test(enable = "neon")]
19563    unsafe fn test_vcvts_f32_u32() {
19564        let a: u32 = 1;
19565        let e: f32 = 1.;
19566        let r: f32 = vcvts_f32_u32(a);
19567        assert_eq!(r, e);
19568    }
19569
19570    #[simd_test(enable = "neon")]
19571    unsafe fn test_vcvtd_f64_u64() {
19572        let a: u64 = 1;
19573        let e: f64 = 1.;
19574        let r: f64 = vcvtd_f64_u64(a);
19575        assert_eq!(r, e);
19576    }
19577
19578    #[simd_test(enable = "neon")]
19579    unsafe fn test_vcvts_s32_f32() {
19580        let a: f32 = 1.;
19581        let e: i32 = 1;
19582        let r: i32 = vcvts_s32_f32(a);
19583        assert_eq!(r, e);
19584    }
19585
19586    #[simd_test(enable = "neon")]
19587    unsafe fn test_vcvtd_s64_f64() {
19588        let a: f64 = 1.;
19589        let e: i64 = 1;
19590        let r: i64 = vcvtd_s64_f64(a);
19591        assert_eq!(r, e);
19592    }
19593
19594    #[simd_test(enable = "neon")]
19595    unsafe fn test_vcvts_u32_f32() {
19596        let a: f32 = 1.;
19597        let e: u32 = 1;
19598        let r: u32 = vcvts_u32_f32(a);
19599        assert_eq!(r, e);
19600    }
19601
19602    #[simd_test(enable = "neon")]
19603    unsafe fn test_vcvtd_u64_f64() {
19604        let a: f64 = 1.;
19605        let e: u64 = 1;
19606        let r: u64 = vcvtd_u64_f64(a);
19607        assert_eq!(r, e);
19608    }
19609
19610    #[simd_test(enable = "neon")]
19611    unsafe fn test_vcvt_s64_f64() {
19612        let a: f64 = -1.1;
19613        let e: i64x1 = i64x1::new(-1);
19614        let r: i64x1 = transmute(vcvt_s64_f64(transmute(a)));
19615        assert_eq!(r, e);
19616    }
19617
19618    #[simd_test(enable = "neon")]
19619    unsafe fn test_vcvtq_s64_f64() {
19620        let a: f64x2 = f64x2::new(-1.1, 2.1);
19621        let e: i64x2 = i64x2::new(-1, 2);
19622        let r: i64x2 = transmute(vcvtq_s64_f64(transmute(a)));
19623        assert_eq!(r, e);
19624    }
19625
19626    #[simd_test(enable = "neon")]
19627    unsafe fn test_vcvt_u64_f64() {
19628        let a: f64 = 1.1;
19629        let e: u64x1 = u64x1::new(1);
19630        let r: u64x1 = transmute(vcvt_u64_f64(transmute(a)));
19631        assert_eq!(r, e);
19632    }
19633
19634    #[simd_test(enable = "neon")]
19635    unsafe fn test_vcvtq_u64_f64() {
19636        let a: f64x2 = f64x2::new(1.1, 2.1);
19637        let e: u64x2 = u64x2::new(1, 2);
19638        let r: u64x2 = transmute(vcvtq_u64_f64(transmute(a)));
19639        assert_eq!(r, e);
19640    }
19641
19642    #[simd_test(enable = "neon")]
19643    unsafe fn test_vcvta_s32_f32() {
19644        let a: f32x2 = f32x2::new(-1.1, 2.1);
19645        let e: i32x2 = i32x2::new(-1, 2);
19646        let r: i32x2 = transmute(vcvta_s32_f32(transmute(a)));
19647        assert_eq!(r, e);
19648    }
19649
19650    #[simd_test(enable = "neon")]
19651    unsafe fn test_vcvtaq_s32_f32() {
19652        let a: f32x4 = f32x4::new(-1.1, 2.1, -2.9, 3.9);
19653        let e: i32x4 = i32x4::new(-1, 2, -3, 4);
19654        let r: i32x4 = transmute(vcvtaq_s32_f32(transmute(a)));
19655        assert_eq!(r, e);
19656    }
19657
19658    #[simd_test(enable = "neon")]
19659    unsafe fn test_vcvta_s64_f64() {
19660        let a: f64 = -1.1;
19661        let e: i64x1 = i64x1::new(-1);
19662        let r: i64x1 = transmute(vcvta_s64_f64(transmute(a)));
19663        assert_eq!(r, e);
19664    }
19665
19666    #[simd_test(enable = "neon")]
19667    unsafe fn test_vcvtaq_s64_f64() {
19668        let a: f64x2 = f64x2::new(-1.1, 2.1);
19669        let e: i64x2 = i64x2::new(-1, 2);
19670        let r: i64x2 = transmute(vcvtaq_s64_f64(transmute(a)));
19671        assert_eq!(r, e);
19672    }
19673
19674    #[simd_test(enable = "neon")]
19675    unsafe fn test_vcvtas_s32_f32() {
19676        let a: f32 = 2.9;
19677        let e: i32 = 3;
19678        let r: i32 = vcvtas_s32_f32(a);
19679        assert_eq!(r, e);
19680    }
19681
19682    #[simd_test(enable = "neon")]
19683    unsafe fn test_vcvtad_s64_f64() {
19684        let a: f64 = 2.9;
19685        let e: i64 = 3;
19686        let r: i64 = vcvtad_s64_f64(a);
19687        assert_eq!(r, e);
19688    }
19689
19690    #[simd_test(enable = "neon")]
19691    unsafe fn test_vcvtas_u32_f32() {
19692        let a: f32 = 2.9;
19693        let e: u32 = 3;
19694        let r: u32 = vcvtas_u32_f32(a);
19695        assert_eq!(r, e);
19696    }
19697
19698    #[simd_test(enable = "neon")]
19699    unsafe fn test_vcvtad_u64_f64() {
19700        let a: f64 = 2.9;
19701        let e: u64 = 3;
19702        let r: u64 = vcvtad_u64_f64(a);
19703        assert_eq!(r, e);
19704    }
19705
19706    #[simd_test(enable = "neon")]
19707    unsafe fn test_vcvtn_s32_f32() {
19708        let a: f32x2 = f32x2::new(-1.5, 2.1);
19709        let e: i32x2 = i32x2::new(-2, 2);
19710        let r: i32x2 = transmute(vcvtn_s32_f32(transmute(a)));
19711        assert_eq!(r, e);
19712    }
19713
19714    #[simd_test(enable = "neon")]
19715    unsafe fn test_vcvtnq_s32_f32() {
19716        let a: f32x4 = f32x4::new(-1.5, 2.1, -2.9, 3.9);
19717        let e: i32x4 = i32x4::new(-2, 2, -3, 4);
19718        let r: i32x4 = transmute(vcvtnq_s32_f32(transmute(a)));
19719        assert_eq!(r, e);
19720    }
19721
19722    #[simd_test(enable = "neon")]
19723    unsafe fn test_vcvtn_s64_f64() {
19724        let a: f64 = -1.5;
19725        let e: i64x1 = i64x1::new(-2);
19726        let r: i64x1 = transmute(vcvtn_s64_f64(transmute(a)));
19727        assert_eq!(r, e);
19728    }
19729
19730    #[simd_test(enable = "neon")]
19731    unsafe fn test_vcvtnq_s64_f64() {
19732        let a: f64x2 = f64x2::new(-1.5, 2.1);
19733        let e: i64x2 = i64x2::new(-2, 2);
19734        let r: i64x2 = transmute(vcvtnq_s64_f64(transmute(a)));
19735        assert_eq!(r, e);
19736    }
19737
19738    #[simd_test(enable = "neon")]
19739    unsafe fn test_vcvtns_s32_f32() {
19740        let a: f32 = -1.5;
19741        let e: i32 = -2;
19742        let r: i32 = vcvtns_s32_f32(a);
19743        assert_eq!(r, e);
19744    }
19745
19746    #[simd_test(enable = "neon")]
19747    unsafe fn test_vcvtnd_s64_f64() {
19748        let a: f64 = -1.5;
19749        let e: i64 = -2;
19750        let r: i64 = vcvtnd_s64_f64(a);
19751        assert_eq!(r, e);
19752    }
19753
19754    #[simd_test(enable = "neon")]
19755    unsafe fn test_vcvtm_s32_f32() {
19756        let a: f32x2 = f32x2::new(-1.1, 2.1);
19757        let e: i32x2 = i32x2::new(-2, 2);
19758        let r: i32x2 = transmute(vcvtm_s32_f32(transmute(a)));
19759        assert_eq!(r, e);
19760    }
19761
19762    #[simd_test(enable = "neon")]
19763    unsafe fn test_vcvtmq_s32_f32() {
19764        let a: f32x4 = f32x4::new(-1.1, 2.1, -2.9, 3.9);
19765        let e: i32x4 = i32x4::new(-2, 2, -3, 3);
19766        let r: i32x4 = transmute(vcvtmq_s32_f32(transmute(a)));
19767        assert_eq!(r, e);
19768    }
19769
19770    #[simd_test(enable = "neon")]
19771    unsafe fn test_vcvtm_s64_f64() {
19772        let a: f64 = -1.1;
19773        let e: i64x1 = i64x1::new(-2);
19774        let r: i64x1 = transmute(vcvtm_s64_f64(transmute(a)));
19775        assert_eq!(r, e);
19776    }
19777
19778    #[simd_test(enable = "neon")]
19779    unsafe fn test_vcvtmq_s64_f64() {
19780        let a: f64x2 = f64x2::new(-1.1, 2.1);
19781        let e: i64x2 = i64x2::new(-2, 2);
19782        let r: i64x2 = transmute(vcvtmq_s64_f64(transmute(a)));
19783        assert_eq!(r, e);
19784    }
19785
19786    #[simd_test(enable = "neon")]
19787    unsafe fn test_vcvtms_s32_f32() {
19788        let a: f32 = -1.1;
19789        let e: i32 = -2;
19790        let r: i32 = vcvtms_s32_f32(a);
19791        assert_eq!(r, e);
19792    }
19793
19794    #[simd_test(enable = "neon")]
19795    unsafe fn test_vcvtmd_s64_f64() {
19796        let a: f64 = -1.1;
19797        let e: i64 = -2;
19798        let r: i64 = vcvtmd_s64_f64(a);
19799        assert_eq!(r, e);
19800    }
19801
    // `vcvtp*` tests: float -> signed int conversion rounding toward plus
    // infinity (ceiling): -1.1 -> -1, 2.1 -> 3, -2.9 -> -2, 3.9 -> 4
    // (FCVTPS semantics — confirm against the Arm intrinsics reference).
    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtp_s32_f32() {
        let a: f32x2 = f32x2::new(-1.1, 2.1);
        let e: i32x2 = i32x2::new(-1, 3);
        let r: i32x2 = transmute(vcvtp_s32_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtpq_s32_f32() {
        let a: f32x4 = f32x4::new(-1.1, 2.1, -2.9, 3.9);
        let e: i32x4 = i32x4::new(-1, 3, -2, 4);
        let r: i32x4 = transmute(vcvtpq_s32_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtp_s64_f64() {
        let a: f64 = -1.1;
        let e: i64x1 = i64x1::new(-1);
        let r: i64x1 = transmute(vcvtp_s64_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtpq_s64_f64() {
        let a: f64x2 = f64x2::new(-1.1, 2.1);
        let e: i64x2 = i64x2::new(-1, 3);
        let r: i64x2 = transmute(vcvtpq_s64_f64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar forms: plain float in, plain int out.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtps_s32_f32() {
        let a: f32 = -1.1;
        let e: i32 = -1;
        let r: i32 = vcvtps_s32_f32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtpd_s64_f64() {
        let a: f64 = -1.1;
        let e: i64 = -1;
        let r: i64 = vcvtpd_s64_f64(a);
        assert_eq!(r, e);
    }
19849
    // `vcvta*` tests: float -> unsigned int, round to nearest (the `a` suffix
    // is ties-away-from-zero per FCVTAU): 1.1 -> 1, 2.1 -> 2, 2.9 -> 3, 3.9 -> 4.
    // NOTE(review): no input here is an exact .5 tie, so the ties-away
    // behavior is not actually distinguished from ties-to-even; a stronger
    // vector would belong in neon.spec.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcvta_u32_f32() {
        let a: f32x2 = f32x2::new(1.1, 2.1);
        let e: u32x2 = u32x2::new(1, 2);
        let r: u32x2 = transmute(vcvta_u32_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtaq_u32_f32() {
        let a: f32x4 = f32x4::new(1.1, 2.1, 2.9, 3.9);
        let e: u32x4 = u32x4::new(1, 2, 3, 4);
        let r: u32x4 = transmute(vcvtaq_u32_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvta_u64_f64() {
        let a: f64 = 1.1;
        let e: u64x1 = u64x1::new(1);
        let r: u64x1 = transmute(vcvta_u64_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtaq_u64_f64() {
        let a: f64x2 = f64x2::new(1.1, 2.1);
        let e: u64x2 = u64x2::new(1, 2);
        let r: u64x2 = transmute(vcvtaq_u64_f64(transmute(a)));
        assert_eq!(r, e);
    }
19881
    // `vcvtn*` tests: float -> unsigned int, round to nearest (the `n` suffix
    // is ties-to-even per FCVTNU): 1.5 -> 2, 2.1 -> 2, 2.9 -> 3, 3.9 -> 4.
    // NOTE(review): 1.5 rounds to 2 under both ties-to-even and ties-away, so
    // this vector does not distinguish the two modes (2.5 would: even -> 2,
    // away -> 3); an improvement would belong in neon.spec.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtn_u32_f32() {
        let a: f32x2 = f32x2::new(1.5, 2.1);
        let e: u32x2 = u32x2::new(2, 2);
        let r: u32x2 = transmute(vcvtn_u32_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtnq_u32_f32() {
        let a: f32x4 = f32x4::new(1.5, 2.1, 2.9, 3.9);
        let e: u32x4 = u32x4::new(2, 2, 3, 4);
        let r: u32x4 = transmute(vcvtnq_u32_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtn_u64_f64() {
        let a: f64 = 1.5;
        let e: u64x1 = u64x1::new(2);
        let r: u64x1 = transmute(vcvtn_u64_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtnq_u64_f64() {
        let a: f64x2 = f64x2::new(1.5, 2.1);
        let e: u64x2 = u64x2::new(2, 2);
        let r: u64x2 = transmute(vcvtnq_u64_f64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar forms: plain float in, plain unsigned int out.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtns_u32_f32() {
        let a: f32 = 1.5;
        let e: u32 = 2;
        let r: u32 = vcvtns_u32_f32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtnd_u64_f64() {
        let a: f64 = 1.5;
        let e: u64 = 2;
        let r: u64 = vcvtnd_u64_f64(a);
        assert_eq!(r, e);
    }
19929
    // `vcvtm*` unsigned tests: float -> unsigned int rounding toward minus
    // infinity (floor): 1.1 -> 1, 2.1 -> 2, 2.9 -> 2, 3.9 -> 3 (FCVTMU).
    // All inputs are positive; negative-input saturation to 0 is not covered.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtm_u32_f32() {
        let a: f32x2 = f32x2::new(1.1, 2.1);
        let e: u32x2 = u32x2::new(1, 2);
        let r: u32x2 = transmute(vcvtm_u32_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtmq_u32_f32() {
        let a: f32x4 = f32x4::new(1.1, 2.1, 2.9, 3.9);
        let e: u32x4 = u32x4::new(1, 2, 2, 3);
        let r: u32x4 = transmute(vcvtmq_u32_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtm_u64_f64() {
        let a: f64 = 1.1;
        let e: u64x1 = u64x1::new(1);
        let r: u64x1 = transmute(vcvtm_u64_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtmq_u64_f64() {
        let a: f64x2 = f64x2::new(1.1, 2.1);
        let e: u64x2 = u64x2::new(1, 2);
        let r: u64x2 = transmute(vcvtmq_u64_f64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar forms: plain float in, plain unsigned int out.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtms_u32_f32() {
        let a: f32 = 1.1;
        let e: u32 = 1;
        let r: u32 = vcvtms_u32_f32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtmd_u64_f64() {
        let a: f64 = 1.1;
        let e: u64 = 1;
        let r: u64 = vcvtmd_u64_f64(a);
        assert_eq!(r, e);
    }
19977
    // `vcvtp*` unsigned tests: float -> unsigned int rounding toward plus
    // infinity (ceiling): 1.1 -> 2, 2.1 -> 3, 2.9 -> 3, 3.9 -> 4 (FCVTPU).
    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtp_u32_f32() {
        let a: f32x2 = f32x2::new(1.1, 2.1);
        let e: u32x2 = u32x2::new(2, 3);
        let r: u32x2 = transmute(vcvtp_u32_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtpq_u32_f32() {
        let a: f32x4 = f32x4::new(1.1, 2.1, 2.9, 3.9);
        let e: u32x4 = u32x4::new(2, 3, 3, 4);
        let r: u32x4 = transmute(vcvtpq_u32_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtp_u64_f64() {
        let a: f64 = 1.1;
        let e: u64x1 = u64x1::new(2);
        let r: u64x1 = transmute(vcvtp_u64_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtpq_u64_f64() {
        let a: f64x2 = f64x2::new(1.1, 2.1);
        let e: u64x2 = u64x2::new(2, 3);
        let r: u64x2 = transmute(vcvtpq_u64_f64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar forms: plain float in, plain unsigned int out.
    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtps_u32_f32() {
        let a: f32 = 1.1;
        let e: u32 = 2;
        let r: u32 = vcvtps_u32_f32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcvtpd_u64_f64() {
        let a: f64 = 1.1;
        let e: u64 = 2;
        let r: u64 = vcvtpd_u64_f64(a);
        assert_eq!(r, e);
    }
20025
    // `vdup(q)_lane(q)` vector tests: broadcast one lane of the source vector
    // into every lane of the result (const generic selects the lane).
    // NOTE(review): several of these vectors have all lanes equal (e.g.
    // i64x2::new(1, 1)), so a wrong lane index would go undetected; stronger
    // inputs would need to come from neon.spec.
    #[simd_test(enable = "neon")]
    unsafe fn test_vdupq_laneq_p64() {
        let a: i64x2 = i64x2::new(1, 1);
        let e: i64x2 = i64x2::new(1, 1);
        let r: i64x2 = transmute(vdupq_laneq_p64::<1>(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupq_lane_p64() {
        let a: i64x1 = i64x1::new(1);
        let e: i64x2 = i64x2::new(1, 1);
        let r: i64x2 = transmute(vdupq_lane_p64::<0>(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupq_laneq_f64() {
        let a: f64x2 = f64x2::new(1., 1.);
        let e: f64x2 = f64x2::new(1., 1.);
        let r: f64x2 = transmute(vdupq_laneq_f64::<1>(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupq_lane_f64() {
        let a: f64 = 1.;
        let e: f64x2 = f64x2::new(1., 1.);
        let r: f64x2 = transmute(vdupq_lane_f64::<0>(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdup_lane_p64() {
        let a: i64x1 = i64x1::new(0);
        let e: i64x1 = i64x1::new(0);
        let r: i64x1 = transmute(vdup_lane_p64::<0>(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdup_lane_f64() {
        let a: f64 = 0.;
        let e: f64 = 0.;
        let r: f64 = transmute(vdup_lane_f64::<0>(transmute(a)));
        assert_eq!(r, e);
    }

    // These two use distinct lane values, so they do verify lane selection.
    #[simd_test(enable = "neon")]
    unsafe fn test_vdup_laneq_p64() {
        let a: i64x2 = i64x2::new(0, 1);
        let e: i64x1 = i64x1::new(1);
        let r: i64x1 = transmute(vdup_laneq_p64::<1>(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdup_laneq_f64() {
        let a: f64x2 = f64x2::new(0., 1.);
        let e: f64 = 1.;
        let r: f64 = transmute(vdup_laneq_f64::<1>(transmute(a)));
        assert_eq!(r, e);
    }
20089
    // `vdup{b,h,s,d}_lane(q)` signed tests: extract a single lane of the
    // source vector as a plain scalar (b = 8-bit, h = 16-bit, s = 32-bit,
    // d = 64-bit lane). The selected lane holds 1 in each vector below.
    #[simd_test(enable = "neon")]
    unsafe fn test_vdupb_lane_s8() {
        let a: i8x8 = i8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
        let e: i8 = 1;
        let r: i8 = vdupb_lane_s8::<4>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupb_laneq_s8() {
        let a: i8x16 = i8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
        let e: i8 = 1;
        let r: i8 = vdupb_laneq_s8::<8>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vduph_lane_s16() {
        let a: i16x4 = i16x4::new(1, 1, 1, 4);
        let e: i16 = 1;
        let r: i16 = vduph_lane_s16::<2>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vduph_laneq_s16() {
        let a: i16x8 = i16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
        let e: i16 = 1;
        let r: i16 = vduph_laneq_s16::<4>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdups_lane_s32() {
        let a: i32x2 = i32x2::new(1, 1);
        let e: i32 = 1;
        let r: i32 = vdups_lane_s32::<1>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdups_laneq_s32() {
        let a: i32x4 = i32x4::new(1, 1, 1, 4);
        let e: i32 = 1;
        let r: i32 = vdups_laneq_s32::<2>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupd_lane_s64() {
        let a: i64x1 = i64x1::new(1);
        let e: i64 = 1;
        let r: i64 = vdupd_lane_s64::<0>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupd_laneq_s64() {
        let a: i64x2 = i64x2::new(1, 1);
        let e: i64 = 1;
        let r: i64 = vdupd_laneq_s64::<1>(transmute(a));
        assert_eq!(r, e);
    }
20153
    // `vdup{b,h,s,d}_lane(q)` unsigned tests: same lane-to-scalar extraction
    // as the signed variants above, over u8/u16/u32/u64 element types.
    #[simd_test(enable = "neon")]
    unsafe fn test_vdupb_lane_u8() {
        let a: u8x8 = u8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
        let e: u8 = 1;
        let r: u8 = vdupb_lane_u8::<4>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupb_laneq_u8() {
        let a: u8x16 = u8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
        let e: u8 = 1;
        let r: u8 = vdupb_laneq_u8::<8>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vduph_lane_u16() {
        let a: u16x4 = u16x4::new(1, 1, 1, 4);
        let e: u16 = 1;
        let r: u16 = vduph_lane_u16::<2>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vduph_laneq_u16() {
        let a: u16x8 = u16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
        let e: u16 = 1;
        let r: u16 = vduph_laneq_u16::<4>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdups_lane_u32() {
        let a: u32x2 = u32x2::new(1, 1);
        let e: u32 = 1;
        let r: u32 = vdups_lane_u32::<1>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdups_laneq_u32() {
        let a: u32x4 = u32x4::new(1, 1, 1, 4);
        let e: u32 = 1;
        let r: u32 = vdups_laneq_u32::<2>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupd_lane_u64() {
        let a: u64x1 = u64x1::new(1);
        let e: u64 = 1;
        let r: u64 = vdupd_lane_u64::<0>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupd_laneq_u64() {
        let a: u64x2 = u64x2::new(1, 1);
        let e: u64 = 1;
        let r: u64 = vdupd_laneq_u64::<1>(transmute(a));
        assert_eq!(r, e);
    }
20217
    // `vdup` lane-to-scalar tests for polynomial (p8/p16) and floating-point
    // (f32/f64) element types. Polynomial vectors are built via their signed
    // integer representations and transmuted.
    #[simd_test(enable = "neon")]
    unsafe fn test_vdupb_lane_p8() {
        let a: i8x8 = i8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
        let e: p8 = 1;
        let r: p8 = vdupb_lane_p8::<4>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupb_laneq_p8() {
        let a: i8x16 = i8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
        let e: p8 = 1;
        let r: p8 = vdupb_laneq_p8::<8>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vduph_lane_p16() {
        let a: i16x4 = i16x4::new(1, 1, 1, 4);
        let e: p16 = 1;
        let r: p16 = vduph_lane_p16::<2>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vduph_laneq_p16() {
        let a: i16x8 = i16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
        let e: p16 = 1;
        let r: p16 = vduph_laneq_p16::<4>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdups_lane_f32() {
        let a: f32x2 = f32x2::new(1., 1.);
        let e: f32 = 1.;
        let r: f32 = vdups_lane_f32::<1>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdups_laneq_f32() {
        let a: f32x4 = f32x4::new(1., 1., 1., 4.);
        let e: f32 = 1.;
        let r: f32 = vdups_laneq_f32::<2>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupd_lane_f64() {
        let a: f64 = 1.;
        let e: f64 = 1.;
        let r: f64 = vdupd_lane_f64::<0>(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupd_laneq_f64() {
        let a: f64x2 = f64x2::new(1., 1.);
        let e: f64 = 1.;
        let r: f64 = vdupd_laneq_f64::<1>(transmute(a));
        assert_eq!(r, e);
    }
20281
    // `vextq_*::<N>` extracts a vector spanning the boundary of `a` and `b`:
    // result lanes are a[N..] followed by b[..N]. With N = 1 on two-lane
    // vectors that is (a[1], b[0]) — here (1, 2).
    #[simd_test(enable = "neon")]
    unsafe fn test_vextq_p64() {
        let a: i64x2 = i64x2::new(1, 1);
        let b: i64x2 = i64x2::new(2, 2);
        let e: i64x2 = i64x2::new(1, 2);
        let r: i64x2 = transmute(vextq_p64::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vextq_f64() {
        let a: f64x2 = f64x2::new(1., 1.);
        let b: f64x2 = f64x2::new(2., 2.);
        let e: f64x2 = f64x2::new(1., 2.);
        let r: f64x2 = transmute(vextq_f64::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
20299
    // `vmla(q)_f64` tests: multiply-accumulate, r = a + b * c
    // (0 + 2 * 3 = 6; lane 1: 1 + 2 * 3 = 7).
    #[simd_test(enable = "neon")]
    unsafe fn test_vmla_f64() {
        let a: f64 = 0.;
        let b: f64 = 2.;
        let c: f64 = 3.;
        let e: f64 = 6.;
        let r: f64 = transmute(vmla_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlaq_f64() {
        let a: f64x2 = f64x2::new(0., 1.);
        let b: f64x2 = f64x2::new(2., 2.);
        let c: f64x2 = f64x2::new(3., 3.);
        let e: f64x2 = f64x2::new(6., 7.);
        let r: f64x2 = transmute(vmlaq_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
20319
    // `vmlal_high_*` tests: widening multiply-accumulate over the HIGH half
    // of `b` and `c`: r = a + widen(b_hi) * widen(c_hi). The low halves of
    // `b`/`c` are deliberately filled with decoy values (3, 3, ...) that
    // would produce a wrong result if the low half were used by mistake.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_s8() {
        let a: i16x8 = i16x8::new(8, 7, 6, 5, 4, 3, 2, 1);
        let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
        let c: i8x16 = i8x16::new(3, 3, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7);
        let e: i16x8 = i16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
        let r: i16x8 = transmute(vmlal_high_s8(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_s16() {
        let a: i32x4 = i32x4::new(8, 7, 6, 5);
        let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
        let c: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let e: i32x4 = i32x4::new(8, 9, 10, 11);
        let r: i32x4 = transmute(vmlal_high_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_s32() {
        let a: i64x2 = i64x2::new(8, 7);
        let b: i32x4 = i32x4::new(2, 2, 2, 2);
        let c: i32x4 = i32x4::new(3, 3, 0, 1);
        let e: i64x2 = i64x2::new(8, 9);
        let r: i64x2 = transmute(vmlal_high_s32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_u8() {
        let a: u16x8 = u16x8::new(8, 7, 6, 5, 4, 3, 2, 1);
        let b: u8x16 = u8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
        let c: u8x16 = u8x16::new(3, 3, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7);
        let e: u16x8 = u16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
        let r: u16x8 = transmute(vmlal_high_u8(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_u16() {
        let a: u32x4 = u32x4::new(8, 7, 6, 5);
        let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
        let c: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let e: u32x4 = u32x4::new(8, 9, 10, 11);
        let r: u32x4 = transmute(vmlal_high_u16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_u32() {
        let a: u64x2 = u64x2::new(8, 7);
        let b: u32x4 = u32x4::new(2, 2, 2, 2);
        let c: u32x4 = u32x4::new(3, 3, 0, 1);
        let e: u64x2 = u64x2::new(8, 9);
        let r: u64x2 = transmute(vmlal_high_u32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
20379
    // `vmlal_high_n_*` tests: widening multiply-accumulate of the high half
    // of `b` by a broadcast scalar `c`: r = a + widen(b_hi) * c.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_n_s16() {
        let a: i32x4 = i32x4::new(8, 7, 6, 5);
        let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: i16 = 2;
        let e: i32x4 = i32x4::new(8, 9, 10, 11);
        let r: i32x4 = transmute(vmlal_high_n_s16(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_n_s32() {
        let a: i64x2 = i64x2::new(8, 7);
        let b: i32x4 = i32x4::new(3, 3, 0, 1);
        let c: i32 = 2;
        let e: i64x2 = i64x2::new(8, 9);
        let r: i64x2 = transmute(vmlal_high_n_s32(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_n_u16() {
        let a: u32x4 = u32x4::new(8, 7, 6, 5);
        let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: u16 = 2;
        let e: u32x4 = u32x4::new(8, 9, 10, 11);
        let r: u32x4 = transmute(vmlal_high_n_u16(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_n_u32() {
        let a: u64x2 = u64x2::new(8, 7);
        let b: u32x4 = u32x4::new(3, 3, 0, 1);
        let c: u32 = 2;
        let e: u64x2 = u64x2::new(8, 9);
        let r: u64x2 = transmute(vmlal_high_n_u32(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }
20419
    // `vmlal_high_lane(q)_*` tests: widening multiply-accumulate of the high
    // half of `b` by lane LANE of `c`: r = a + widen(b_hi) * c[LANE].
    // Here lane 1 of `c` holds 2 and all other lanes are 0, so selecting any
    // other lane would zero the product and fail the assertion.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_lane_s16() {
        let a: i32x4 = i32x4::new(8, 7, 6, 5);
        let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i32x4 = i32x4::new(8, 9, 10, 11);
        let r: i32x4 = transmute(vmlal_high_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_laneq_s16() {
        let a: i32x4 = i32x4::new(8, 7, 6, 5);
        let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i32x4 = i32x4::new(8, 9, 10, 11);
        let r: i32x4 = transmute(vmlal_high_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_lane_s32() {
        let a: i64x2 = i64x2::new(8, 7);
        let b: i32x4 = i32x4::new(3, 3, 0, 1);
        let c: i32x2 = i32x2::new(0, 2);
        let e: i64x2 = i64x2::new(8, 9);
        let r: i64x2 = transmute(vmlal_high_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_laneq_s32() {
        let a: i64x2 = i64x2::new(8, 7);
        let b: i32x4 = i32x4::new(3, 3, 0, 1);
        let c: i32x4 = i32x4::new(0, 2, 0, 0);
        let e: i64x2 = i64x2::new(8, 9);
        let r: i64x2 = transmute(vmlal_high_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_lane_u16() {
        let a: u32x4 = u32x4::new(8, 7, 6, 5);
        let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: u16x4 = u16x4::new(0, 2, 0, 0);
        let e: u32x4 = u32x4::new(8, 9, 10, 11);
        let r: u32x4 = transmute(vmlal_high_lane_u16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_laneq_u16() {
        let a: u32x4 = u32x4::new(8, 7, 6, 5);
        let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: u16x8 = u16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: u32x4 = u32x4::new(8, 9, 10, 11);
        let r: u32x4 = transmute(vmlal_high_laneq_u16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_lane_u32() {
        let a: u64x2 = u64x2::new(8, 7);
        let b: u32x4 = u32x4::new(3, 3, 0, 1);
        let c: u32x2 = u32x2::new(0, 2);
        let e: u64x2 = u64x2::new(8, 9);
        let r: u64x2 = transmute(vmlal_high_lane_u32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlal_high_laneq_u32() {
        let a: u64x2 = u64x2::new(8, 7);
        let b: u32x4 = u32x4::new(3, 3, 0, 1);
        let c: u32x4 = u32x4::new(0, 2, 0, 0);
        let e: u64x2 = u64x2::new(8, 9);
        let r: u64x2 = transmute(vmlal_high_laneq_u32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
20499
    // `vmls(q)_f64` tests: multiply-subtract, r = a - b * c
    // (6 - 2 * 3 = 0; lane 1: 7 - 2 * 3 = 1).
    #[simd_test(enable = "neon")]
    unsafe fn test_vmls_f64() {
        let a: f64 = 6.;
        let b: f64 = 2.;
        let c: f64 = 3.;
        let e: f64 = 0.;
        let r: f64 = transmute(vmls_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsq_f64() {
        let a: f64x2 = f64x2::new(6., 7.);
        let b: f64x2 = f64x2::new(2., 2.);
        let c: f64x2 = f64x2::new(3., 3.);
        let e: f64x2 = f64x2::new(0., 1.);
        let r: f64x2 = transmute(vmlsq_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
20519
    // `vmlsl_high_*` tests: widening multiply-subtract over the HIGH half of
    // `b` and `c`: r = a - widen(b_hi) * widen(c_hi). As in the vmlal tests,
    // the low halves of `b`/`c` carry decoy values so accidental use of the
    // low half would be detected.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_s8() {
        let a: i16x8 = i16x8::new(14, 15, 16, 17, 18, 19, 20, 21);
        let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
        let c: i8x16 = i8x16::new(3, 3, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7);
        let e: i16x8 = i16x8::new(14, 13, 12, 11, 10, 9, 8, 7);
        let r: i16x8 = transmute(vmlsl_high_s8(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_s16() {
        let a: i32x4 = i32x4::new(14, 15, 16, 17);
        let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
        let c: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let e: i32x4 = i32x4::new(14, 13, 12, 11);
        let r: i32x4 = transmute(vmlsl_high_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_s32() {
        let a: i64x2 = i64x2::new(14, 15);
        let b: i32x4 = i32x4::new(2, 2, 2, 2);
        let c: i32x4 = i32x4::new(3, 3, 0, 1);
        let e: i64x2 = i64x2::new(14, 13);
        let r: i64x2 = transmute(vmlsl_high_s32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_u8() {
        let a: u16x8 = u16x8::new(14, 15, 16, 17, 18, 19, 20, 21);
        let b: u8x16 = u8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
        let c: u8x16 = u8x16::new(3, 3, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7);
        let e: u16x8 = u16x8::new(14, 13, 12, 11, 10, 9, 8, 7);
        let r: u16x8 = transmute(vmlsl_high_u8(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_u16() {
        let a: u32x4 = u32x4::new(14, 15, 16, 17);
        let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
        let c: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let e: u32x4 = u32x4::new(14, 13, 12, 11);
        let r: u32x4 = transmute(vmlsl_high_u16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_u32() {
        let a: u64x2 = u64x2::new(14, 15);
        let b: u32x4 = u32x4::new(2, 2, 2, 2);
        let c: u32x4 = u32x4::new(3, 3, 0, 1);
        let e: u64x2 = u64x2::new(14, 13);
        let r: u64x2 = transmute(vmlsl_high_u32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
20579
    // `vmlsl_high_n_*` tests: widening multiply-subtract of the high half of
    // `b` by a broadcast scalar `c`: r = a - widen(b_hi) * c.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_n_s16() {
        let a: i32x4 = i32x4::new(14, 15, 16, 17);
        let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: i16 = 2;
        let e: i32x4 = i32x4::new(14, 13, 12, 11);
        let r: i32x4 = transmute(vmlsl_high_n_s16(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_n_s32() {
        let a: i64x2 = i64x2::new(14, 15);
        let b: i32x4 = i32x4::new(3, 3, 0, 1);
        let c: i32 = 2;
        let e: i64x2 = i64x2::new(14, 13);
        let r: i64x2 = transmute(vmlsl_high_n_s32(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_n_u16() {
        let a: u32x4 = u32x4::new(14, 15, 16, 17);
        let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: u16 = 2;
        let e: u32x4 = u32x4::new(14, 13, 12, 11);
        let r: u32x4 = transmute(vmlsl_high_n_u16(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_n_u32() {
        let a: u64x2 = u64x2::new(14, 15);
        let b: u32x4 = u32x4::new(3, 3, 0, 1);
        let c: u32 = 2;
        let e: u64x2 = u64x2::new(14, 13);
        let r: u64x2 = transmute(vmlsl_high_n_u32(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }
20619
    // `vmlsl_high_lane(q)_*` tests: widening multiply-subtract of the high
    // half of `b` by lane LANE of `c`: r = a - widen(b_hi) * c[LANE].
    // Lane 1 of `c` holds 2 and all other lanes are 0, so a wrong lane index
    // would leave `a` unchanged and fail the assertion.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_lane_s16() {
        let a: i32x4 = i32x4::new(14, 15, 16, 17);
        let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i32x4 = i32x4::new(14, 13, 12, 11);
        let r: i32x4 = transmute(vmlsl_high_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_laneq_s16() {
        let a: i32x4 = i32x4::new(14, 15, 16, 17);
        let b: i16x8 = i16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i32x4 = i32x4::new(14, 13, 12, 11);
        let r: i32x4 = transmute(vmlsl_high_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_lane_s32() {
        let a: i64x2 = i64x2::new(14, 15);
        let b: i32x4 = i32x4::new(3, 3, 0, 1);
        let c: i32x2 = i32x2::new(0, 2);
        let e: i64x2 = i64x2::new(14, 13);
        let r: i64x2 = transmute(vmlsl_high_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_laneq_s32() {
        let a: i64x2 = i64x2::new(14, 15);
        let b: i32x4 = i32x4::new(3, 3, 0, 1);
        let c: i32x4 = i32x4::new(0, 2, 0, 0);
        let e: i64x2 = i64x2::new(14, 13);
        let r: i64x2 = transmute(vmlsl_high_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_lane_u16() {
        let a: u32x4 = u32x4::new(14, 15, 16, 17);
        let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: u16x4 = u16x4::new(0, 2, 0, 0);
        let e: u32x4 = u32x4::new(14, 13, 12, 11);
        let r: u32x4 = transmute(vmlsl_high_lane_u16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmlsl_high_laneq_u16() {
        let a: u32x4 = u32x4::new(14, 15, 16, 17);
        let b: u16x8 = u16x8::new(3, 3, 0, 1, 0, 1, 2, 3);
        let c: u16x8 = u16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: u32x4 = u32x4::new(14, 13, 12, 11);
        let r: u32x4 = transmute(vmlsl_high_laneq_u16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
20679
20680    #[simd_test(enable = "neon")]
20681    unsafe fn test_vmlsl_high_lane_u32() {
20682        let a: u64x2 = u64x2::new(14, 15);
20683        let b: u32x4 = u32x4::new(3, 3, 0, 1);
20684        let c: u32x2 = u32x2::new(0, 2);
20685        let e: u64x2 = u64x2::new(14, 13);
20686        let r: u64x2 = transmute(vmlsl_high_lane_u32::<1>(transmute(a), transmute(b), transmute(c)));
20687        assert_eq!(r, e);
20688    }
20689
20690    #[simd_test(enable = "neon")]
20691    unsafe fn test_vmlsl_high_laneq_u32() {
20692        let a: u64x2 = u64x2::new(14, 15);
20693        let b: u32x4 = u32x4::new(3, 3, 0, 1);
20694        let c: u32x4 = u32x4::new(0, 2, 0, 0);
20695        let e: u64x2 = u64x2::new(14, 13);
20696        let r: u64x2 = transmute(vmlsl_high_laneq_u32::<1>(transmute(a), transmute(b), transmute(c)));
20697        assert_eq!(r, e);
20698    }
20699
    // Narrowing move, high half: each expected vector is the concatenation of
    // `a` (kept as the low half) with `b` truncated to the narrower element
    // type (placed in the high half) — visible directly in the `e` vectors.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmovn_high_s16() {
        let a: i8x8 = i8x8::new(0, 1, 2, 3, 2, 3, 4, 5);
        let b: i16x8 = i16x8::new(2, 3, 4, 5, 12, 13, 14, 15);
        let e: i8x16 = i8x16::new(0, 1, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 12, 13, 14, 15);
        let r: i8x16 = transmute(vmovn_high_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovn_high_s32() {
        let a: i16x4 = i16x4::new(0, 1, 2, 3);
        let b: i32x4 = i32x4::new(2, 3, 4, 5);
        let e: i16x8 = i16x8::new(0, 1, 2, 3, 2, 3, 4, 5);
        let r: i16x8 = transmute(vmovn_high_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovn_high_s64() {
        let a: i32x2 = i32x2::new(0, 1);
        let b: i64x2 = i64x2::new(2, 3);
        let e: i32x4 = i32x4::new(0, 1, 2, 3);
        let r: i32x4 = transmute(vmovn_high_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovn_high_u16() {
        let a: u8x8 = u8x8::new(0, 1, 2, 3, 2, 3, 4, 5);
        let b: u16x8 = u16x8::new(2, 3, 4, 5, 12, 13, 14, 15);
        let e: u8x16 = u8x16::new(0, 1, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 12, 13, 14, 15);
        let r: u8x16 = transmute(vmovn_high_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovn_high_u32() {
        let a: u16x4 = u16x4::new(0, 1, 2, 3);
        let b: u32x4 = u32x4::new(2, 3, 4, 5);
        let e: u16x8 = u16x8::new(0, 1, 2, 3, 2, 3, 4, 5);
        let r: u16x8 = transmute(vmovn_high_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovn_high_u64() {
        let a: u32x2 = u32x2::new(0, 1);
        let b: u64x2 = u64x2::new(2, 3);
        let e: u32x4 = u32x4::new(0, 1, 2, 3);
        let r: u32x4 = transmute(vmovn_high_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
20753
    // Negation tests. Only zero / small values are exercised here; note that
    // for the f64 cases `assert_eq!` treats -0.0 == 0.0, so negating 0. still
    // passes against an expected 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vneg_s64() {
        let a: i64x1 = i64x1::new(0);
        let e: i64x1 = i64x1::new(0);
        let r: i64x1 = transmute(vneg_s64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vnegq_s64() {
        let a: i64x2 = i64x2::new(0, 1);
        let e: i64x2 = i64x2::new(0, -1);
        let r: i64x2 = transmute(vnegq_s64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar (d-register) form: plain i64 in and out, no transmute needed.
    #[simd_test(enable = "neon")]
    unsafe fn test_vnegd_s64() {
        let a: i64 = 1;
        let e: i64 = -1;
        let r: i64 = vnegd_s64(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vneg_f64() {
        let a: f64 = 0.;
        let e: f64 = 0.;
        let r: f64 = transmute(vneg_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vnegq_f64() {
        let a: f64x2 = f64x2::new(0., 1.);
        let e: f64x2 = f64x2::new(0., -1.);
        let r: f64x2 = transmute(vnegq_f64(transmute(a)));
        assert_eq!(r, e);
    }
20793
    // Saturating negation. The i64 vector cases feed i64::MIN
    // (-9223372036854775808), whose true negation is unrepresentable, and
    // expect saturation to i64::MAX (0x7FFF_FFFF_FFFF_FFFF).
    #[simd_test(enable = "neon")]
    unsafe fn test_vqneg_s64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
        let r: i64x1 = transmute(vqneg_s64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqnegq_s64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, 0);
        let e: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 0);
        let r: i64x2 = transmute(vqnegq_s64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar forms below only check the non-saturating path (1 -> -1).
    #[simd_test(enable = "neon")]
    unsafe fn test_vqnegb_s8() {
        let a: i8 = 1;
        let e: i8 = -1;
        let r: i8 = vqnegb_s8(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqnegh_s16() {
        let a: i16 = 1;
        let e: i16 = -1;
        let r: i16 = vqnegh_s16(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqnegs_s32() {
        let a: i32 = 1;
        let e: i32 = -1;
        let r: i32 = vqnegs_s32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqnegd_s64() {
        let a: i64 = 1;
        let e: i64 = -1;
        let r: i64 = vqnegd_s64(a);
        assert_eq!(r, e);
    }
20841
    // Scalar saturating subtraction. All cases compute 42 - 1 = 41, i.e. only
    // the in-range (non-saturating) path is exercised here.
    #[simd_test(enable = "neon")]
    unsafe fn test_vqsubb_s8() {
        let a: i8 = 42;
        let b: i8 = 1;
        let e: i8 = 41;
        let r: i8 = vqsubb_s8(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqsubh_s16() {
        let a: i16 = 42;
        let b: i16 = 1;
        let e: i16 = 41;
        let r: i16 = vqsubh_s16(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqsubb_u8() {
        let a: u8 = 42;
        let b: u8 = 1;
        let e: u8 = 41;
        let r: u8 = vqsubb_u8(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqsubh_u16() {
        let a: u16 = 42;
        let b: u16 = 1;
        let e: u16 = 41;
        let r: u16 = vqsubh_u16(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqsubs_u32() {
        let a: u32 = 42;
        let b: u32 = 1;
        let e: u32 = 41;
        let r: u32 = vqsubs_u32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqsubd_u64() {
        let a: u64 = 42;
        let b: u64 = 1;
        let e: u64 = 41;
        let r: u64 = vqsubd_u64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqsubs_s32() {
        let a: i32 = 42;
        let b: i32 = 1;
        let e: i32 = 41;
        let r: i32 = vqsubs_s32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqsubd_s64() {
        let a: i64 = 42;
        let b: i64 = 1;
        let e: i64 = 41;
        let r: i64 = vqsubd_s64(a, b);
        assert_eq!(r, e);
    }
20913
    // Per-byte bit reversal: each output byte is the input byte with its 8
    // bits mirrored, e.g. 2 (0b0000_0010) -> 64 (0b0100_0000),
    // 6 (0b0000_0110) -> 96 (0b0110_0000) — matching the `e` vectors.
    #[simd_test(enable = "neon")]
    unsafe fn test_vrbit_s8() {
        let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let e: i8x8 = i8x8::new(0, 64, 32, 96, 16, 80, 48, 112);
        let r: i8x8 = transmute(vrbit_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrbitq_s8() {
        let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
        let e: i8x16 = i8x16::new(0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24, 88, 56, 120);
        let r: i8x16 = transmute(vrbitq_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrbit_u8() {
        let a: u8x8 = u8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let e: u8x8 = u8x8::new(0, 64, 32, 96, 16, 80, 48, 112);
        let r: u8x8 = transmute(vrbit_u8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrbitq_u8() {
        let a: u8x16 = u8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
        let e: u8x16 = u8x16::new(0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24, 88, 56, 120);
        let r: u8x16 = transmute(vrbitq_u8(transmute(a)));
        assert_eq!(r, e);
    }

    // Polynomial variants share the i8 test vectors via transmute.
    #[simd_test(enable = "neon")]
    unsafe fn test_vrbit_p8() {
        let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let e: i8x8 = i8x8::new(0, 64, 32, 96, 16, 80, 48, 112);
        let r: i8x8 = transmute(vrbit_p8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrbitq_p8() {
        let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
        let e: i8x16 = i8x16::new(0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24, 88, 56, 120);
        let r: i8x16 = transmute(vrbitq_p8(transmute(a)));
        assert_eq!(r, e);
    }
20961
    // Floating-point rounding families. The expected vectors pin the mode of
    // each family on the shared inputs (-1.5, 0.5, 1.5, 2.5):
    //   vrndx / vrndn / vrndi : -2, 0, 2, 2  (round to nearest, ties to even)
    //   vrnda                 : -2, 1, 2, 3  (round to nearest, ties away from zero)
    //   vrndm                 : -2, 0, 1, 2  (round toward -inf, floor)
    //   vrndp                 : -1, 1, 2, 3  (round toward +inf, ceil)
    //   vrnd                  : -1, 0, 1, 2  (round toward zero, truncate)
    // NOTE(review): vrndi follows the current rounding mode; the expectations
    // assume the default round-to-nearest-even environment — confirm the test
    // harness never changes FPCR rounding mode.
    #[simd_test(enable = "neon")]
    unsafe fn test_vrndx_f32() {
        let a: f32x2 = f32x2::new(-1.5, 0.5);
        let e: f32x2 = f32x2::new(-2.0, 0.0);
        let r: f32x2 = transmute(vrndx_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndxq_f32() {
        let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
        let e: f32x4 = f32x4::new(-2.0, 0.0, 2.0, 2.0);
        let r: f32x4 = transmute(vrndxq_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndx_f64() {
        let a: f64 = -1.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrndx_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndxq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 0.5);
        let e: f64x2 = f64x2::new(-2.0, 0.0);
        let r: f64x2 = transmute(vrndxq_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrnda_f32() {
        let a: f32x2 = f32x2::new(-1.5, 0.5);
        let e: f32x2 = f32x2::new(-2.0, 1.0);
        let r: f32x2 = transmute(vrnda_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndaq_f32() {
        let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
        let e: f32x4 = f32x4::new(-2.0, 1.0, 2.0, 3.0);
        let r: f32x4 = transmute(vrndaq_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrnda_f64() {
        let a: f64 = -1.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrnda_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndaq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 0.5);
        let e: f64x2 = f64x2::new(-2.0, 1.0);
        let r: f64x2 = transmute(vrndaq_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndn_f64() {
        let a: f64 = -1.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrndn_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndnq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 0.5);
        let e: f64x2 = f64x2::new(-2.0, 0.0);
        let r: f64x2 = transmute(vrndnq_f64(transmute(a)));
        assert_eq!(r, e);
    }

    // Scalar form: plain f32 in and out, no transmute needed.
    #[simd_test(enable = "neon")]
    unsafe fn test_vrndns_f32() {
        let a: f32 = -1.5;
        let e: f32 = -2.0;
        let r: f32 = vrndns_f32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndm_f32() {
        let a: f32x2 = f32x2::new(-1.5, 0.5);
        let e: f32x2 = f32x2::new(-2.0, 0.0);
        let r: f32x2 = transmute(vrndm_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndmq_f32() {
        let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
        let e: f32x4 = f32x4::new(-2.0, 0.0, 1.0, 2.0);
        let r: f32x4 = transmute(vrndmq_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndm_f64() {
        let a: f64 = -1.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrndm_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndmq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 0.5);
        let e: f64x2 = f64x2::new(-2.0, 0.0);
        let r: f64x2 = transmute(vrndmq_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndp_f32() {
        let a: f32x2 = f32x2::new(-1.5, 0.5);
        let e: f32x2 = f32x2::new(-1.0, 1.0);
        let r: f32x2 = transmute(vrndp_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndpq_f32() {
        let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
        let e: f32x4 = f32x4::new(-1.0, 1.0, 2.0, 3.0);
        let r: f32x4 = transmute(vrndpq_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndp_f64() {
        let a: f64 = -1.5;
        let e: f64 = -1.0;
        let r: f64 = transmute(vrndp_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndpq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 0.5);
        let e: f64x2 = f64x2::new(-1.0, 1.0);
        let r: f64x2 = transmute(vrndpq_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrnd_f32() {
        let a: f32x2 = f32x2::new(-1.5, 0.5);
        let e: f32x2 = f32x2::new(-1.0, 0.0);
        let r: f32x2 = transmute(vrnd_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndq_f32() {
        let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
        let e: f32x4 = f32x4::new(-1.0, 0.0, 1.0, 2.0);
        let r: f32x4 = transmute(vrndq_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrnd_f64() {
        let a: f64 = -1.5;
        let e: f64 = -1.0;
        let r: f64 = transmute(vrnd_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 0.5);
        let e: f64x2 = f64x2::new(-1.0, 0.0);
        let r: f64x2 = transmute(vrndq_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndi_f32() {
        let a: f32x2 = f32x2::new(-1.5, 0.5);
        let e: f32x2 = f32x2::new(-2.0, 0.0);
        let r: f32x2 = transmute(vrndi_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndiq_f32() {
        let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
        let e: f32x4 = f32x4::new(-2.0, 0.0, 2.0, 2.0);
        let r: f32x4 = transmute(vrndiq_f32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndi_f64() {
        let a: f64 = -1.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrndi_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vrndiq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 0.5);
        let e: f64x2 = f64x2::new(-2.0, 0.0);
        let r: f64x2 = transmute(vrndiq_f64(transmute(a)));
        assert_eq!(r, e);
    }
21177
    // Scalar saturating addition. All cases compute 42 + 1 = 43, i.e. only
    // the in-range (non-saturating) path is exercised here.
    #[simd_test(enable = "neon")]
    unsafe fn test_vqaddb_s8() {
        let a: i8 = 42;
        let b: i8 = 1;
        let e: i8 = 43;
        let r: i8 = vqaddb_s8(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqaddh_s16() {
        let a: i16 = 42;
        let b: i16 = 1;
        let e: i16 = 43;
        let r: i16 = vqaddh_s16(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqaddb_u8() {
        let a: u8 = 42;
        let b: u8 = 1;
        let e: u8 = 43;
        let r: u8 = vqaddb_u8(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqaddh_u16() {
        let a: u16 = 42;
        let b: u16 = 1;
        let e: u16 = 43;
        let r: u16 = vqaddh_u16(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqadds_u32() {
        let a: u32 = 42;
        let b: u32 = 1;
        let e: u32 = 43;
        let r: u32 = vqadds_u32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqaddd_u64() {
        let a: u64 = 42;
        let b: u64 = 1;
        let e: u64 = 43;
        let r: u64 = vqaddd_u64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqadds_s32() {
        let a: i32 = 42;
        let b: i32 = 1;
        let e: i32 = 43;
        let r: i32 = vqadds_s32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqaddd_s64() {
        let a: i64 = 42;
        let b: i64 = 1;
        let e: i64 = 43;
        let r: i64 = vqaddd_s64(a, b);
        assert_eq!(r, e);
    }
21249
    // Multi-register f64 loads (vld1_*_x2/x3/x4). Each test loads starting at
    // a[1], so the pointer is deliberately offset from the array start —
    // NOTE(review): presumably to exercise loads not aligned to the array
    // base; confirm against the generator spec.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld1_f64_x2() {
        let a: [f64; 3] = [0., 1., 2.];
        let e: [f64; 2] = [1., 2.];
        let r: [f64; 2] = transmute(vld1_f64_x2(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1q_f64_x2() {
        let a: [f64; 5] = [0., 1., 2., 3., 4.];
        let e: [f64x2; 2] = [f64x2::new(1., 2.), f64x2::new(3., 4.)];
        let r: [f64x2; 2] = transmute(vld1q_f64_x2(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1_f64_x3() {
        let a: [f64; 4] = [0., 1., 2., 3.];
        let e: [f64; 3] = [1., 2., 3.];
        let r: [f64; 3] = transmute(vld1_f64_x3(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1q_f64_x3() {
        let a: [f64; 7] = [0., 1., 2., 3., 4., 5., 6.];
        let e: [f64x2; 3] = [f64x2::new(1., 2.), f64x2::new(3., 4.), f64x2::new(5., 6.)];
        let r: [f64x2; 3] = transmute(vld1q_f64_x3(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1_f64_x4() {
        let a: [f64; 5] = [0., 1., 2., 3., 4.];
        let e: [f64; 4] = [1., 2., 3., 4.];
        let r: [f64; 4] = transmute(vld1_f64_x4(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1q_f64_x4() {
        let a: [f64; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.];
        let e: [f64x2; 4] = [f64x2::new(1., 2.), f64x2::new(3., 4.), f64x2::new(5., 6.), f64x2::new(7., 8.)];
        let r: [f64x2; 4] = transmute(vld1q_f64_x4(a[1..].as_ptr()));
        assert_eq!(r, e);
    }
21297
    // Two-element structure loads. vld2q_* de-interleaves consecutive pairs
    // starting at a[1]; vld2*_dup_* replicates the first structure (a[1], a[2])
    // across all lanes of both result vectors — both visible in the `e` values.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_s64() {
        let a: [i64; 5] = [0, 1, 2, 2, 3];
        let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(2, 3)];
        let r: [i64x2; 2] = transmute(vld2q_s64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_u64() {
        let a: [u64; 5] = [0, 1, 2, 2, 3];
        let e: [u64x2; 2] = [u64x2::new(1, 2), u64x2::new(2, 3)];
        let r: [u64x2; 2] = transmute(vld2q_u64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_p64() {
        let a: [u64; 5] = [0, 1, 2, 2, 3];
        let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(2, 3)];
        let r: [i64x2; 2] = transmute(vld2q_p64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2_f64() {
        let a: [f64; 3] = [0., 1., 2.];
        let e: [f64; 2] = [1., 2.];
        let r: [f64; 2] = transmute(vld2_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_f64() {
        let a: [f64; 5] = [0., 1., 2., 2., 3.];
        let e: [f64x2; 2] = [f64x2::new(1., 2.), f64x2::new(2., 3.)];
        let r: [f64x2; 2] = transmute(vld2q_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_dup_s64() {
        let a: [i64; 5] = [0, 1, 1, 2, 3];
        let e: [i64x2; 2] = [i64x2::new(1, 1), i64x2::new(1, 1)];
        let r: [i64x2; 2] = transmute(vld2q_dup_s64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_dup_u64() {
        let a: [u64; 5] = [0, 1, 1, 2, 3];
        let e: [u64x2; 2] = [u64x2::new(1, 1), u64x2::new(1, 1)];
        let r: [u64x2; 2] = transmute(vld2q_dup_u64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_dup_p64() {
        let a: [u64; 5] = [0, 1, 1, 2, 3];
        let e: [i64x2; 2] = [i64x2::new(1, 1), i64x2::new(1, 1)];
        let r: [i64x2; 2] = transmute(vld2q_dup_p64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2_dup_f64() {
        let a: [f64; 3] = [0., 1., 1.];
        let e: [f64; 2] = [1., 1.];
        let r: [f64; 2] = transmute(vld2_dup_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_dup_f64() {
        let a: [f64; 5] = [0., 1., 1., 2., 3.];
        let e: [f64x2; 2] = [f64x2::new(1., 1.), f64x2::new(1., 1.)];
        let r: [f64x2; 2] = transmute(vld2q_dup_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }
21377
    // Two-element lane loads: vld2*_lane_*::<0> loads one structure from
    // a[1..] into lane 0 of the existing vectors `b` and leaves all other
    // lanes untouched — `e` differs from `b` only in lane 0 of each vector.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_lane_s8() {
        let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
        let b: [i8x16; 2] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
        let e: [i8x16; 2] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
        let r: [i8x16; 2] = transmute(vld2q_lane_s8::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2_lane_s64() {
        let a: [i64; 3] = [0, 1, 2];
        let b: [i64x1; 2] = [i64x1::new(0), i64x1::new(2)];
        let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)];
        let r: [i64x1; 2] = transmute(vld2_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_lane_s64() {
        let a: [i64; 5] = [0, 1, 2, 3, 4];
        let b: [i64x2; 2] = [i64x2::new(0, 2), i64x2::new(2, 14)];
        let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(2, 14)];
        let r: [i64x2; 2] = transmute(vld2q_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2_lane_p64() {
        let a: [u64; 3] = [0, 1, 2];
        let b: [i64x1; 2] = [i64x1::new(0), i64x1::new(2)];
        let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)];
        let r: [i64x1; 2] = transmute(vld2_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_lane_p64() {
        let a: [u64; 5] = [0, 1, 2, 3, 4];
        let b: [i64x2; 2] = [i64x2::new(0, 2), i64x2::new(2, 14)];
        let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(2, 14)];
        let r: [i64x2; 2] = transmute(vld2q_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_lane_u8() {
        let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
        let b: [u8x16; 2] = [u8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
        let e: [u8x16; 2] = [u8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
        let r: [u8x16; 2] = transmute(vld2q_lane_u8::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2_lane_u64() {
        let a: [u64; 3] = [0, 1, 2];
        let b: [u64x1; 2] = [u64x1::new(0), u64x1::new(2)];
        let e: [u64x1; 2] = [u64x1::new(1), u64x1::new(2)];
        let r: [u64x1; 2] = transmute(vld2_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_lane_u64() {
        let a: [u64; 5] = [0, 1, 2, 3, 4];
        let b: [u64x2; 2] = [u64x2::new(0, 2), u64x2::new(2, 14)];
        let e: [u64x2; 2] = [u64x2::new(1, 2), u64x2::new(2, 14)];
        let r: [u64x2; 2] = transmute(vld2q_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_lane_p8() {
        let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
        let b: [i8x16; 2] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
        let e: [i8x16; 2] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)];
        let r: [i8x16; 2] = transmute(vld2q_lane_p8::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2_lane_f64() {
        let a: [f64; 3] = [0., 1., 2.];
        let b: [f64; 2] = [0., 2.];
        let e: [f64; 2] = [1., 2.];
        let r: [f64; 2] = transmute(vld2_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld2q_lane_f64() {
        let a: [f64; 5] = [0., 1., 2., 3., 4.];
        let b: [f64x2; 2] = [f64x2::new(0., 2.), f64x2::new(2., 14.)];
        let e: [f64x2; 2] = [f64x2::new(1., 2.), f64x2::new(2., 14.)];
        let r: [f64x2; 2] = transmute(vld2q_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }
21476
    // vld3 / vld3_dup tests (generated — see file header; code untouched,
    // comments only). vld3 de-interleaves 3*LANES consecutive elements from
    // `a[1..]` into 3 vectors (element i of vector v comes from a[1 + 3*i + v]);
    // the expected `e` arrays below follow exactly that pattern. The _dup
    // variants read three elements and broadcast each across one whole vector;
    // the test data starts with three equal values, so `e` is all-equal lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_s64() {
        let a: [i64; 7] = [0, 1, 2, 2, 2, 4, 4];
        let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 4), i64x2::new(2, 4)];
        let r: [i64x2; 3] = transmute(vld3q_s64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_u64() {
        let a: [u64; 7] = [0, 1, 2, 2, 2, 4, 4];
        let e: [u64x2; 3] = [u64x2::new(1, 2), u64x2::new(2, 4), u64x2::new(2, 4)];
        let r: [u64x2; 3] = transmute(vld3q_u64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    // Polynomial (p64) results are compared through their i64x2 representation.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_p64() {
        let a: [u64; 7] = [0, 1, 2, 2, 2, 4, 4];
        let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 4), i64x2::new(2, 4)];
        let r: [i64x2; 3] = transmute(vld3q_p64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3_f64() {
        let a: [f64; 4] = [0., 1., 2., 2.];
        let e: [f64; 3] = [1., 2., 2.];
        let r: [f64; 3] = transmute(vld3_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_f64() {
        let a: [f64; 7] = [0., 1., 2., 2., 2., 4., 4.];
        let e: [f64x2; 3] = [f64x2::new(1., 2.), f64x2::new(2., 4.), f64x2::new(2., 4.)];
        let r: [f64x2; 3] = transmute(vld3q_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    // _dup tests: trailing values (3, 4) in `a` differ from the broadcast value
    // so a plain (non-dup) load would not produce the all-1 expected vectors.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_dup_s64() {
        let a: [i64; 7] = [0, 1, 1, 1, 3, 1, 4];
        let e: [i64x2; 3] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)];
        let r: [i64x2; 3] = transmute(vld3q_dup_s64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_dup_u64() {
        let a: [u64; 7] = [0, 1, 1, 1, 3, 1, 4];
        let e: [u64x2; 3] = [u64x2::new(1, 1), u64x2::new(1, 1), u64x2::new(1, 1)];
        let r: [u64x2; 3] = transmute(vld3q_dup_u64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_dup_p64() {
        let a: [u64; 7] = [0, 1, 1, 1, 3, 1, 4];
        let e: [i64x2; 3] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)];
        let r: [i64x2; 3] = transmute(vld3q_dup_p64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3_dup_f64() {
        let a: [f64; 4] = [0., 1., 1., 1.];
        let e: [f64; 3] = [1., 1., 1.];
        let r: [f64; 3] = transmute(vld3_dup_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_dup_f64() {
        let a: [f64; 7] = [0., 1., 1., 1., 3., 1., 4.];
        let e: [f64x2; 3] = [f64x2::new(1., 1.), f64x2::new(1., 1.), f64x2::new(1., 1.)];
        let r: [f64x2; 3] = transmute(vld3q_dup_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }
21556
    // vld3_lane tests (generated — see file header; code untouched, comments
    // only). The intrinsic reads 3 consecutive elements from `a[1..]` into
    // lane <0> of the three vectors in `b`; all other lanes must keep `b`'s
    // values — only the first element of each expected vector differs from `b`.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_lane_s8() {
        let a: [i8; 49] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
        let b: [i8x16; 3] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
        let e: [i8x16; 3] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
        let r: [i8x16; 3] = transmute(vld3q_lane_s8::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3_lane_s64() {
        let a: [i64; 4] = [0, 1, 2, 2];
        let b: [i64x1; 3] = [i64x1::new(0), i64x1::new(2), i64x1::new(2)];
        let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(2)];
        let r: [i64x1; 3] = transmute(vld3_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_lane_s64() {
        let a: [i64; 7] = [0, 1, 2, 2, 4, 5, 6];
        let b: [i64x2; 3] = [i64x2::new(0, 2), i64x2::new(2, 14), i64x2::new(2, 16)];
        let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 14), i64x2::new(2, 16)];
        let r: [i64x2; 3] = transmute(vld3q_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3_lane_p64() {
        let a: [u64; 4] = [0, 1, 2, 2];
        let b: [i64x1; 3] = [i64x1::new(0), i64x1::new(2), i64x1::new(2)];
        let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(2)];
        let r: [i64x1; 3] = transmute(vld3_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_lane_p64() {
        let a: [u64; 7] = [0, 1, 2, 2, 4, 5, 6];
        let b: [i64x2; 3] = [i64x2::new(0, 2), i64x2::new(2, 14), i64x2::new(2, 16)];
        let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 14), i64x2::new(2, 16)];
        let r: [i64x2; 3] = transmute(vld3q_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_lane_p8() {
        let a: [u8; 49] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
        let b: [i8x16; 3] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
        let e: [i8x16; 3] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
        let r: [i8x16; 3] = transmute(vld3q_lane_p8::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_lane_u8() {
        let a: [u8; 49] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
        let b: [u8x16; 3] = [u8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
        let e: [u8x16; 3] = [u8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)];
        let r: [u8x16; 3] = transmute(vld3q_lane_u8::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3_lane_u64() {
        let a: [u64; 4] = [0, 1, 2, 2];
        let b: [u64x1; 3] = [u64x1::new(0), u64x1::new(2), u64x1::new(2)];
        let e: [u64x1; 3] = [u64x1::new(1), u64x1::new(2), u64x1::new(2)];
        let r: [u64x1; 3] = transmute(vld3_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_lane_u64() {
        let a: [u64; 7] = [0, 1, 2, 2, 4, 5, 6];
        let b: [u64x2; 3] = [u64x2::new(0, 2), u64x2::new(2, 14), u64x2::new(2, 16)];
        let e: [u64x2; 3] = [u64x2::new(1, 2), u64x2::new(2, 14), u64x2::new(2, 16)];
        let r: [u64x2; 3] = transmute(vld3q_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld3_lane_f64() {
        let a: [f64; 4] = [0., 1., 2., 2.];
        let b: [f64; 3] = [0., 2., 2.];
        let e: [f64; 3] = [1., 2., 2.];
        let r: [f64; 3] = transmute(vld3_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    // Lane 0 of the third input vector is a 9. sentinel that differs from the
    // loaded value (2.), so the test actually detects the lane being written.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld3q_lane_f64() {
        let a: [f64; 7] = [0., 1., 2., 2., 4., 5., 6.];
        let b: [f64x2; 3] = [f64x2::new(0., 2.), f64x2::new(2., 14.), f64x2::new(9., 16.)];
        let e: [f64x2; 3] = [f64x2::new(1., 2.), f64x2::new(2., 14.), f64x2::new(2., 16.)];
        let r: [f64x2; 3] = transmute(vld3q_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }
21655
    // vld4 / vld4_dup tests (generated — see file header; code untouched,
    // comments only). vld4 de-interleaves 4*LANES consecutive elements from
    // `a[1..]` into 4 vectors (element i of vector v comes from a[1 + 4*i + v]).
    // The _dup variants read four elements and broadcast each across one vector.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_s64() {
        let a: [i64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
        let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 6), i64x2::new(2, 6), i64x2::new(6, 8)];
        let r: [i64x2; 4] = transmute(vld4q_s64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_u64() {
        let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
        let e: [u64x2; 4] = [u64x2::new(1, 2), u64x2::new(2, 6), u64x2::new(2, 6), u64x2::new(6, 8)];
        let r: [u64x2; 4] = transmute(vld4q_u64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_p64() {
        let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
        let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 6), i64x2::new(2, 6), i64x2::new(6, 8)];
        let r: [i64x2; 4] = transmute(vld4q_p64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4_f64() {
        let a: [f64; 5] = [0., 1., 2., 2., 6.];
        let e: [f64; 4] = [1., 2., 2., 6.];
        let r: [f64; 4] = transmute(vld4_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_f64() {
        let a: [f64; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.];
        let e: [f64x2; 4] = [f64x2::new(1., 2.), f64x2::new(2., 6.), f64x2::new(2., 6.), f64x2::new(6., 8.)];
        let r: [f64x2; 4] = transmute(vld4q_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    // _dup tests: trailing values past the first four differ from 1, so a plain
    // (non-dup) load would not produce the all-1 expected vectors.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_dup_s64() {
        let a: [i64; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5];
        let e: [i64x2; 4] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)];
        let r: [i64x2; 4] = transmute(vld4q_dup_s64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_dup_u64() {
        let a: [u64; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5];
        let e: [u64x2; 4] = [u64x2::new(1, 1), u64x2::new(1, 1), u64x2::new(1, 1), u64x2::new(1, 1)];
        let r: [u64x2; 4] = transmute(vld4q_dup_u64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_dup_p64() {
        let a: [u64; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5];
        let e: [i64x2; 4] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)];
        let r: [i64x2; 4] = transmute(vld4q_dup_p64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4_dup_f64() {
        let a: [f64; 5] = [0., 1., 1., 1., 1.];
        let e: [f64; 4] = [1., 1., 1., 1.];
        let r: [f64; 4] = transmute(vld4_dup_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_dup_f64() {
        let a: [f64; 9] = [0., 1., 1., 1., 1., 6., 4., 3., 5.];
        let e: [f64x2; 4] = [f64x2::new(1., 1.), f64x2::new(1., 1.), f64x2::new(1., 1.), f64x2::new(1., 1.)];
        let r: [f64x2; 4] = transmute(vld4q_dup_f64(a[1..].as_ptr()));
        assert_eq!(r, e);
    }
21735
    // vld4_lane tests (generated — see file header; code untouched, comments
    // only). The intrinsic reads 4 consecutive elements from `a[1..]` into
    // lane <0> of the four vectors in `b`; all other lanes must keep `b`'s
    // values — only the first element of each expected vector differs from `b`.
    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_lane_s8() {
        let a: [i8; 65] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16];
        let b: [i8x16; 4] = [i8x16::new(0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
        let e: [i8x16; 4] = [i8x16::new(1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
        let r: [i8x16; 4] = transmute(vld4q_lane_s8::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4_lane_s64() {
        let a: [i64; 5] = [0, 1, 2, 2, 2];
        let b: [i64x1; 4] = [i64x1::new(0), i64x1::new(2), i64x1::new(2), i64x1::new(2)];
        let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(2), i64x1::new(2)];
        let r: [i64x1; 4] = transmute(vld4_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_lane_s64() {
        let a: [i64; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8];
        let b: [i64x2; 4] = [i64x2::new(0, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)];
        let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)];
        let r: [i64x2; 4] = transmute(vld4q_lane_s64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4_lane_p64() {
        let a: [u64; 5] = [0, 1, 2, 2, 2];
        let b: [i64x1; 4] = [i64x1::new(0), i64x1::new(2), i64x1::new(2), i64x1::new(2)];
        let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(2), i64x1::new(2)];
        let r: [i64x1; 4] = transmute(vld4_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_lane_p64() {
        let a: [u64; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8];
        let b: [i64x2; 4] = [i64x2::new(0, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)];
        let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)];
        let r: [i64x2; 4] = transmute(vld4q_lane_p64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_lane_p8() {
        let a: [u8; 65] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16];
        let b: [i8x16; 4] = [i8x16::new(0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
        let e: [i8x16; 4] = [i8x16::new(1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
        let r: [i8x16; 4] = transmute(vld4q_lane_p8::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_lane_u8() {
        let a: [u8; 65] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16];
        let b: [u8x16; 4] = [u8x16::new(0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), u8x16::new(11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), u8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
        let e: [u8x16; 4] = [u8x16::new(1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), u8x16::new(2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), u8x16::new(2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)];
        let r: [u8x16; 4] = transmute(vld4q_lane_u8::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4_lane_u64() {
        let a: [u64; 5] = [0, 1, 2, 2, 2];
        let b: [u64x1; 4] = [u64x1::new(0), u64x1::new(2), u64x1::new(2), u64x1::new(2)];
        let e: [u64x1; 4] = [u64x1::new(1), u64x1::new(2), u64x1::new(2), u64x1::new(2)];
        let r: [u64x1; 4] = transmute(vld4_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_lane_u64() {
        let a: [u64; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8];
        let b: [u64x2; 4] = [u64x2::new(0, 2), u64x2::new(2, 2), u64x2::new(2, 16), u64x2::new(2, 18)];
        let e: [u64x2; 4] = [u64x2::new(1, 2), u64x2::new(2, 2), u64x2::new(2, 16), u64x2::new(2, 18)];
        let r: [u64x2; 4] = transmute(vld4q_lane_u64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4_lane_f64() {
        let a: [f64; 5] = [0., 1., 2., 2., 2.];
        let b: [f64; 4] = [0., 2., 2., 2.];
        let e: [f64; 4] = [1., 2., 2., 2.];
        let r: [f64; 4] = transmute(vld4_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld4q_lane_f64() {
        let a: [f64; 9] = [0., 1., 2., 2., 2., 5., 6., 7., 8.];
        let b: [f64x2; 4] = [f64x2::new(0., 2.), f64x2::new(2., 2.), f64x2::new(2., 16.), f64x2::new(2., 18.)];
        let e: [f64x2; 4] = [f64x2::new(1., 2.), f64x2::new(2., 2.), f64x2::new(2., 16.), f64x2::new(2., 18.)];
        let r: [f64x2; 4] = transmute(vld4q_lane_f64::<0>(a[1..].as_ptr(), transmute(b)));
        assert_eq!(r, e);
    }
21834
    // vst1 tests (generated — see file header; code untouched, comments only).
    // Source vectors are built with `read_unaligned` because `a[1..]` is offset
    // by one element and thus not guaranteed vector-aligned. vst1_lane stores a
    // single lane; vst1_xN stores N whole vectors back-to-back.
    #[simd_test(enable = "neon")]
    unsafe fn test_vst1_lane_f64() {
        let a: [f64; 2] = [0., 1.];
        let e: [f64; 1] = [1.];
        let mut r: [f64; 1] = [0f64; 1];
        vst1_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    // Only lane 0 is stored, so r[1] must stay at its initial 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vst1q_lane_f64() {
        let a: [f64; 3] = [0., 1., 2.];
        let e: [f64; 2] = [1., 0.];
        let mut r: [f64; 2] = [0f64; 2];
        vst1q_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst1_f64_x2() {
        let a: [f64; 3] = [0., 1., 2.];
        let e: [f64; 2] = [1., 2.];
        let mut r: [f64; 2] = [0f64; 2];
        vst1_f64_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst1q_f64_x2() {
        let a: [f64; 5] = [0., 1., 2., 3., 4.];
        let e: [f64; 4] = [1., 2., 3., 4.];
        let mut r: [f64; 4] = [0f64; 4];
        vst1q_f64_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst1_f64_x3() {
        let a: [f64; 4] = [0., 1., 2., 3.];
        let e: [f64; 3] = [1., 2., 3.];
        let mut r: [f64; 3] = [0f64; 3];
        vst1_f64_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst1q_f64_x3() {
        let a: [f64; 7] = [0., 1., 2., 3., 4., 5., 6.];
        let e: [f64; 6] = [1., 2., 3., 4., 5., 6.];
        let mut r: [f64; 6] = [0f64; 6];
        vst1q_f64_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst1_f64_x4() {
        let a: [f64; 5] = [0., 1., 2., 3., 4.];
        let e: [f64; 4] = [1., 2., 3., 4.];
        let mut r: [f64; 4] = [0f64; 4];
        vst1_f64_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst1q_f64_x4() {
        let a: [f64; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.];
        let e: [f64; 8] = [1., 2., 3., 4., 5., 6., 7., 8.];
        let mut r: [f64; 8] = [0f64; 8];
        vst1q_f64_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }
21906
    // vst2 tests (generated — see file header; code untouched, comments only).
    // vst2 interleaves two vectors (read unaligned from `a[1..]`) into memory;
    // vst2_lane writes only lane <0> of each vector (2 elements), leaving the
    // rest of `r` at its zero initialization — visible in the expected arrays.
    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_s64() {
        let a: [i64; 5] = [0, 1, 2, 2, 3];
        let e: [i64; 4] = [1, 2, 2, 3];
        let mut r: [i64; 4] = [0i64; 4];
        vst2q_s64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_u64() {
        let a: [u64; 5] = [0, 1, 2, 2, 3];
        let e: [u64; 4] = [1, 2, 2, 3];
        let mut r: [u64; 4] = [0u64; 4];
        vst2q_u64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_p64() {
        let a: [u64; 5] = [0, 1, 2, 2, 3];
        let e: [u64; 4] = [1, 2, 2, 3];
        let mut r: [u64; 4] = [0u64; 4];
        vst2q_p64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2_f64() {
        let a: [f64; 3] = [0., 1., 2.];
        let e: [f64; 2] = [1., 2.];
        let mut r: [f64; 2] = [0f64; 2];
        vst2_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_f64() {
        let a: [f64; 5] = [0., 1., 2., 2., 3.];
        let e: [f64; 4] = [1., 2., 2., 3.];
        let mut r: [f64; 4] = [0f64; 4];
        vst2q_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_lane_s8() {
        let a: [i8; 33] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
        let e: [i8; 32] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        let mut r: [i8; 32] = [0i8; 32];
        vst2q_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2_lane_s64() {
        let a: [i64; 3] = [0, 1, 2];
        let e: [i64; 2] = [1, 2];
        let mut r: [i64; 2] = [0i64; 2];
        vst2_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_lane_s64() {
        let a: [i64; 5] = [0, 1, 2, 2, 3];
        let e: [i64; 4] = [1, 2, 0, 0];
        let mut r: [i64; 4] = [0i64; 4];
        vst2q_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_lane_u8() {
        let a: [u8; 33] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
        let e: [u8; 32] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        let mut r: [u8; 32] = [0u8; 32];
        vst2q_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2_lane_u64() {
        let a: [u64; 3] = [0, 1, 2];
        let e: [u64; 2] = [1, 2];
        let mut r: [u64; 2] = [0u64; 2];
        vst2_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_lane_u64() {
        let a: [u64; 5] = [0, 1, 2, 2, 3];
        let e: [u64; 4] = [1, 2, 0, 0];
        let mut r: [u64; 4] = [0u64; 4];
        vst2q_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_lane_p8() {
        let a: [u8; 33] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
        let e: [u8; 32] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        let mut r: [u8; 32] = [0u8; 32];
        vst2q_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2_lane_p64() {
        let a: [u64; 3] = [0, 1, 2];
        let e: [u64; 2] = [1, 2];
        let mut r: [u64; 2] = [0u64; 2];
        vst2_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_lane_p64() {
        let a: [u64; 5] = [0, 1, 2, 2, 3];
        let e: [u64; 4] = [1, 2, 0, 0];
        let mut r: [u64; 4] = [0u64; 4];
        vst2q_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2_lane_f64() {
        let a: [f64; 3] = [0., 1., 2.];
        let e: [f64; 2] = [1., 2.];
        let mut r: [f64; 2] = [0f64; 2];
        vst2_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst2q_lane_f64() {
        let a: [f64; 5] = [0., 1., 2., 2., 3.];
        let e: [f64; 4] = [1., 2., 0., 0.];
        let mut r: [f64; 4] = [0f64; 4];
        vst2q_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }
22050
    // vst3 tests (generated — see file header; code untouched, comments only).
    // vst3 interleaves three vectors (read unaligned from `a[1..]`) into
    // memory; vst3_lane writes only lane <0> of each vector (3 elements),
    // leaving the rest of `r` zeroed — visible in the expected arrays.
    #[simd_test(enable = "neon")]
    unsafe fn test_vst3q_s64() {
        let a: [i64; 7] = [0, 1, 2, 2, 4, 2, 4];
        let e: [i64; 6] = [1, 2, 2, 2, 4, 4];
        let mut r: [i64; 6] = [0i64; 6];
        vst3q_s64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst3q_u64() {
        let a: [u64; 7] = [0, 1, 2, 2, 4, 2, 4];
        let e: [u64; 6] = [1, 2, 2, 2, 4, 4];
        let mut r: [u64; 6] = [0u64; 6];
        vst3q_u64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst3q_p64() {
        let a: [u64; 7] = [0, 1, 2, 2, 4, 2, 4];
        let e: [u64; 6] = [1, 2, 2, 2, 4, 4];
        let mut r: [u64; 6] = [0u64; 6];
        vst3q_p64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst3_f64() {
        let a: [f64; 4] = [0., 1., 2., 2.];
        let e: [f64; 3] = [1., 2., 2.];
        let mut r: [f64; 3] = [0f64; 3];
        vst3_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst3q_f64() {
        let a: [f64; 7] = [0., 1., 2., 2., 4., 2., 4.];
        let e: [f64; 6] = [1., 2., 2., 2., 4., 4.];
        let mut r: [f64; 6] = [0f64; 6];
        vst3q_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst3q_lane_s8() {
        let a: [i8; 49] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32, 2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48];
        let e: [i8; 48] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        let mut r: [i8; 48] = [0i8; 48];
        vst3q_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst3_lane_s64() {
        let a: [i64; 4] = [0, 1, 2, 2];
        let e: [i64; 3] = [1, 2, 2];
        let mut r: [i64; 3] = [0i64; 3];
        vst3_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst3q_lane_s64() {
        let a: [i64; 7] = [0, 1, 2, 2, 4, 2, 4];
        let e: [i64; 6] = [1, 2, 2, 0, 0, 0];
        let mut r: [i64; 6] = [0i64; 6];
        vst3q_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst3q_lane_u8() {
        let a: [u8; 49] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32, 2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48];
        let e: [u8; 48] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        let mut r: [u8; 48] = [0u8; 48];
        vst3q_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst3_lane_u64() {
        let a: [u64; 4] = [0, 1, 2, 2];
        let e: [u64; 3] = [1, 2, 2];
        let mut r: [u64; 3] = [0u64; 3];
        vst3_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }
22140
22141    #[simd_test(enable = "neon")]
22142    unsafe fn test_vst3q_lane_u64() {
22143        let a: [u64; 7] = [0, 1, 2, 2, 4, 2, 4];
22144        let e: [u64; 6] = [1, 2, 2, 0, 0, 0];
22145        let mut r: [u64; 6] = [0u64; 6];
22146        vst3q_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
22147        assert_eq!(r, e);
22148    }
22149
22150    #[simd_test(enable = "neon")]
22151    unsafe fn test_vst3q_lane_p8() {
22152        let a: [u8; 49] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32, 2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48];
22153        let e: [u8; 48] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
22154        let mut r: [u8; 48] = [0u8; 48];
22155        vst3q_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
22156        assert_eq!(r, e);
22157    }
22158
22159    #[simd_test(enable = "neon")]
22160    unsafe fn test_vst3_lane_p64() {
22161        let a: [u64; 4] = [0, 1, 2, 2];
22162        let e: [u64; 3] = [1, 2, 2];
22163        let mut r: [u64; 3] = [0u64; 3];
22164        vst3_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
22165        assert_eq!(r, e);
22166    }
22167
22168    #[simd_test(enable = "neon")]
22169    unsafe fn test_vst3q_lane_p64() {
22170        let a: [u64; 7] = [0, 1, 2, 2, 4, 2, 4];
22171        let e: [u64; 6] = [1, 2, 2, 0, 0, 0];
22172        let mut r: [u64; 6] = [0u64; 6];
22173        vst3q_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
22174        assert_eq!(r, e);
22175    }
22176
22177    #[simd_test(enable = "neon")]
22178    unsafe fn test_vst3_lane_f64() {
22179        let a: [f64; 4] = [0., 1., 2., 2.];
22180        let e: [f64; 3] = [1., 2., 2.];
22181        let mut r: [f64; 3] = [0f64; 3];
22182        vst3_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
22183        assert_eq!(r, e);
22184    }
22185
22186    #[simd_test(enable = "neon")]
22187    unsafe fn test_vst3q_lane_f64() {
22188        let a: [f64; 7] = [0., 1., 2., 2., 3., 2., 3.];
22189        let e: [f64; 6] = [1., 2., 2., 0., 0., 0.];
22190        let mut r: [f64; 6] = [0f64; 6];
22191        vst3q_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
22192        assert_eq!(r, e);
22193    }
22194
    // ST4 full-store round-trip tests (generated; see file header): the four-vector
    // operand is loaded from `a[1..]` via `read_unaligned`, stored into `r`, and
    // compared against the expected array `e`.
    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_s64() {
        let a: [i64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
        let e: [i64; 8] = [1, 2, 2, 6, 2, 6, 6, 8];
        let mut r: [i64; 8] = [0i64; 8];
        vst4q_s64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_u64() {
        let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
        let e: [u64; 8] = [1, 2, 2, 6, 2, 6, 6, 8];
        let mut r: [u64; 8] = [0u64; 8];
        vst4q_u64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_p64() {
        let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
        let e: [u64; 8] = [1, 2, 2, 6, 2, 6, 6, 8];
        let mut r: [u64; 8] = [0u64; 8];
        vst4q_p64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4_f64() {
        let a: [f64; 5] = [0., 1., 2., 2., 6.];
        let e: [f64; 4] = [1., 2., 2., 6.];
        let mut r: [f64; 4] = [0f64; 4];
        vst4_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_f64() {
        let a: [f64; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.];
        let e: [f64; 8] = [1., 2., 2., 6., 2., 6., 6., 8.];
        let mut r: [f64; 8] = [0f64; 8];
        vst4q_f64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }
22239
    // ST4 single-lane store tests (generated; see file header). Each test stores
    // lane 0 of a four-vector operand, so only the first four output elements are
    // written — `e` expects every remaining element of `r` to stay zero.
    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_lane_s8() {
        let a: [i8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
        let e: [i8; 64] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        let mut r: [i8; 64] = [0i8; 64];
        vst4q_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4_lane_s64() {
        let a: [i64; 5] = [0, 1, 2, 2, 6];
        let e: [i64; 4] = [1, 2, 2, 6];
        let mut r: [i64; 4] = [0i64; 4];
        vst4_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_lane_s64() {
        let a: [i64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
        let e: [i64; 8] = [1, 2, 2, 6, 0, 0, 0, 0];
        let mut r: [i64; 8] = [0i64; 8];
        vst4q_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_lane_u8() {
        let a: [u8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
        let e: [u8; 64] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        let mut r: [u8; 64] = [0u8; 64];
        vst4q_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4_lane_u64() {
        let a: [u64; 5] = [0, 1, 2, 2, 6];
        let e: [u64; 4] = [1, 2, 2, 6];
        let mut r: [u64; 4] = [0u64; 4];
        vst4_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_lane_u64() {
        let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
        let e: [u64; 8] = [1, 2, 2, 6, 0, 0, 0, 0];
        let mut r: [u64; 8] = [0u64; 8];
        vst4q_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_lane_p8() {
        let a: [u8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
        let e: [u8; 64] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        let mut r: [u8; 64] = [0u8; 64];
        vst4q_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4_lane_p64() {
        let a: [u64; 5] = [0, 1, 2, 2, 6];
        let e: [u64; 4] = [1, 2, 2, 6];
        let mut r: [u64; 4] = [0u64; 4];
        vst4_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_lane_p64() {
        let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
        let e: [u64; 8] = [1, 2, 2, 6, 0, 0, 0, 0];
        let mut r: [u64; 8] = [0u64; 8];
        vst4q_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4_lane_f64() {
        let a: [f64; 5] = [0., 1., 2., 2., 6.];
        let e: [f64; 4] = [1., 2., 2., 6.];
        let mut r: [f64; 4] = [0f64; 4];
        vst4_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst4q_lane_f64() {
        let a: [f64; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.];
        let e: [f64; 8] = [1., 2., 2., 6., 0., 0., 0., 0.];
        let mut r: [f64; 8] = [0f64; 8];
        vst4q_lane_f64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
        assert_eq!(r, e);
    }
22338
    // Mixed-sign dot-product tests (generated; see file header). These require the
    // i8mm feature; the lane index 3 selects the last 4-byte group of `c` as the
    // multiplier, and the result is accumulated onto `a`.
    #[simd_test(enable = "neon,i8mm")]
    unsafe fn test_vusdot_laneq_s32() {
        let a: i32x2 = i32x2::new(1000, -4200);
        let b: u8x8 = u8x8::new(100, 110, 120, 130, 140, 150, 160, 170);
        let c: i8x16 = i8x16::new(4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
        let e: i32x2 = i32x2::new(-3420, -10140);
        let r: i32x2 = transmute(vusdot_laneq_s32::<3>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,i8mm")]
    unsafe fn test_vusdotq_laneq_s32() {
        let a: i32x4 = i32x4::new(1000, -4200, -1000, 2000);
        let b: u8x16 = u8x16::new(100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250);
        let c: i8x16 = i8x16::new(4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
        let e: i32x4 = i32x4::new(-3420, -10140, -8460, -6980);
        let r: i32x4 = transmute(vusdotq_laneq_s32::<3>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,i8mm")]
    unsafe fn test_vsudot_laneq_s32() {
        let a: i32x2 = i32x2::new(-2000, 4200);
        let b: i8x8 = i8x8::new(4, 3, 2, 1, 0, -1, -2, -3);
        let c: u8x16 = u8x16::new(100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250);
        let e: i32x2 = i32x2::new(300, 2740);
        let r: i32x2 = transmute(vsudot_laneq_s32::<3>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,i8mm")]
    unsafe fn test_vsudotq_laneq_s32() {
        let a: i32x4 = i32x4::new(-2000, 4200, -1000, 2000);
        let b: i8x16 = i8x16::new(4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
        let c: u8x16 = u8x16::new(100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250);
        let e: i32x4 = i32x4::new(300, 2740, -6220, -6980);
        let r: i32x4 = transmute(vsudotq_laneq_s32::<3>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
22378
    // Floating-point multiply tests (generated; see file header): plain, by-scalar
    // (`_n_`), and by-lane (`_lane_`/`_laneq_`) variants. Scalar values are moved in
    // and out of the vector types via `transmute`.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmul_f64() {
        let a: f64 = 1.0;
        let b: f64 = 2.0;
        let e: f64 = 2.0;
        let r: f64 = transmute(vmul_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulq_f64() {
        let a: f64x2 = f64x2::new(1.0, 2.0);
        let b: f64x2 = f64x2::new(2.0, 3.0);
        let e: f64x2 = f64x2::new(2.0, 6.0);
        let r: f64x2 = transmute(vmulq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmul_n_f64() {
        let a: f64 = 1.;
        let b: f64 = 2.;
        let e: f64 = 2.;
        let r: f64 = transmute(vmul_n_f64(transmute(a), b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulq_n_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let b: f64 = 2.;
        let e: f64x2 = f64x2::new(2., 4.);
        let r: f64x2 = transmute(vmulq_n_f64(transmute(a), b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmul_lane_f64() {
        let a: f64 = 1.;
        let b: f64 = 2.;
        let e: f64 = 2.;
        let r: f64 = transmute(vmul_lane_f64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmul_laneq_f64() {
        let a: f64 = 1.;
        let b: f64x2 = f64x2::new(2., 0.);
        let e: f64 = 2.;
        let r: f64 = transmute(vmul_laneq_f64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulq_lane_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let b: f64 = 2.;
        let e: f64x2 = f64x2::new(2., 4.);
        let r: f64x2 = transmute(vmulq_lane_f64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulq_laneq_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let b: f64x2 = f64x2::new(2., 0.);
        let e: f64x2 = f64x2::new(2., 4.);
        let r: f64x2 = transmute(vmulq_laneq_f64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmuls_lane_f32() {
        let a: f32 = 1.;
        let b: f32x2 = f32x2::new(2., 0.);
        let e: f32 = 2.;
        let r: f32 = vmuls_lane_f32::<0>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmuls_laneq_f32() {
        let a: f32 = 1.;
        let b: f32x4 = f32x4::new(2., 0., 0., 0.);
        let e: f32 = 2.;
        let r: f32 = vmuls_laneq_f32::<0>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmuld_lane_f64() {
        let a: f64 = 1.;
        let b: f64 = 2.;
        let e: f64 = 2.;
        let r: f64 = vmuld_lane_f64::<0>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmuld_laneq_f64() {
        let a: f64 = 1.;
        let b: f64x2 = f64x2::new(2., 0.);
        let e: f64 = 2.;
        let r: f64 = vmuld_laneq_f64::<0>(a, transmute(b));
        assert_eq!(r, e);
    }
22486
    // Widening multiply of the high halves (generated; see file header): the
    // expected vectors are products of the upper-half lanes of `a` and `b`.
    // The `_p8`/`_p64` variants are carry-less (polynomial/GF(2)) multiplies,
    // e.g. 15 (clmul) 3 = 0b1111 ^ 0b11110 = 17, matching `e`.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_s8() {
        let a: i8x16 = i8x16::new(1, 2, 9, 10, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16);
        let b: i8x16 = i8x16::new(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
        let e: i16x8 = i16x8::new(9, 20, 11, 24, 13, 28, 15, 32);
        let r: i16x8 = transmute(vmull_high_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_s16() {
        let a: i16x8 = i16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
        let b: i16x8 = i16x8::new(1, 2, 1, 2, 1, 2, 1, 2);
        let e: i32x4 = i32x4::new(9, 20, 11, 24);
        let r: i32x4 = transmute(vmull_high_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_s32() {
        let a: i32x4 = i32x4::new(1, 2, 9, 10);
        let b: i32x4 = i32x4::new(1, 2, 1, 2);
        let e: i64x2 = i64x2::new(9, 20);
        let r: i64x2 = transmute(vmull_high_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_u8() {
        let a: u8x16 = u8x16::new(1, 2, 9, 10, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16);
        let b: u8x16 = u8x16::new(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
        let e: u16x8 = u16x8::new(9, 20, 11, 24, 13, 28, 15, 32);
        let r: u16x8 = transmute(vmull_high_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_u16() {
        let a: u16x8 = u16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
        let b: u16x8 = u16x8::new(1, 2, 1, 2, 1, 2, 1, 2);
        let e: u32x4 = u32x4::new(9, 20, 11, 24);
        let r: u32x4 = transmute(vmull_high_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_u32() {
        let a: u32x4 = u32x4::new(1, 2, 9, 10);
        let b: u32x4 = u32x4::new(1, 2, 1, 2);
        let e: u64x2 = u64x2::new(9, 20);
        let r: u64x2 = transmute(vmull_high_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,aes")]
    unsafe fn test_vmull_p64() {
        let a: p64 = 15;
        let b: p64 = 3;
        let e: p128 = 17;
        let r: p128 = vmull_p64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_p8() {
        let a: i8x16 = i8x16::new(1, 2, 9, 10, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16);
        let b: i8x16 = i8x16::new(1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3);
        let e: i16x8 = i16x8::new(9, 30, 11, 20, 13, 18, 15, 48);
        let r: i16x8 = transmute(vmull_high_p8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,aes")]
    unsafe fn test_vmull_high_p64() {
        let a: i64x2 = i64x2::new(1, 15);
        let b: i64x2 = i64x2::new(1, 3);
        let e: p128 = 17;
        let r: p128 = vmull_high_p64(transmute(a), transmute(b));
        assert_eq!(r, e);
    }
22567
    // Widening multiply-by-scalar and multiply-by-lane tests for the high halves
    // (generated; see file header). The `_n_` variants multiply by a scalar; the
    // `_lane_`/`_laneq_` variants select lane 1 (value 2) of `b` as the multiplier.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_n_s16() {
        let a: i16x8 = i16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
        let b: i16 = 2;
        let e: i32x4 = i32x4::new(18, 20, 22, 24);
        let r: i32x4 = transmute(vmull_high_n_s16(transmute(a), b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_n_s32() {
        let a: i32x4 = i32x4::new(1, 2, 9, 10);
        let b: i32 = 2;
        let e: i64x2 = i64x2::new(18, 20);
        let r: i64x2 = transmute(vmull_high_n_s32(transmute(a), b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_n_u16() {
        let a: u16x8 = u16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
        let b: u16 = 2;
        let e: u32x4 = u32x4::new(18, 20, 22, 24);
        let r: u32x4 = transmute(vmull_high_n_u16(transmute(a), b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_n_u32() {
        let a: u32x4 = u32x4::new(1, 2, 9, 10);
        let b: u32 = 2;
        let e: u64x2 = u64x2::new(18, 20);
        let r: u64x2 = transmute(vmull_high_n_u32(transmute(a), b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_lane_s16() {
        let a: i16x8 = i16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
        let b: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i32x4 = i32x4::new(18, 20, 22, 24);
        let r: i32x4 = transmute(vmull_high_lane_s16::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_laneq_s16() {
        let a: i16x8 = i16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
        let b: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i32x4 = i32x4::new(18, 20, 22, 24);
        let r: i32x4 = transmute(vmull_high_laneq_s16::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_lane_s32() {
        let a: i32x4 = i32x4::new(1, 2, 9, 10);
        let b: i32x2 = i32x2::new(0, 2);
        let e: i64x2 = i64x2::new(18, 20);
        let r: i64x2 = transmute(vmull_high_lane_s32::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_laneq_s32() {
        let a: i32x4 = i32x4::new(1, 2, 9, 10);
        let b: i32x4 = i32x4::new(0, 2, 0, 0);
        let e: i64x2 = i64x2::new(18, 20);
        let r: i64x2 = transmute(vmull_high_laneq_s32::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_lane_u16() {
        let a: u16x8 = u16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
        let b: u16x4 = u16x4::new(0, 2, 0, 0);
        let e: u32x4 = u32x4::new(18, 20, 22, 24);
        let r: u32x4 = transmute(vmull_high_lane_u16::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_laneq_u16() {
        let a: u16x8 = u16x8::new(1, 2, 9, 10, 9, 10, 11, 12);
        let b: u16x8 = u16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: u32x4 = u32x4::new(18, 20, 22, 24);
        let r: u32x4 = transmute(vmull_high_laneq_u16::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_lane_u32() {
        let a: u32x4 = u32x4::new(1, 2, 9, 10);
        let b: u32x2 = u32x2::new(0, 2);
        let e: u64x2 = u64x2::new(18, 20);
        let r: u64x2 = transmute(vmull_high_lane_u32::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmull_high_laneq_u32() {
        let a: u32x4 = u32x4::new(1, 2, 9, 10);
        let b: u32x4 = u32x4::new(0, 2, 0, 0);
        let e: u64x2 = u64x2::new(18, 20);
        let r: u64x2 = transmute(vmull_high_laneq_u32::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
22675
    // FMULX (multiply-extended) tests (generated; see file header). With the finite
    // inputs used here the expected values equal the ordinary products; special
    // 0 x infinity behavior of FMULX is not exercised by these vectors.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmulx_f32() {
        let a: f32x2 = f32x2::new(1., 2.);
        let b: f32x2 = f32x2::new(2., 2.);
        let e: f32x2 = f32x2::new(2., 4.);
        let r: f32x2 = transmute(vmulx_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxq_f32() {
        let a: f32x4 = f32x4::new(1., 2., 3., 4.);
        let b: f32x4 = f32x4::new(2., 2., 2., 2.);
        let e: f32x4 = f32x4::new(2., 4., 6., 8.);
        let r: f32x4 = transmute(vmulxq_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulx_f64() {
        let a: f64 = 1.;
        let b: f64 = 2.;
        let e: f64 = 2.;
        let r: f64 = transmute(vmulx_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxq_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let b: f64x2 = f64x2::new(2., 2.);
        let e: f64x2 = f64x2::new(2., 4.);
        let r: f64x2 = transmute(vmulxq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulx_lane_f64() {
        let a: f64 = 1.;
        let b: f64 = 2.;
        let e: f64 = 2.;
        let r: f64 = transmute(vmulx_lane_f64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulx_laneq_f64() {
        let a: f64 = 1.;
        let b: f64x2 = f64x2::new(2., 0.);
        let e: f64 = 2.;
        let r: f64 = transmute(vmulx_laneq_f64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulx_lane_f32() {
        let a: f32x2 = f32x2::new(1., 2.);
        let b: f32x2 = f32x2::new(2., 0.);
        let e: f32x2 = f32x2::new(2., 4.);
        let r: f32x2 = transmute(vmulx_lane_f32::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulx_laneq_f32() {
        let a: f32x2 = f32x2::new(1., 2.);
        let b: f32x4 = f32x4::new(2., 0., 0., 0.);
        let e: f32x2 = f32x2::new(2., 4.);
        let r: f32x2 = transmute(vmulx_laneq_f32::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxq_lane_f32() {
        let a: f32x4 = f32x4::new(1., 2., 3., 4.);
        let b: f32x2 = f32x2::new(2., 0.);
        let e: f32x4 = f32x4::new(2., 4., 6., 8.);
        let r: f32x4 = transmute(vmulxq_lane_f32::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxq_laneq_f32() {
        let a: f32x4 = f32x4::new(1., 2., 3., 4.);
        let b: f32x4 = f32x4::new(2., 0., 0., 0.);
        let e: f32x4 = f32x4::new(2., 4., 6., 8.);
        let r: f32x4 = transmute(vmulxq_laneq_f32::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxq_lane_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let b: f64 = 2.;
        let e: f64x2 = f64x2::new(2., 4.);
        let r: f64x2 = transmute(vmulxq_lane_f64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxq_laneq_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let b: f64x2 = f64x2::new(2., 0.);
        let e: f64x2 = f64x2::new(2., 4.);
        let r: f64x2 = transmute(vmulxq_laneq_f64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxs_f32() {
        let a: f32 = 2.;
        let b: f32 = 3.;
        let e: f32 = 6.;
        let r: f32 = vmulxs_f32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxd_f64() {
        let a: f64 = 2.;
        let b: f64 = 3.;
        let e: f64 = 6.;
        let r: f64 = vmulxd_f64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxs_lane_f32() {
        let a: f32 = 2.;
        let b: f32x2 = f32x2::new(3., 0.);
        let e: f32 = 6.;
        let r: f32 = vmulxs_lane_f32::<0>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxs_laneq_f32() {
        let a: f32 = 2.;
        let b: f32x4 = f32x4::new(3., 0., 0., 0.);
        let e: f32 = 6.;
        let r: f32 = vmulxs_laneq_f32::<0>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxd_lane_f64() {
        let a: f64 = 2.;
        let b: f64 = 3.;
        let e: f64 = 6.;
        let r: f64 = vmulxd_lane_f64::<0>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmulxd_laneq_f64() {
        let a: f64 = 2.;
        let b: f64x2 = f64x2::new(3., 0.);
        let e: f64 = 6.;
        let r: f64 = vmulxd_laneq_f64::<0>(a, transmute(b));
        assert_eq!(r, e);
    }
22837
    // vfma f64 tests: every expected lane satisfies e = a + b * c
    // (e.g. 8 + 6*2 == 20, 18 + 4*3 == 30). The `_n_` variants broadcast the
    // scalar `c` to all lanes (2 + 6*8 == 50, 3 + 4*8 == 35).
    #[simd_test(enable = "neon")]
    unsafe fn test_vfma_f64() {
        let a: f64 = 8.0;
        let b: f64 = 6.0;
        let c: f64 = 2.0;
        let e: f64 = 20.0;
        let r: f64 = transmute(vfma_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmaq_f64() {
        let a: f64x2 = f64x2::new(8.0, 18.0);
        let b: f64x2 = f64x2::new(6.0, 4.0);
        let c: f64x2 = f64x2::new(2.0, 3.0);
        let e: f64x2 = f64x2::new(20.0, 30.0);
        let r: f64x2 = transmute(vfmaq_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfma_n_f64() {
        let a: f64 = 2.0;
        let b: f64 = 6.0;
        let c: f64 = 8.0;
        let e: f64 = 50.0;
        let r: f64 = transmute(vfma_n_f64(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmaq_n_f64() {
        let a: f64x2 = f64x2::new(2.0, 3.0);
        let b: f64x2 = f64x2::new(6.0, 4.0);
        let c: f64 = 8.0;
        let e: f64x2 = f64x2::new(50.0, 35.0);
        let r: f64x2 = transmute(vfmaq_n_f64(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }
22877
    // vfma by-lane tests: each expected lane satisfies e[i] = a[i] + b[i] * c[0]
    // (14 = 2 + 6*2, 11 = 3 + 4*2, ...). Lanes of `c` beyond lane 0 are 0, so
    // picking the wrong lane would drop the product term and fail.
    #[simd_test(enable = "neon")]
    unsafe fn test_vfma_lane_f32() {
        let a: f32x2 = f32x2::new(2., 3.);
        let b: f32x2 = f32x2::new(6., 4.);
        let c: f32x2 = f32x2::new(2., 0.);
        let e: f32x2 = f32x2::new(14., 11.);
        let r: f32x2 = transmute(vfma_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfma_laneq_f32() {
        let a: f32x2 = f32x2::new(2., 3.);
        let b: f32x2 = f32x2::new(6., 4.);
        let c: f32x4 = f32x4::new(2., 0., 0., 0.);
        let e: f32x2 = f32x2::new(14., 11.);
        let r: f32x2 = transmute(vfma_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmaq_lane_f32() {
        let a: f32x4 = f32x4::new(2., 3., 4., 5.);
        let b: f32x4 = f32x4::new(6., 4., 7., 8.);
        let c: f32x2 = f32x2::new(2., 0.);
        let e: f32x4 = f32x4::new(14., 11., 18., 21.);
        let r: f32x4 = transmute(vfmaq_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmaq_laneq_f32() {
        let a: f32x4 = f32x4::new(2., 3., 4., 5.);
        let b: f32x4 = f32x4::new(6., 4., 7., 8.);
        let c: f32x4 = f32x4::new(2., 0., 0., 0.);
        let e: f32x4 = f32x4::new(14., 11., 18., 21.);
        let r: f32x4 = transmute(vfmaq_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    // For the f64 `_lane_` variants `c` is a bare f64 transmuted into a
    // one-lane vector, so lane 0 is the only valid index.
    #[simd_test(enable = "neon")]
    unsafe fn test_vfma_lane_f64() {
        let a: f64 = 2.;
        let b: f64 = 6.;
        let c: f64 = 2.;
        let e: f64 = 14.;
        let r: f64 = transmute(vfma_lane_f64::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfma_laneq_f64() {
        let a: f64 = 2.;
        let b: f64 = 6.;
        let c: f64x2 = f64x2::new(2., 0.);
        let e: f64 = 14.;
        let r: f64 = transmute(vfma_laneq_f64::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmaq_lane_f64() {
        let a: f64x2 = f64x2::new(2., 3.);
        let b: f64x2 = f64x2::new(6., 4.);
        let c: f64 = 2.;
        let e: f64x2 = f64x2::new(14., 11.);
        let r: f64x2 = transmute(vfmaq_lane_f64::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmaq_laneq_f64() {
        let a: f64x2 = f64x2::new(2., 3.);
        let b: f64x2 = f64x2::new(6., 4.);
        let c: f64x2 = f64x2::new(2., 0.);
        let e: f64x2 = f64x2::new(14., 11.);
        let r: f64x2 = transmute(vfmaq_laneq_f64::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
22957
    // Scalar vfma by-lane tests: e = a + b * c[0] (20 = 2 + 6*3).
    // Only lane 0 of `c` is non-zero.
    #[simd_test(enable = "neon")]
    unsafe fn test_vfmas_lane_f32() {
        let a: f32 = 2.;
        let b: f32 = 6.;
        let c: f32x2 = f32x2::new(3., 0.);
        let e: f32 = 20.;
        let r: f32 = vfmas_lane_f32::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmas_laneq_f32() {
        let a: f32 = 2.;
        let b: f32 = 6.;
        let c: f32x4 = f32x4::new(3., 0., 0., 0.);
        let e: f32 = 20.;
        let r: f32 = vfmas_laneq_f32::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    // `c` is a bare f64 transmuted to a one-lane vector here.
    #[simd_test(enable = "neon")]
    unsafe fn test_vfmad_lane_f64() {
        let a: f64 = 2.;
        let b: f64 = 6.;
        let c: f64 = 3.;
        let e: f64 = 20.;
        let r: f64 = vfmad_lane_f64::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmad_laneq_f64() {
        let a: f64 = 2.;
        let b: f64 = 6.;
        let c: f64x2 = f64x2::new(3., 0.);
        let e: f64 = 20.;
        let r: f64 = vfmad_laneq_f64::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }
22997
    // vfms f64 tests: every expected lane satisfies e = a - b * c
    // (8 = 20 - 6*2, 18 = 30 - 4*3); `_n_` variants broadcast scalar `c`
    // (2 = 50 - 6*8, 3 = 35 - 4*8). These mirror the vfma tests above with
    // accumulator and result swapped.
    #[simd_test(enable = "neon")]
    unsafe fn test_vfms_f64() {
        let a: f64 = 20.0;
        let b: f64 = 6.0;
        let c: f64 = 2.0;
        let e: f64 = 8.0;
        let r: f64 = transmute(vfms_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmsq_f64() {
        let a: f64x2 = f64x2::new(20.0, 30.0);
        let b: f64x2 = f64x2::new(6.0, 4.0);
        let c: f64x2 = f64x2::new(2.0, 3.0);
        let e: f64x2 = f64x2::new(8.0, 18.0);
        let r: f64x2 = transmute(vfmsq_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfms_n_f64() {
        let a: f64 = 50.0;
        let b: f64 = 6.0;
        let c: f64 = 8.0;
        let e: f64 = 2.0;
        let r: f64 = transmute(vfms_n_f64(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmsq_n_f64() {
        let a: f64x2 = f64x2::new(50.0, 35.0);
        let b: f64x2 = f64x2::new(6.0, 4.0);
        let c: f64 = 8.0;
        let e: f64x2 = f64x2::new(2.0, 3.0);
        let r: f64x2 = transmute(vfmsq_n_f64(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }
23037
    // vfms by-lane tests: each expected lane satisfies e[i] = a[i] - b[i] * c[0]
    // (2 = 14 - 6*2, 3 = 11 - 4*2, ...). Lanes of `c` beyond lane 0 are 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vfms_lane_f32() {
        let a: f32x2 = f32x2::new(14., 11.);
        let b: f32x2 = f32x2::new(6., 4.);
        let c: f32x2 = f32x2::new(2., 0.);
        let e: f32x2 = f32x2::new(2., 3.);
        let r: f32x2 = transmute(vfms_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfms_laneq_f32() {
        let a: f32x2 = f32x2::new(14., 11.);
        let b: f32x2 = f32x2::new(6., 4.);
        let c: f32x4 = f32x4::new(2., 0., 0., 0.);
        let e: f32x2 = f32x2::new(2., 3.);
        let r: f32x2 = transmute(vfms_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmsq_lane_f32() {
        let a: f32x4 = f32x4::new(14., 11., 18., 21.);
        let b: f32x4 = f32x4::new(6., 4., 7., 8.);
        let c: f32x2 = f32x2::new(2., 0.);
        let e: f32x4 = f32x4::new(2., 3., 4., 5.);
        let r: f32x4 = transmute(vfmsq_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmsq_laneq_f32() {
        let a: f32x4 = f32x4::new(14., 11., 18., 21.);
        let b: f32x4 = f32x4::new(6., 4., 7., 8.);
        let c: f32x4 = f32x4::new(2., 0., 0., 0.);
        let e: f32x4 = f32x4::new(2., 3., 4., 5.);
        let r: f32x4 = transmute(vfmsq_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    // f64 `_lane_` variants: `c` is a bare f64 transmuted to a one-lane vector.
    #[simd_test(enable = "neon")]
    unsafe fn test_vfms_lane_f64() {
        let a: f64 = 14.;
        let b: f64 = 6.;
        let c: f64 = 2.;
        let e: f64 = 2.;
        let r: f64 = transmute(vfms_lane_f64::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfms_laneq_f64() {
        let a: f64 = 14.;
        let b: f64 = 6.;
        let c: f64x2 = f64x2::new(2., 0.);
        let e: f64 = 2.;
        let r: f64 = transmute(vfms_laneq_f64::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmsq_lane_f64() {
        let a: f64x2 = f64x2::new(14., 11.);
        let b: f64x2 = f64x2::new(6., 4.);
        let c: f64 = 2.;
        let e: f64x2 = f64x2::new(2., 3.);
        let r: f64x2 = transmute(vfmsq_lane_f64::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmsq_laneq_f64() {
        let a: f64x2 = f64x2::new(14., 11.);
        let b: f64x2 = f64x2::new(6., 4.);
        let c: f64x2 = f64x2::new(2., 0.);
        let e: f64x2 = f64x2::new(2., 3.);
        let r: f64x2 = transmute(vfmsq_laneq_f64::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
23117
    // Scalar vfms by-lane tests: e = a - b * c[0] (2 = 14 - 6*2).
    #[simd_test(enable = "neon")]
    unsafe fn test_vfmss_lane_f32() {
        let a: f32 = 14.;
        let b: f32 = 6.;
        let c: f32x2 = f32x2::new(2., 0.);
        let e: f32 = 2.;
        let r: f32 = vfmss_lane_f32::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmss_laneq_f32() {
        let a: f32 = 14.;
        let b: f32 = 6.;
        let c: f32x4 = f32x4::new(2., 0., 0., 0.);
        let e: f32 = 2.;
        let r: f32 = vfmss_laneq_f32::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    // `c` is a bare f64 transmuted to a one-lane vector here.
    #[simd_test(enable = "neon")]
    unsafe fn test_vfmsd_lane_f64() {
        let a: f64 = 14.;
        let b: f64 = 6.;
        let c: f64 = 2.;
        let e: f64 = 2.;
        let r: f64 = vfmsd_lane_f64::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vfmsd_laneq_f64() {
        let a: f64 = 14.;
        let b: f64 = 6.;
        let c: f64x2 = f64x2::new(2., 0.);
        let e: f64 = 2.;
        let r: f64 = vfmsd_laneq_f64::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }
23157
    // vdiv tests: lane-wise floating division, e[i] = a[i] / b[i].
    // Divisors are exact powers of two so results are representable exactly
    // and assert_eq! on floats is safe.
    #[simd_test(enable = "neon")]
    unsafe fn test_vdiv_f32() {
        let a: f32x2 = f32x2::new(2.0, 6.0);
        let b: f32x2 = f32x2::new(1.0, 2.0);
        let e: f32x2 = f32x2::new(2.0, 3.0);
        let r: f32x2 = transmute(vdiv_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdivq_f32() {
        let a: f32x4 = f32x4::new(2.0, 6.0, 4.0, 10.0);
        let b: f32x4 = f32x4::new(1.0, 2.0, 1.0, 2.0);
        let e: f32x4 = f32x4::new(2.0, 3.0, 4.0, 5.0);
        let r: f32x4 = transmute(vdivq_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdiv_f64() {
        let a: f64 = 2.0;
        let b: f64 = 1.0;
        let e: f64 = 2.0;
        let r: f64 = transmute(vdiv_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdivq_f64() {
        let a: f64x2 = f64x2::new(2.0, 6.0);
        let b: f64x2 = f64x2::new(1.0, 2.0);
        let e: f64x2 = f64x2::new(2.0, 3.0);
        let r: f64x2 = transmute(vdivq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
23193
    // vsub tests: lane-wise subtraction, e[i] = a[i] - b[i]; the `vsubd`
    // variants operate on plain 64-bit scalars with no transmute needed.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsub_f64() {
        let a: f64 = 1.0;
        let b: f64 = 1.0;
        let e: f64 = 0.0;
        let r: f64 = transmute(vsub_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubq_f64() {
        let a: f64x2 = f64x2::new(1.0, 4.0);
        let b: f64x2 = f64x2::new(1.0, 2.0);
        let e: f64x2 = f64x2::new(0.0, 2.0);
        let r: f64x2 = transmute(vsubq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubd_s64() {
        let a: i64 = 3;
        let b: i64 = 2;
        let e: i64 = 1;
        let r: i64 = vsubd_s64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubd_u64() {
        let a: u64 = 3;
        let b: u64 = 2;
        let e: u64 = 1;
        let r: u64 = vsubd_u64(a, b);
        assert_eq!(r, e);
    }
23229
    // vaddd tests: scalar 64-bit addition, e = a + b (1 + 2 == 3).
    #[simd_test(enable = "neon")]
    unsafe fn test_vaddd_s64() {
        let a: i64 = 1;
        let b: i64 = 2;
        let e: i64 = 3;
        let r: i64 = vaddd_s64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddd_u64() {
        let a: u64 = 1;
        let b: u64 = 2;
        let e: u64 = 3;
        let r: u64 = vaddd_u64(a, b);
        assert_eq!(r, e);
    }
23247
    // vaddv tests: horizontal add across all lanes to one scalar; every
    // expected value is the sum of the input lanes (e.g. 1 + 2 == 3).
    #[simd_test(enable = "neon")]
    unsafe fn test_vaddv_f32() {
        let a: f32x2 = f32x2::new(1., 2.);
        let e: f32 = 3.;
        let r: f32 = vaddv_f32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddvq_f32() {
        let a: f32x4 = f32x4::new(1., 2., 0., 0.);
        let e: f32 = 3.;
        let r: f32 = vaddvq_f32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddvq_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let e: f64 = 3.;
        let r: f64 = vaddvq_f64(transmute(a));
        assert_eq!(r, e);
    }
23271
    // vaddlv tests: horizontal add across lanes with a widened result type
    // (i16 -> i32, i32 -> i64, and unsigned counterparts). Expected values
    // are the lane sums: 1+2+3+4 == 10, 1+..+8 == 36, 1+2 == 3.
    #[simd_test(enable = "neon")]
    unsafe fn test_vaddlv_s16() {
        let a: i16x4 = i16x4::new(1, 2, 3, 4);
        let e: i32 = 10;
        let r: i32 = vaddlv_s16(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddlvq_s16() {
        let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
        let e: i32 = 36;
        let r: i32 = vaddlvq_s16(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddlv_s32() {
        let a: i32x2 = i32x2::new(1, 2);
        let e: i64 = 3;
        let r: i64 = vaddlv_s32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddlvq_s32() {
        let a: i32x4 = i32x4::new(1, 2, 3, 4);
        let e: i64 = 10;
        let r: i64 = vaddlvq_s32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddlv_u16() {
        let a: u16x4 = u16x4::new(1, 2, 3, 4);
        let e: u32 = 10;
        let r: u32 = vaddlv_u16(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddlvq_u16() {
        let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
        let e: u32 = 36;
        let r: u32 = vaddlvq_u16(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddlv_u32() {
        let a: u32x2 = u32x2::new(1, 2);
        let e: u64 = 3;
        let r: u64 = vaddlv_u32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddlvq_u32() {
        let a: u32x4 = u32x4::new(1, 2, 3, 4);
        let e: u64 = 10;
        let r: u64 = vaddlvq_u32(transmute(a));
        assert_eq!(r, e);
    }
23335
    // vsubw_high tests: widening subtract of the HIGH half of the narrow
    // vector `b` from the wide vector `a`. In every case the high half of `b`
    // is chosen equal to `a`, so the expected result is all zeros — a wrong
    // half-selection would subtract the low half and produce non-zero lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsubw_high_s8() {
        let a: i16x8 = i16x8::new(8, 9, 10, 12, 13, 14, 15, 16);
        let b: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16);
        let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
        let r: i16x8 = transmute(vsubw_high_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubw_high_s16() {
        let a: i32x4 = i32x4::new(8, 9, 10, 11);
        let b: i16x8 = i16x8::new(0, 1, 2, 3, 8, 9, 10, 11);
        let e: i32x4 = i32x4::new(0, 0, 0, 0);
        let r: i32x4 = transmute(vsubw_high_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubw_high_s32() {
        let a: i64x2 = i64x2::new(8, 9);
        let b: i32x4 = i32x4::new(6, 7, 8, 9);
        let e: i64x2 = i64x2::new(0, 0);
        let r: i64x2 = transmute(vsubw_high_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubw_high_u8() {
        let a: u16x8 = u16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
        let b: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
        let r: u16x8 = transmute(vsubw_high_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubw_high_u16() {
        let a: u32x4 = u32x4::new(8, 9, 10, 11);
        let b: u16x8 = u16x8::new(0, 1, 2, 3, 8, 9, 10, 11);
        let e: u32x4 = u32x4::new(0, 0, 0, 0);
        let r: u32x4 = transmute(vsubw_high_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubw_high_u32() {
        let a: u64x2 = u64x2::new(8, 9);
        let b: u32x4 = u32x4::new(6, 7, 8, 9);
        let e: u64x2 = u64x2::new(0, 0);
        let r: u64x2 = transmute(vsubw_high_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
23389
    // vsubl_high tests: widening subtract of the HIGH halves of both narrow
    // vectors. E.g. for s8: high half of a = 8..15, high half of b = 2,2,..,
    // so e = 6..13 in the widened type. A low-half mix-up would change the
    // expected lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsubl_high_s8() {
        let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let b: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2);
        let e: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
        let r: i16x8 = transmute(vsubl_high_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubl_high_s16() {
        let a: i16x8 = i16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
        let b: i16x8 = i16x8::new(6, 6, 6, 6, 8, 8, 8, 8);
        let e: i32x4 = i32x4::new(4, 5, 6, 7);
        let r: i32x4 = transmute(vsubl_high_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubl_high_s32() {
        let a: i32x4 = i32x4::new(12, 13, 14, 15);
        let b: i32x4 = i32x4::new(6, 6, 8, 8);
        let e: i64x2 = i64x2::new(6, 7);
        let r: i64x2 = transmute(vsubl_high_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubl_high_u8() {
        let a: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let b: u8x16 = u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2);
        let e: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
        let r: u16x8 = transmute(vsubl_high_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubl_high_u16() {
        let a: u16x8 = u16x8::new(8, 9, 10, 11, 12, 13, 14, 15);
        let b: u16x8 = u16x8::new(6, 6, 6, 6, 8, 8, 8, 8);
        let e: u32x4 = u32x4::new(4, 5, 6, 7);
        let r: u32x4 = transmute(vsubl_high_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsubl_high_u32() {
        let a: u32x4 = u32x4::new(12, 13, 14, 15);
        let b: u32x4 = u32x4::new(6, 6, 8, 8);
        let e: u64x2 = u64x2::new(6, 7);
        let r: u64x2 = transmute(vsubl_high_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
23443
    // vbcaxq tests (bit clear and XOR, requires the sha3 feature): every
    // expected lane satisfies e = a ^ (b & !c) — e.g. for lane 2 of the s8
    // case: 1 ^ (2 & !1) == 3, which matches `e`.
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vbcaxq_s8() {
        let a: i8x16 = i8x16::new(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0);
        let b: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let c: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
        let e: i8x16 = i8x16::new(1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
        let r: i8x16 = transmute(vbcaxq_s8(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vbcaxq_s16() {
        let a: i16x8 = i16x8::new(1, 0, 1, 0, 1, 0, 1, 0);
        let b: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
        let c: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
        let e: i16x8 = i16x8::new(1, 0, 3, 2, 5, 4, 7, 6);
        let r: i16x8 = transmute(vbcaxq_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vbcaxq_s32() {
        let a: i32x4 = i32x4::new(1, 0, 1, 0);
        let b: i32x4 = i32x4::new(0, 1, 2, 3);
        let c: i32x4 = i32x4::new(1, 1, 1, 1);
        let e: i32x4 = i32x4::new(1, 0, 3, 2);
        let r: i32x4 = transmute(vbcaxq_s32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vbcaxq_s64() {
        let a: i64x2 = i64x2::new(1, 0);
        let b: i64x2 = i64x2::new(0, 1);
        let c: i64x2 = i64x2::new(1, 1);
        let e: i64x2 = i64x2::new(1, 0);
        let r: i64x2 = transmute(vbcaxq_s64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    // Unsigned variants mirror the signed ones bit-for-bit.
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vbcaxq_u8() {
        let a: u8x16 = u8x16::new(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0);
        let b: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let c: u8x16 = u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
        let e: u8x16 = u8x16::new(1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
        let r: u8x16 = transmute(vbcaxq_u8(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vbcaxq_u16() {
        let a: u16x8 = u16x8::new(1, 0, 1, 0, 1, 0, 1, 0);
        let b: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
        let c: u16x8 = u16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
        let e: u16x8 = u16x8::new(1, 0, 3, 2, 5, 4, 7, 6);
        let r: u16x8 = transmute(vbcaxq_u16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vbcaxq_u32() {
        let a: u32x4 = u32x4::new(1, 0, 1, 0);
        let b: u32x4 = u32x4::new(0, 1, 2, 3);
        let c: u32x4 = u32x4::new(1, 1, 1, 1);
        let e: u32x4 = u32x4::new(1, 0, 3, 2);
        let r: u32x4 = transmute(vbcaxq_u32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vbcaxq_u64() {
        let a: u64x2 = u64x2::new(1, 0);
        let b: u64x2 = u64x2::new(0, 1);
        let c: u64x2 = u64x2::new(1, 1);
        let e: u64x2 = u64x2::new(1, 0);
        let r: u64x2 = transmute(vbcaxq_u64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
23523
    // vcadd rotate tests (requires the fcma feature). Lanes are (re, im)
    // pairs. The expected values match the complex-add-with-rotation
    // identities: rot270 -> (a.re + b.im, a.im - b.re), so (1,-1)+(rot270 of
    // (-1,1)) = (2, 0); rot90 -> (a.re - b.im, a.im + b.re) = (0, -2).
    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcadd_rot270_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let e: f32x2 = f32x2::new(2., 0.);
        let r: f32x2 = transmute(vcadd_rot270_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcaddq_rot270_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let e: f32x4 = f32x4::new(2., 0., 2., 0.);
        let r: f32x4 = transmute(vcaddq_rot270_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcaddq_rot270_f64() {
        let a: f64x2 = f64x2::new(1., -1.);
        let b: f64x2 = f64x2::new(-1., 1.);
        let e: f64x2 = f64x2::new(2., 0.);
        let r: f64x2 = transmute(vcaddq_rot270_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcadd_rot90_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let e: f32x2 = f32x2::new(0., -2.);
        let r: f32x2 = transmute(vcadd_rot90_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcaddq_rot90_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let e: f32x4 = f32x4::new(0., -2., 0., -2.);
        let r: f32x4 = transmute(vcaddq_rot90_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcaddq_rot90_f64() {
        let a: f64x2 = f64x2::new(1., -1.);
        let b: f64x2 = f64x2::new(-1., 1.);
        let e: f64x2 = f64x2::new(0., -2.);
        let r: f64x2 = transmute(vcaddq_rot90_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
23577
    // vcmla (no rotation) tests: the expected values match
    // e = a + (b.re * c) applied per complex pair, e.g.
    // (1,-1) + (-1)*(1,1) = (0,-2).
    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x2 = f32x2::new(0., -2.);
        let r: f32x2 = transmute(vcmla_f32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let c: f32x4 = f32x4::new(1., 1., -1., -1.);
        let e: f32x4 = f32x4::new(0., -2., 2., 0.);
        let r: f32x4 = transmute(vcmlaq_f32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_f64() {
        let a: f64x2 = f64x2::new(1., -1.);
        let b: f64x2 = f64x2::new(-1., 1.);
        let c: f64x2 = f64x2::new(1., 1.);
        let e: f64x2 = f64x2::new(0., -2.);
        let r: f64x2 = transmute(vcmlaq_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
23607
    // --- FCMLA (floating-point complex multiply-accumulate) tests ---
    //
    // Pattern used throughout: inputs are built as portable repr vectors
    // (`f32x2`/`f32x4`/`f64x2`), transmuted to the NEON types the intrinsic
    // expects, and the transmuted result is compared with exact `assert_eq!`
    // (inputs are small integers-in-float, so all results are exactly
    // representable — no epsilon comparison needed).
    // `_lane`/`_laneq` variants pin the complex lane index via the const
    // generic argument (`::<0>`).
    // NOTE: generated from crates/stdarch-gen-arm/neon.spec — to change a
    // test vector, edit the spec and regenerate; do not hand-edit here.
    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_rot90_f32() {
        let a: f32x2 = f32x2::new(1., 1.);
        let b: f32x2 = f32x2::new(1., -1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x2 = f32x2::new(2., 0.);
        let r: f32x2 = transmute(vcmla_rot90_f32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot90_f32() {
        let a: f32x4 = f32x4::new(1., 1., 1., 1.);
        let b: f32x4 = f32x4::new(1., -1., 1., -1.);
        let c: f32x4 = f32x4::new(1., 1., 1., 1.);
        let e: f32x4 = f32x4::new(2., 0., 2., 0.);
        let r: f32x4 = transmute(vcmlaq_rot90_f32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot90_f64() {
        let a: f64x2 = f64x2::new(1., 1.);
        let b: f64x2 = f64x2::new(1., -1.);
        let c: f64x2 = f64x2::new(1., 1.);
        let e: f64x2 = f64x2::new(2., 0.);
        let r: f64x2 = transmute(vcmlaq_rot90_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_rot180_f32() {
        let a: f32x2 = f32x2::new(1., 1.);
        let b: f32x2 = f32x2::new(1., -1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x2 = f32x2::new(0., 0.);
        let r: f32x2 = transmute(vcmla_rot180_f32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot180_f32() {
        let a: f32x4 = f32x4::new(1., 1., 1., 1.);
        let b: f32x4 = f32x4::new(1., -1., 1., -1.);
        let c: f32x4 = f32x4::new(1., 1., 1., 1.);
        let e: f32x4 = f32x4::new(0., 0., 0., 0.);
        let r: f32x4 = transmute(vcmlaq_rot180_f32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot180_f64() {
        let a: f64x2 = f64x2::new(1., 1.);
        let b: f64x2 = f64x2::new(1., -1.);
        let c: f64x2 = f64x2::new(1., 1.);
        let e: f64x2 = f64x2::new(0., 0.);
        let r: f64x2 = transmute(vcmlaq_rot180_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_rot270_f32() {
        let a: f32x2 = f32x2::new(1., 1.);
        let b: f32x2 = f32x2::new(1., -1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x2 = f32x2::new(0., 2.);
        let r: f32x2 = transmute(vcmla_rot270_f32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot270_f32() {
        let a: f32x4 = f32x4::new(1., 1., 1., 1.);
        let b: f32x4 = f32x4::new(1., -1., 1., -1.);
        let c: f32x4 = f32x4::new(1., 1., 1., 1.);
        let e: f32x4 = f32x4::new(0., 2., 0., 2.);
        let r: f32x4 = transmute(vcmlaq_rot270_f32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot270_f64() {
        let a: f64x2 = f64x2::new(1., 1.);
        let b: f64x2 = f64x2::new(1., -1.);
        let c: f64x2 = f64x2::new(1., 1.);
        let e: f64x2 = f64x2::new(0., 2.);
        let r: f64x2 = transmute(vcmlaq_rot270_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_lane_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x2 = f32x2::new(0., -2.);
        let r: f32x2 = transmute(vcmla_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_laneq_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let c: f32x4 = f32x4::new(1., 1., -1., -1.);
        let e: f32x2 = f32x2::new(0., -2.);
        let r: f32x2 = transmute(vcmla_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_lane_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x4 = f32x4::new(0., -2., 0., -2.);
        let r: f32x4 = transmute(vcmlaq_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_laneq_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let c: f32x4 = f32x4::new(1., 1., -1., -1.);
        let e: f32x4 = f32x4::new(0., -2., 0., -2.);
        let r: f32x4 = transmute(vcmlaq_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_rot90_lane_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x2 = f32x2::new(0., 0.);
        let r: f32x2 = transmute(vcmla_rot90_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_rot90_laneq_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let c: f32x4 = f32x4::new(1., 1., -1., -1.);
        let e: f32x2 = f32x2::new(0., 0.);
        let r: f32x2 = transmute(vcmla_rot90_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot90_lane_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x4 = f32x4::new(0., 0., 0., 0.);
        let r: f32x4 = transmute(vcmlaq_rot90_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot90_laneq_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let c: f32x4 = f32x4::new(1., 1., -1., -1.);
        let e: f32x4 = f32x4::new(0., 0., 0., 0.);
        let r: f32x4 = transmute(vcmlaq_rot90_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_rot180_lane_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x2 = f32x2::new(2., 0.);
        let r: f32x2 = transmute(vcmla_rot180_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_rot180_laneq_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let c: f32x4 = f32x4::new(1., 1., -1., -1.);
        let e: f32x2 = f32x2::new(2., 0.);
        let r: f32x2 = transmute(vcmla_rot180_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot180_lane_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x4 = f32x4::new(2., 0., 2., 0.);
        let r: f32x4 = transmute(vcmlaq_rot180_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot180_laneq_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let c: f32x4 = f32x4::new(1., 1., -1., -1.);
        let e: f32x4 = f32x4::new(2., 0., 2., 0.);
        let r: f32x4 = transmute(vcmlaq_rot180_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_rot270_lane_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x2 = f32x2::new(2., -2.);
        let r: f32x2 = transmute(vcmla_rot270_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmla_rot270_laneq_f32() {
        let a: f32x2 = f32x2::new(1., -1.);
        let b: f32x2 = f32x2::new(-1., 1.);
        let c: f32x4 = f32x4::new(1., 1., -1., -1.);
        let e: f32x2 = f32x2::new(2., -2.);
        let r: f32x2 = transmute(vcmla_rot270_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot270_lane_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let c: f32x2 = f32x2::new(1., 1.);
        let e: f32x4 = f32x4::new(2., -2., 2., -2.);
        let r: f32x4 = transmute(vcmlaq_rot270_lane_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,fcma")]
    unsafe fn test_vcmlaq_rot270_laneq_f32() {
        let a: f32x4 = f32x4::new(1., -1., 1., -1.);
        let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
        let c: f32x4 = f32x4::new(1., 1., -1., -1.);
        let e: f32x4 = f32x4::new(2., -2., 2., -2.);
        let r: f32x4 = transmute(vcmlaq_rot270_laneq_f32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
23857
    // --- SDOT/UDOT lane tests (dotprod extension) ---
    //
    // Each output lane accumulates a 4-element byte dot product into the
    // 32-bit accumulator `a`; `::<0>` selects the first 4-byte group of `c`,
    // i.e. (1, 2, 3, 4), for every output lane. Example for the first lane of
    // the signed test: 1 + (-1*1 + 2*2 + 3*3 + 4*4) = 29.
    // Generated from neon.spec — edit the spec, not this file.
    #[simd_test(enable = "neon,dotprod")]
    unsafe fn test_vdot_laneq_s32() {
        let a: i32x2 = i32x2::new(1, 2);
        let b: i8x8 = i8x8::new(-1, 2, 3, 4, 5, 6, 7, 8);
        let c: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
        let e: i32x2 = i32x2::new(29, 72);
        let r: i32x2 = transmute(vdot_laneq_s32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,dotprod")]
    unsafe fn test_vdotq_laneq_s32() {
        let a: i32x4 = i32x4::new(1, 2, 1, 2);
        let b: i8x16 = i8x16::new(-1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
        let c: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
        let e: i32x4 = i32x4::new(29, 72, 31, 72);
        let r: i32x4 = transmute(vdotq_laneq_s32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,dotprod")]
    unsafe fn test_vdot_laneq_u32() {
        let a: u32x2 = u32x2::new(1, 2);
        let b: u8x8 = u8x8::new(255, 2, 3, 4, 5, 6, 7, 8);
        let c: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
        let e: u32x2 = u32x2::new(285, 72);
        let r: u32x2 = transmute(vdot_laneq_u32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon,dotprod")]
    unsafe fn test_vdotq_laneq_u32() {
        let a: u32x4 = u32x4::new(1, 2, 1, 2);
        let b: u8x16 = u8x16::new(255, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
        let c: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
        let e: u32x4 = u32x4::new(285, 72, 31, 72);
        let r: u32x4 = transmute(vdotq_laneq_u32::<0>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
23897
    // --- f64 maximum tests: vmax (elementwise), vmaxnm (elementwise,
    // "number" semantics), and vmaxnmv* (across-vector reductions returning a
    // scalar). All inputs are finite, so only the plain max behaviour is
    // exercised here; NaN-propagation differences between MAX and MAXNM are
    // not covered by these vectors.
    // Generated from neon.spec — edit the spec, not this file.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmax_f64() {
        let a: f64 = 1.0;
        let b: f64 = 0.0;
        let e: f64 = 1.0;
        let r: f64 = transmute(vmax_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxq_f64() {
        let a: f64x2 = f64x2::new(1.0, -2.0);
        let b: f64x2 = f64x2::new(0.0, 3.0);
        let e: f64x2 = f64x2::new(1.0, 3.0);
        let r: f64x2 = transmute(vmaxq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxnm_f64() {
        let a: f64 = 1.0;
        let b: f64 = 8.0;
        let e: f64 = 8.0;
        let r: f64 = transmute(vmaxnm_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxnmq_f64() {
        let a: f64x2 = f64x2::new(1.0, 2.0);
        let b: f64x2 = f64x2::new(8.0, 16.0);
        let e: f64x2 = f64x2::new(8.0, 16.0);
        let r: f64x2 = transmute(vmaxnmq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxnmv_f32() {
        let a: f32x2 = f32x2::new(1., 2.);
        let e: f32 = 2.;
        let r: f32 = vmaxnmv_f32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxnmvq_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let e: f64 = 2.;
        let r: f64 = vmaxnmvq_f64(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxnmvq_f32() {
        let a: f32x4 = f32x4::new(1., 2., 0., 1.);
        let e: f32 = 2.;
        let r: f32 = vmaxnmvq_f32(transmute(a));
        assert_eq!(r, e);
    }
23957
    // --- Pairwise maximum tests (vpmaxnm*, vpmaxs/vpmaxqd) ---
    //
    // Pairwise ops take the max of adjacent element pairs: the result's low
    // half comes from pairs of `a`, the high half from pairs of `b`, e.g.
    // vpmaxnm_f32((1, 2), (6, -3)) = (max(1, 2), max(6, -3)) = (2, 6).
    // The scalar forms (vpmaxnms_f32, vpmaxs_f32, ...) reduce one vector's
    // single pair to a scalar.
    // Generated from neon.spec — edit the spec, not this file.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxnm_f32() {
        let a: f32x2 = f32x2::new(1.0, 2.0);
        let b: f32x2 = f32x2::new(6.0, -3.0);
        let e: f32x2 = f32x2::new(2.0, 6.0);
        let r: f32x2 = transmute(vpmaxnm_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxnmq_f64() {
        let a: f64x2 = f64x2::new(1.0, 2.0);
        let b: f64x2 = f64x2::new(6.0, -3.0);
        let e: f64x2 = f64x2::new(2.0, 6.0);
        let r: f64x2 = transmute(vpmaxnmq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxnmq_f32() {
        let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, -4.0);
        let b: f32x4 = f32x4::new(8.0, 16.0, -1.0, 6.0);
        let e: f32x4 = f32x4::new(2.0, 3.0, 16.0, 6.0);
        let r: f32x4 = transmute(vpmaxnmq_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxnms_f32() {
        let a: f32x2 = f32x2::new(1., 2.);
        let e: f32 = 2.;
        let r: f32 = vpmaxnms_f32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxnmqd_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let e: f64 = 2.;
        let r: f64 = vpmaxnmqd_f64(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxs_f32() {
        let a: f32x2 = f32x2::new(1., 2.);
        let e: f32 = 2.;
        let r: f32 = vpmaxs_f32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxqd_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let e: f64 = 2.;
        let r: f64 = vpmaxqd_f64(transmute(a));
        assert_eq!(r, e);
    }
24016
    // --- f64 minimum tests: vmin (elementwise), vminnm (elementwise,
    // "number" semantics), and vminnmv* (across-vector reductions returning a
    // scalar). Mirrors the vmax group above; all inputs are finite, so NaN
    // handling differences between MIN and MINNM are not exercised.
    // Generated from neon.spec — edit the spec, not this file.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmin_f64() {
        let a: f64 = 1.0;
        let b: f64 = 0.0;
        let e: f64 = 0.0;
        let r: f64 = transmute(vmin_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vminq_f64() {
        let a: f64x2 = f64x2::new(1.0, -2.0);
        let b: f64x2 = f64x2::new(0.0, 3.0);
        let e: f64x2 = f64x2::new(0.0, -2.0);
        let r: f64x2 = transmute(vminq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vminnm_f64() {
        let a: f64 = 1.0;
        let b: f64 = 8.0;
        let e: f64 = 1.0;
        let r: f64 = transmute(vminnm_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vminnmq_f64() {
        let a: f64x2 = f64x2::new(1.0, 2.0);
        let b: f64x2 = f64x2::new(8.0, 16.0);
        let e: f64x2 = f64x2::new(1.0, 2.0);
        let r: f64x2 = transmute(vminnmq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vminnmv_f32() {
        let a: f32x2 = f32x2::new(1., 0.);
        let e: f32 = 0.;
        let r: f32 = vminnmv_f32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vminnmvq_f64() {
        let a: f64x2 = f64x2::new(1., 0.);
        let e: f64 = 0.;
        let r: f64 = vminnmvq_f64(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vminnmvq_f32() {
        let a: f32x4 = f32x4::new(1., 0., 2., 3.);
        let e: f32 = 0.;
        let r: f32 = vminnmvq_f32(transmute(a));
        assert_eq!(r, e);
    }
24076
    // --- vmovl_high tests: widen the HIGH half of a vector to the next
    // wider element type (sign-extend for s*, zero-extend for u*). The low
    // half of each input is deliberately distinct from the high half so a
    // wrong-half bug would be caught.
    // Generated from neon.spec — edit the spec, not this file.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmovl_high_s8() {
        let a: i8x16 = i8x16::new(1, 2, 3, 4, 3, 4, 5, 6, 3, 4, 5, 6, 7, 8, 9, 10);
        let e: i16x8 = i16x8::new(3, 4, 5, 6, 7, 8, 9, 10);
        let r: i16x8 = transmute(vmovl_high_s8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovl_high_s16() {
        let a: i16x8 = i16x8::new(1, 2, 3, 4, 3, 4, 5, 6);
        let e: i32x4 = i32x4::new(3, 4, 5, 6);
        let r: i32x4 = transmute(vmovl_high_s16(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovl_high_s32() {
        let a: i32x4 = i32x4::new(1, 2, 3, 4);
        let e: i64x2 = i64x2::new(3, 4);
        let r: i64x2 = transmute(vmovl_high_s32(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovl_high_u8() {
        let a: u8x16 = u8x16::new(1, 2, 3, 4, 3, 4, 5, 6, 3, 4, 5, 6, 7, 8, 9, 10);
        let e: u16x8 = u16x8::new(3, 4, 5, 6, 7, 8, 9, 10);
        let r: u16x8 = transmute(vmovl_high_u8(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovl_high_u16() {
        let a: u16x8 = u16x8::new(1, 2, 3, 4, 3, 4, 5, 6);
        let e: u32x4 = u32x4::new(3, 4, 5, 6);
        let r: u32x4 = transmute(vmovl_high_u16(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovl_high_u32() {
        let a: u32x4 = u32x4::new(1, 2, 3, 4);
        let e: u64x2 = u64x2::new(3, 4);
        let r: u64x2 = transmute(vmovl_high_u32(transmute(a)));
        assert_eq!(r, e);
    }
24124
    // --- Pairwise add tests: adjacent pairs of `a` form the low result
    // half, adjacent pairs of `b` the high half, e.g.
    // vpaddq_f32((1, 2, 3, 4), (3, 4, 5, 6)) = (1+2, 3+4, 3+4, 5+6).
    // Scalar forms (vpadds_f32, vpaddd_f64) sum the two lanes of one vector.
    // Generated from neon.spec — edit the spec, not this file.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddq_f32() {
        let a: f32x4 = f32x4::new(1., 2., 3., 4.);
        let b: f32x4 = f32x4::new(3., 4., 5., 6.);
        let e: f32x4 = f32x4::new(3., 7., 7., 11.);
        let r: f32x4 = transmute(vpaddq_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddq_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let b: f64x2 = f64x2::new(3., 4.);
        let e: f64x2 = f64x2::new(3., 7.);
        let r: f64x2 = transmute(vpaddq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpadds_f32() {
        let a: f32x2 = f32x2::new(1., 2.);
        let e: f32 = 3.;
        let r: f32 = vpadds_f32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddd_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let e: f64 = 3.;
        let r: f64 = vpaddd_f64(transmute(a));
        assert_eq!(r, e);
    }
24158
    // --- Pairwise minimum tests (vpminnm*, vpmins/vpminqd) ---
    //
    // Same pairwise layout as the vpmax group: low result half = min of
    // adjacent pairs of `a`, high half = min of adjacent pairs of `b`.
    // Scalar forms reduce one vector's single pair to a scalar.
    // Generated from neon.spec — edit the spec, not this file.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpminnm_f32() {
        let a: f32x2 = f32x2::new(1.0, 2.0);
        let b: f32x2 = f32x2::new(6.0, -3.0);
        let e: f32x2 = f32x2::new(1.0, -3.0);
        let r: f32x2 = transmute(vpminnm_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpminnmq_f64() {
        let a: f64x2 = f64x2::new(1.0, 2.0);
        let b: f64x2 = f64x2::new(6.0, -3.0);
        let e: f64x2 = f64x2::new(1.0, -3.0);
        let r: f64x2 = transmute(vpminnmq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpminnmq_f32() {
        let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, -4.0);
        let b: f32x4 = f32x4::new(8.0, 16.0, -1.0, 6.0);
        let e: f32x4 = f32x4::new(1.0, -4.0, 8.0, -1.0);
        let r: f32x4 = transmute(vpminnmq_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpminnms_f32() {
        let a: f32x2 = f32x2::new(1., 2.);
        let e: f32 = 1.;
        let r: f32 = vpminnms_f32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpminnmqd_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let e: f64 = 1.;
        let r: f64 = vpminnmqd_f64(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpmins_f32() {
        let a: f32x2 = f32x2::new(1., 2.);
        let e: f32 = 1.;
        let r: f32 = vpmins_f32(transmute(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vpminqd_f64() {
        let a: f64x2 = f64x2::new(1., 2.);
        let e: f64 = 1.;
        let r: f64 = vpminqd_f64(transmute(a));
        assert_eq!(r, e);
    }
24217
    // --- Saturating doubling multiply long (vqdmull*) tests ---
    //
    // Result element = 2 * a * b widened to the next element width, e.g.
    // vqdmullh_s16(2, 3) = 2*2*3 = 12. The `_high` variants operate on the
    // HIGH halves of their quad inputs (the low halves here hold decoy
    // values); `_lane`/`_laneq` variants broadcast the lane selected by the
    // const generic. These small inputs never saturate, so the clamping
    // behaviour of the Q (saturating) forms is not exercised here.
    // Generated from neon.spec — edit the spec, not this file.
    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmullh_s16() {
        let a: i16 = 2;
        let b: i16 = 3;
        let e: i32 = 12;
        let r: i32 = vqdmullh_s16(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulls_s32() {
        let a: i32 = 2;
        let b: i32 = 3;
        let e: i64 = 12;
        let r: i64 = vqdmulls_s32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmull_high_s16() {
        let a: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
        let b: i16x8 = i16x8::new(1, 2, 5, 6, 5, 6, 7, 8);
        let e: i32x4 = i32x4::new(40, 60, 84, 112);
        let r: i32x4 = transmute(vqdmull_high_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmull_high_s32() {
        let a: i32x4 = i32x4::new(0, 1, 4, 5);
        let b: i32x4 = i32x4::new(1, 2, 5, 6);
        let e: i64x2 = i64x2::new(40, 60);
        let r: i64x2 = transmute(vqdmull_high_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmull_high_n_s16() {
        let a: i16x8 = i16x8::new(0, 2, 8, 10, 8, 10, 12, 14);
        let b: i16 = 2;
        let e: i32x4 = i32x4::new(32, 40, 48, 56);
        let r: i32x4 = transmute(vqdmull_high_n_s16(transmute(a), b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmull_high_n_s32() {
        let a: i32x4 = i32x4::new(0, 2, 8, 10);
        let b: i32 = 2;
        let e: i64x2 = i64x2::new(32, 40);
        let r: i64x2 = transmute(vqdmull_high_n_s32(transmute(a), b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmull_laneq_s16() {
        let a: i16x4 = i16x4::new(1, 2, 3, 4);
        let b: i16x8 = i16x8::new(0, 2, 2, 0, 2, 0, 0, 0);
        let e: i32x4 = i32x4::new(4, 8, 12, 16);
        let r: i32x4 = transmute(vqdmull_laneq_s16::<4>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmull_laneq_s32() {
        let a: i32x2 = i32x2::new(1, 2);
        let b: i32x4 = i32x4::new(0, 2, 2, 0);
        let e: i64x2 = i64x2::new(4, 8);
        let r: i64x2 = transmute(vqdmull_laneq_s32::<2>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmullh_lane_s16() {
        let a: i16 = 2;
        let b: i16x4 = i16x4::new(0, 2, 2, 0);
        let e: i32 = 8;
        let r: i32 = vqdmullh_lane_s16::<2>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmullh_laneq_s16() {
        let a: i16 = 2;
        let b: i16x8 = i16x8::new(0, 2, 2, 0, 2, 0, 0, 0);
        let e: i32 = 8;
        let r: i32 = vqdmullh_laneq_s16::<4>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulls_lane_s32() {
        let a: i32 = 2;
        let b: i32x2 = i32x2::new(0, 2);
        let e: i64 = 8;
        let r: i64 = vqdmulls_lane_s32::<1>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulls_laneq_s32() {
        let a: i32 = 2;
        let b: i32x4 = i32x4::new(0, 2, 2, 0);
        let e: i64 = 8;
        let r: i64 = vqdmulls_laneq_s32::<2>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmull_high_lane_s16() {
        let a: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
        let b: i16x4 = i16x4::new(0, 2, 2, 0);
        let e: i32x4 = i32x4::new(16, 20, 24, 28);
        let r: i32x4 = transmute(vqdmull_high_lane_s16::<2>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmull_high_lane_s32() {
        let a: i32x4 = i32x4::new(0, 1, 4, 5);
        let b: i32x2 = i32x2::new(0, 2);
        let e: i64x2 = i64x2::new(16, 20);
        let r: i64x2 = transmute(vqdmull_high_lane_s32::<1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmull_high_laneq_s16() {
        let a: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
        let b: i16x8 = i16x8::new(0, 2, 2, 0, 2, 0, 0, 0);
        let e: i32x4 = i32x4::new(16, 20, 24, 28);
        let r: i32x4 = transmute(vqdmull_high_laneq_s16::<4>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmull_high_laneq_s32() {
        let a: i32x4 = i32x4::new(0, 1, 4, 5);
        let b: i32x4 = i32x4::new(0, 2, 2, 0);
        let e: i64x2 = i64x2::new(16, 20);
        let r: i64x2 = transmute(vqdmull_high_laneq_s32::<2>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
24361
    // --- Saturating doubling multiply-accumulate long (vqdmlal*) tests ---
    //
    // Result element = a + 2*b*c widened, e.g. vqdmlalh_s16(1, 1, 2) =
    // 1 + 2*1*2 = 5. `_high` variants use the HIGH halves of the quad b/c
    // inputs (low halves are decoys); `_lane`/`_laneq` broadcast the lane
    // selected by the const generic; `_n` broadcasts a scalar. Inputs are
    // small, so the saturating (Q) behaviour is not exercised here.
    // Generated from neon.spec — edit the spec, not this file.
    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlal_high_s16() {
        let a: i32x4 = i32x4::new(1, 2, 3, 4);
        let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
        let c: i16x8 = i16x8::new(1, 2, 5, 6, 5, 6, 7, 8);
        let e: i32x4 = i32x4::new(41, 62, 87, 116);
        let r: i32x4 = transmute(vqdmlal_high_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlal_high_s32() {
        let a: i64x2 = i64x2::new(1, 2);
        let b: i32x4 = i32x4::new(0, 1, 4, 5);
        let c: i32x4 = i32x4::new(1, 2, 5, 6);
        let e: i64x2 = i64x2::new(41, 62);
        let r: i64x2 = transmute(vqdmlal_high_s32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlal_high_n_s16() {
        let a: i32x4 = i32x4::new(1, 2, 3, 4);
        let b: i16x8 = i16x8::new(0, 2, 8, 10, 8, 10, 12, 14);
        let c: i16 = 2;
        let e: i32x4 = i32x4::new(33, 42, 51, 60);
        let r: i32x4 = transmute(vqdmlal_high_n_s16(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlal_high_n_s32() {
        let a: i64x2 = i64x2::new(1, 2);
        let b: i32x4 = i32x4::new(0, 2, 8, 10);
        let c: i32 = 2;
        let e: i64x2 = i64x2::new(33, 42);
        let r: i64x2 = transmute(vqdmlal_high_n_s32(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlal_laneq_s16() {
        let a: i32x4 = i32x4::new(1, 2, 3, 4);
        let b: i16x4 = i16x4::new(1, 2, 3, 4);
        let c: i16x8 = i16x8::new(0, 2, 2, 0, 2, 0, 0, 0);
        let e: i32x4 = i32x4::new(5, 10, 15, 20);
        let r: i32x4 = transmute(vqdmlal_laneq_s16::<2>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlal_laneq_s32() {
        let a: i64x2 = i64x2::new(1, 2);
        let b: i32x2 = i32x2::new(1, 2);
        let c: i32x4 = i32x4::new(0, 2, 2, 0);
        let e: i64x2 = i64x2::new(5, 10);
        let r: i64x2 = transmute(vqdmlal_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlal_high_lane_s16() {
        let a: i32x4 = i32x4::new(1, 2, 3, 4);
        let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
        let c: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i32x4 = i32x4::new(17, 22, 27, 32);
        let r: i32x4 = transmute(vqdmlal_high_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlal_high_laneq_s16() {
        let a: i32x4 = i32x4::new(1, 2, 3, 4);
        let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
        let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i32x4 = i32x4::new(17, 22, 27, 32);
        let r: i32x4 = transmute(vqdmlal_high_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlal_high_lane_s32() {
        let a: i64x2 = i64x2::new(1, 2);
        let b: i32x4 = i32x4::new(0, 1, 4, 5);
        let c: i32x2 = i32x2::new(0, 2);
        let e: i64x2 = i64x2::new(17, 22);
        let r: i64x2 = transmute(vqdmlal_high_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlal_high_laneq_s32() {
        let a: i64x2 = i64x2::new(1, 2);
        let b: i32x4 = i32x4::new(0, 1, 4, 5);
        let c: i32x4 = i32x4::new(0, 2, 0, 0);
        let e: i64x2 = i64x2::new(17, 22);
        let r: i64x2 = transmute(vqdmlal_high_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlalh_s16() {
        let a: i32 = 1;
        let b: i16 = 1;
        let c: i16 = 2;
        let e: i32 = 5;
        let r: i32 = vqdmlalh_s16(a, b, c);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlals_s32() {
        let a: i64 = 1;
        let b: i32 = 1;
        let c: i32 = 2;
        let e: i64 = 5;
        let r: i64 = vqdmlals_s32(a, b, c);
        assert_eq!(r, e);
    }
24481
24482    #[simd_test(enable = "neon")]
24483    unsafe fn test_vqdmlalh_lane_s16() {
24484        let a: i32 = 1;
24485        let b: i16 = 1;
24486        let c: i16x4 = i16x4::new(2, 1, 1, 1);
24487        let e: i32 = 5;
24488        let r: i32 = vqdmlalh_lane_s16::<0>(a, b, transmute(c));
24489        assert_eq!(r, e);
24490    }
24491
24492    #[simd_test(enable = "neon")]
24493    unsafe fn test_vqdmlalh_laneq_s16() {
24494        let a: i32 = 1;
24495        let b: i16 = 1;
24496        let c: i16x8 = i16x8::new(2, 1, 1, 1, 1, 1, 1, 1);
24497        let e: i32 = 5;
24498        let r: i32 = vqdmlalh_laneq_s16::<0>(a, b, transmute(c));
24499        assert_eq!(r, e);
24500    }
24501
24502    #[simd_test(enable = "neon")]
24503    unsafe fn test_vqdmlals_lane_s32() {
24504        let a: i64 = 1;
24505        let b: i32 = 1;
24506        let c: i32x2 = i32x2::new(2, 1);
24507        let e: i64 = 5;
24508        let r: i64 = vqdmlals_lane_s32::<0>(a, b, transmute(c));
24509        assert_eq!(r, e);
24510    }
24511
24512    #[simd_test(enable = "neon")]
24513    unsafe fn test_vqdmlals_laneq_s32() {
24514        let a: i64 = 1;
24515        let b: i32 = 1;
24516        let c: i32x4 = i32x4::new(2, 1, 1, 1);
24517        let e: i64 = 5;
24518        let r: i64 = vqdmlals_laneq_s32::<0>(a, b, transmute(c));
24519        assert_eq!(r, e);
24520    }
24521
    // vqdmlsl* — signed saturating doubling multiply-subtract long.
    // Each test checks the widening identity r = a - 2*b*c (e.g. the first
    // test: 39 - 2*4*5 = -1 for lane 0). `_high_*` variants read the upper
    // half of the wide `b` vector, `_lane`/`_laneq` variants multiply by
    // lane <N> of `c`, and the `h`/`s`-suffixed forms are the scalar variants.
    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsl_high_s16() {
        let a: i32x4 = i32x4::new(39, 58, 81, 108);
        let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
        let c: i16x8 = i16x8::new(1, 2, 5, 6, 5, 6, 7, 8);
        let e: i32x4 = i32x4::new(-1, -2, -3, -4);
        let r: i32x4 = transmute(vqdmlsl_high_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsl_high_s32() {
        let a: i64x2 = i64x2::new(39, 58);
        let b: i32x4 = i32x4::new(0, 1, 4, 5);
        let c: i32x4 = i32x4::new(1, 2, 5, 6);
        let e: i64x2 = i64x2::new(-1, -2);
        let r: i64x2 = transmute(vqdmlsl_high_s32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsl_high_n_s16() {
        let a: i32x4 = i32x4::new(31, 38, 45, 52);
        let b: i16x8 = i16x8::new(0, 2, 8, 10, 8, 10, 12, 14);
        let c: i16 = 2;
        let e: i32x4 = i32x4::new(-1, -2, -3, -4);
        let r: i32x4 = transmute(vqdmlsl_high_n_s16(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsl_high_n_s32() {
        let a: i64x2 = i64x2::new(31, 38);
        let b: i32x4 = i32x4::new(0, 2, 8, 10);
        let c: i32 = 2;
        let e: i64x2 = i64x2::new(-1, -2);
        let r: i64x2 = transmute(vqdmlsl_high_n_s32(transmute(a), transmute(b), c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsl_laneq_s16() {
        let a: i32x4 = i32x4::new(3, 6, 9, 12);
        let b: i16x4 = i16x4::new(1, 2, 3, 4);
        let c: i16x8 = i16x8::new(0, 2, 2, 0, 2, 0, 0, 0);
        let e: i32x4 = i32x4::new(-1, -2, -3, -4);
        let r: i32x4 = transmute(vqdmlsl_laneq_s16::<2>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsl_laneq_s32() {
        let a: i64x2 = i64x2::new(3, 6);
        let b: i32x2 = i32x2::new(1, 2);
        let c: i32x4 = i32x4::new(0, 2, 2, 0);
        let e: i64x2 = i64x2::new(-1, -2);
        let r: i64x2 = transmute(vqdmlsl_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsl_high_lane_s16() {
        let a: i32x4 = i32x4::new(15, 18, 21, 24);
        let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
        let c: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i32x4 = i32x4::new(-1, -2, -3, -4);
        let r: i32x4 = transmute(vqdmlsl_high_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsl_high_laneq_s16() {
        let a: i32x4 = i32x4::new(15, 18, 21, 24);
        let b: i16x8 = i16x8::new(0, 1, 4, 5, 4, 5, 6, 7);
        let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i32x4 = i32x4::new(-1, -2, -3, -4);
        let r: i32x4 = transmute(vqdmlsl_high_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsl_high_lane_s32() {
        let a: i64x2 = i64x2::new(15, 18);
        let b: i32x4 = i32x4::new(0, 1, 4, 5);
        let c: i32x2 = i32x2::new(0, 2);
        let e: i64x2 = i64x2::new(-1, -2);
        let r: i64x2 = transmute(vqdmlsl_high_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsl_high_laneq_s32() {
        let a: i64x2 = i64x2::new(15, 18);
        let b: i32x4 = i32x4::new(0, 1, 4, 5);
        let c: i32x4 = i32x4::new(0, 2, 0, 0);
        let e: i64x2 = i64x2::new(-1, -2);
        let r: i64x2 = transmute(vqdmlsl_high_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlslh_s16() {
        let a: i32 = 10;
        let b: i16 = 1;
        let c: i16 = 2;
        let e: i32 = 6;
        let r: i32 = vqdmlslh_s16(a, b, c);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsls_s32() {
        let a: i64 = 10;
        let b: i32 = 1;
        let c: i32 = 2;
        let e: i64 = 6;
        let r: i64 = vqdmlsls_s32(a, b, c);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlslh_lane_s16() {
        let a: i32 = 10;
        let b: i16 = 1;
        let c: i16x4 = i16x4::new(2, 1, 1, 1);
        let e: i32 = 6;
        let r: i32 = vqdmlslh_lane_s16::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlslh_laneq_s16() {
        let a: i32 = 10;
        let b: i16 = 1;
        let c: i16x8 = i16x8::new(2, 1, 1, 1, 1, 1, 1, 1);
        let e: i32 = 6;
        let r: i32 = vqdmlslh_laneq_s16::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsls_lane_s32() {
        let a: i64 = 10;
        let b: i32 = 1;
        let c: i32x2 = i32x2::new(2, 1);
        let e: i64 = 6;
        let r: i64 = vqdmlsls_lane_s32::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmlsls_laneq_s32() {
        let a: i64 = 10;
        let b: i32 = 1;
        let c: i32x4 = i32x4::new(2, 1, 1, 1);
        let e: i64 = 6;
        let r: i64 = vqdmlsls_laneq_s32::<0>(a, b, transmute(c));
        assert_eq!(r, e);
    }
24681
    // vqdmulh* — signed saturating doubling multiply, returning high half.
    // Each test checks r = (2*a*b) >> element_bits, e.g. for i16:
    // (2*1*2) >> 16 = 0 and (2*2*0x7FFF) >> 16 = 1. `_lane`/`_laneq`
    // variants multiply by lane <N> of the second vector; the `h`/`s`
    // suffixed forms are the scalar i16/i32 variants.
    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulhh_s16() {
        let a: i16 = 1;
        let b: i16 = 2;
        let e: i16 = 0;
        let r: i16 = vqdmulhh_s16(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulhs_s32() {
        let a: i32 = 1;
        let b: i32 = 2;
        let e: i32 = 0;
        let r: i32 = vqdmulhs_s32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulhh_lane_s16() {
        let a: i16 = 2;
        let b: i16x4 = i16x4::new(0, 0, 0x7F_FF, 0);
        let e: i16 = 1;
        let r: i16 = vqdmulhh_lane_s16::<2>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulhh_laneq_s16() {
        let a: i16 = 2;
        let b: i16x8 = i16x8::new(0, 0, 0x7F_FF, 0, 0, 0, 0, 0);
        let e: i16 = 1;
        let r: i16 = vqdmulhh_laneq_s16::<2>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulhs_lane_s32() {
        let a: i32 = 2;
        let b: i32x2 = i32x2::new(0, 0x7F_FF_FF_FF);
        let e: i32 = 1;
        let r: i32 = vqdmulhs_lane_s32::<1>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulhs_laneq_s32() {
        let a: i32 = 2;
        let b: i32x4 = i32x4::new(0, 0x7F_FF_FF_FF, 0, 0);
        let e: i32 = 1;
        let r: i32 = vqdmulhs_laneq_s32::<1>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulh_lane_s16() {
        let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let b: i16x4 = i16x4::new(2, 1, 1, 1);
        let e: i16x4 = i16x4::new(1, 1, 1, 1);
        let r: i16x4 = transmute(vqdmulh_lane_s16::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulhq_lane_s16() {
        let a: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let b: i16x4 = i16x4::new(2, 1, 1, 1);
        let e: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
        let r: i16x8 = transmute(vqdmulhq_lane_s16::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulh_lane_s32() {
        let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let b: i32x2 = i32x2::new(2, 1);
        let e: i32x2 = i32x2::new(1, 1);
        let r: i32x2 = transmute(vqdmulh_lane_s32::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqdmulhq_lane_s32() {
        let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let b: i32x2 = i32x2::new(2, 1);
        let e: i32x4 = i32x4::new(1, 1, 1, 1);
        let r: i32x4 = transmute(vqdmulhq_lane_s32::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
24771
    // vqmovn* — signed/unsigned saturating extract narrow.
    // The scalar `h`/`s`/`d` forms narrow one element; the in-range inputs
    // here pass through unchanged. The `_high_*` forms narrow `b` and append
    // it to the existing narrow vector `a`; these tests feed the per-type
    // maximum (0x7F.., 0xFF..) to confirm it narrows to the narrow-type max
    // without wrapping.
    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovnh_s16() {
        let a: i16 = 1;
        let e: i8 = 1;
        let r: i8 = vqmovnh_s16(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovns_s32() {
        let a: i32 = 1;
        let e: i16 = 1;
        let r: i16 = vqmovns_s32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovnh_u16() {
        let a: u16 = 1;
        let e: u8 = 1;
        let r: u8 = vqmovnh_u16(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovns_u32() {
        let a: u32 = 1;
        let e: u16 = 1;
        let r: u16 = vqmovns_u32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovnd_s64() {
        let a: i64 = 1;
        let e: i32 = 1;
        let r: i32 = vqmovnd_s64(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovnd_u64() {
        let a: u64 = 1;
        let e: u32 = 1;
        let r: u32 = vqmovnd_u64(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovn_high_s16() {
        let a: i8x8 = i8x8::new(0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F);
        let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let e: i8x16 = i8x16::new(0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F);
        let r: i8x16 = transmute(vqmovn_high_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovn_high_s32() {
        let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let e: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let r: i16x8 = transmute(vqmovn_high_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovn_high_s64() {
        let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let b: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 0x7F_FF_FF_FF_FF_FF_FF_FF);
        let e: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let r: i32x4 = transmute(vqmovn_high_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovn_high_u16() {
        let a: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
        let b: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
        let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
        let r: u8x16 = transmute(vqmovn_high_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovn_high_u32() {
        let a: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
        let b: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
        let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
        let r: u16x8 = transmute(vqmovn_high_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovn_high_u64() {
        let a: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
        let b: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
        let r: u32x4 = transmute(vqmovn_high_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
24873
    // vqmovun* — signed saturating extract unsigned narrow.
    // Scalar forms pass small non-negative values through unchanged; the
    // `_high_*` forms feed all-negative `b`, which must clamp to 0 in the
    // unsigned result (appended after the existing narrow vector `a`).
    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovunh_s16() {
        let a: i16 = 1;
        let e: u8 = 1;
        let r: u8 = vqmovunh_s16(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovuns_s32() {
        let a: i32 = 1;
        let e: u16 = 1;
        let r: u16 = vqmovuns_s32(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovund_s64() {
        let a: i64 = 1;
        let e: u32 = 1;
        let r: u32 = vqmovund_s64(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovun_high_s16() {
        let a: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
        let b: i16x8 = i16x8::new(-1, -1, -1, -1, -1, -1, -1, -1);
        let e: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
        let r: u8x16 = transmute(vqmovun_high_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovun_high_s32() {
        let a: u16x4 = u16x4::new(0, 0, 0, 0);
        let b: i32x4 = i32x4::new(-1, -1, -1, -1);
        let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
        let r: u16x8 = transmute(vqmovun_high_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqmovun_high_s64() {
        let a: u32x2 = u32x2::new(0, 0);
        let b: i64x2 = i64x2::new(-1, -1);
        let e: u32x4 = u32x4::new(0, 0, 0, 0);
        let r: u32x4 = transmute(vqmovun_high_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
24924
    // vqrdmulh* — signed saturating rounding doubling multiply, high half.
    // Each test checks r = (2*a*b + (1 << (bits-1))) >> bits; with the small
    // inputs here the rounded high half is 0, e.g. for i16:
    // (2*1*2 + 0x8000) >> 16 = 0. `_lane`/`_laneq` pick lane <N> of `b`.
    #[simd_test(enable = "neon")]
    unsafe fn test_vqrdmulhh_s16() {
        let a: i16 = 1;
        let b: i16 = 2;
        let e: i16 = 0;
        let r: i16 = vqrdmulhh_s16(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqrdmulhs_s32() {
        let a: i32 = 1;
        let b: i32 = 2;
        let e: i32 = 0;
        let r: i32 = vqrdmulhs_s32(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqrdmulhh_lane_s16() {
        let a: i16 = 1;
        let b: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i16 = 0;
        let r: i16 = vqrdmulhh_lane_s16::<1>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqrdmulhh_laneq_s16() {
        let a: i16 = 1;
        let b: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i16 = 0;
        let r: i16 = vqrdmulhh_laneq_s16::<1>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqrdmulhs_lane_s32() {
        let a: i32 = 1;
        let b: i32x2 = i32x2::new(0, 2);
        let e: i32 = 0;
        let r: i32 = vqrdmulhs_lane_s32::<1>(a, transmute(b));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vqrdmulhs_laneq_s32() {
        let a: i32 = 1;
        let b: i32x4 = i32x4::new(0, 2, 0, 0);
        let e: i32 = 0;
        let r: i32 = vqrdmulhs_laneq_s32::<1>(a, transmute(b));
        assert_eq!(r, e);
    }
24978
    // vqrdmlah* — signed saturating rounding doubling multiply-accumulate
    // (requires the RDM extension, hence `enable = "rdm"`). Each test checks
    // r = a + vqrdmulh(b, c); e.g. for i16 with b = 0x7FFF, c = 2:
    // (2*0x7FFF*2 + 0x8000) >> 16 = 2, so 1 + 2 = 3. `_lane`/`_laneq`
    // variants pick lane <N> of `c`; the `h`/`s` forms are scalar.
    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlah_s16() {
        let a: i16x4 = i16x4::new(1, 1, 1, 1);
        let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x4 = i16x4::new(2, 2, 2, 2);
        let e: i16x4 = i16x4::new(3, 3, 3, 3);
        let r: i16x4 = transmute(vqrdmlah_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahq_s16() {
        let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
        let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
        let e: i16x8 = i16x8::new(3, 3, 3, 3, 3, 3, 3, 3);
        let r: i16x8 = transmute(vqrdmlahq_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlah_s32() {
        let a: i32x2 = i32x2::new(1, 1);
        let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let c: i32x2 = i32x2::new(2, 2);
        let e: i32x2 = i32x2::new(3, 3);
        let r: i32x2 = transmute(vqrdmlah_s32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahq_s32() {
        let a: i32x4 = i32x4::new(1, 1, 1, 1);
        let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let c: i32x4 = i32x4::new(2, 2, 2, 2);
        let e: i32x4 = i32x4::new(3, 3, 3, 3);
        let r: i32x4 = transmute(vqrdmlahq_s32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahh_s16() {
        let a: i16 = 1;
        let b: i16 = 1;
        let c: i16 = 2;
        let e: i16 = 1;
        let r: i16 = vqrdmlahh_s16(a, b, c);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahs_s32() {
        let a: i32 = 1;
        let b: i32 = 1;
        let c: i32 = 2;
        let e: i32 = 1;
        let r: i32 = vqrdmlahs_s32(a, b, c);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlah_lane_s16() {
        let a: i16x4 = i16x4::new(1, 1, 1, 1);
        let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i16x4 = i16x4::new(3, 3, 3, 3);
        let r: i16x4 = transmute(vqrdmlah_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlah_laneq_s16() {
        let a: i16x4 = i16x4::new(1, 1, 1, 1);
        let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i16x4 = i16x4::new(3, 3, 3, 3);
        let r: i16x4 = transmute(vqrdmlah_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahq_lane_s16() {
        let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
        let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i16x8 = i16x8::new(3, 3, 3, 3, 3, 3, 3, 3);
        let r: i16x8 = transmute(vqrdmlahq_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahq_laneq_s16() {
        let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
        let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i16x8 = i16x8::new(3, 3, 3, 3, 3, 3, 3, 3);
        let r: i16x8 = transmute(vqrdmlahq_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlah_lane_s32() {
        let a: i32x2 = i32x2::new(1, 1);
        let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let c: i32x2 = i32x2::new(0, 2);
        let e: i32x2 = i32x2::new(3, 3);
        let r: i32x2 = transmute(vqrdmlah_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlah_laneq_s32() {
        let a: i32x2 = i32x2::new(1, 1);
        let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let c: i32x4 = i32x4::new(0, 2, 0, 0);
        let e: i32x2 = i32x2::new(3, 3);
        let r: i32x2 = transmute(vqrdmlah_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahq_lane_s32() {
        let a: i32x4 = i32x4::new(1, 1, 1, 1);
        let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let c: i32x2 = i32x2::new(0, 2);
        let e: i32x4 = i32x4::new(3, 3, 3, 3);
        let r: i32x4 = transmute(vqrdmlahq_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahq_laneq_s32() {
        let a: i32x4 = i32x4::new(1, 1, 1, 1);
        let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let c: i32x4 = i32x4::new(0, 2, 0, 0);
        let e: i32x4 = i32x4::new(3, 3, 3, 3);
        let r: i32x4 = transmute(vqrdmlahq_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahh_lane_s16() {
        let a: i16 = 1;
        let b: i16 = 1;
        let c: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i16 = 1;
        let r: i16 = vqrdmlahh_lane_s16::<1>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahh_laneq_s16() {
        let a: i16 = 1;
        let b: i16 = 1;
        let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i16 = 1;
        let r: i16 = vqrdmlahh_laneq_s16::<1>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahs_lane_s32() {
        let a: i32 = 1;
        let b: i32 = 1;
        let c: i32x2 = i32x2::new(0, 2);
        let e: i32 = 1;
        let r: i32 = vqrdmlahs_lane_s32::<1>(a, b, transmute(c));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlahs_laneq_s32() {
        let a: i32 = 1;
        let b: i32 = 1;
        let c: i32x4 = i32x4::new(0, 2, 0, 0);
        let e: i32 = 1;
        let r: i32 = vqrdmlahs_laneq_s32::<1>(a, b, transmute(c));
        assert_eq!(r, e);
    }
25158
    // vqrdmlsh* — signed saturating rounding doubling multiply-subtract
    // (RDM extension). Each test checks r = a - vqrdmulh(b, c); e.g. for i16
    // with b = 0x7FFF, c = 2 the rounded doubling high product is 2, so
    // 1 - 2 = -1. `_lane`/`_laneq` variants pick lane <N> of `c`; the
    // `h`/`s` forms are scalar (there the product term is 0, leaving a).
    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlsh_s16() {
        let a: i16x4 = i16x4::new(1, 1, 1, 1);
        let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x4 = i16x4::new(2, 2, 2, 2);
        let e: i16x4 = i16x4::new(-1, -1, -1, -1);
        let r: i16x4 = transmute(vqrdmlsh_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlshq_s16() {
        let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
        let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
        let e: i16x8 = i16x8::new(-1, -1, -1, -1, -1, -1, -1, -1);
        let r: i16x8 = transmute(vqrdmlshq_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlsh_s32() {
        let a: i32x2 = i32x2::new(1, 1);
        let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let c: i32x2 = i32x2::new(2, 2);
        let e: i32x2 = i32x2::new(-1, -1);
        let r: i32x2 = transmute(vqrdmlsh_s32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlshq_s32() {
        let a: i32x4 = i32x4::new(1, 1, 1, 1);
        let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let c: i32x4 = i32x4::new(2, 2, 2, 2);
        let e: i32x4 = i32x4::new(-1, -1, -1, -1);
        let r: i32x4 = transmute(vqrdmlshq_s32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlshh_s16() {
        let a: i16 = 1;
        let b: i16 = 1;
        let c: i16 = 2;
        let e: i16 = 1;
        let r: i16 = vqrdmlshh_s16(a, b, c);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlshs_s32() {
        let a: i32 = 1;
        let b: i32 = 1;
        let c: i32 = 2;
        let e: i32 = 1;
        let r: i32 = vqrdmlshs_s32(a, b, c);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlsh_lane_s16() {
        let a: i16x4 = i16x4::new(1, 1, 1, 1);
        let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i16x4 = i16x4::new(-1, -1, -1, -1);
        let r: i16x4 = transmute(vqrdmlsh_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlsh_laneq_s16() {
        let a: i16x4 = i16x4::new(1, 1, 1, 1);
        let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i16x4 = i16x4::new(-1, -1, -1, -1);
        let r: i16x4 = transmute(vqrdmlsh_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlshq_lane_s16() {
        let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
        let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x4 = i16x4::new(0, 2, 0, 0);
        let e: i16x8 = i16x8::new(-1, -1, -1, -1, -1, -1, -1, -1);
        let r: i16x8 = transmute(vqrdmlshq_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlshq_laneq_s16() {
        let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
        let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
        let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
        let e: i16x8 = i16x8::new(-1, -1, -1, -1, -1, -1, -1, -1);
        let r: i16x8 = transmute(vqrdmlshq_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "rdm")]
    unsafe fn test_vqrdmlsh_lane_s32() {
        let a: i32x2 = i32x2::new(1, 1);
        let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
        let c: i32x2 = i32x2::new(0, 2);
        let e: i32x2 = i32x2::new(-1, -1);
        let r: i32x2 = transmute(vqrdmlsh_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
25268
25269    #[simd_test(enable = "rdm")]
25270    unsafe fn test_vqrdmlsh_laneq_s32() {
25271        let a: i32x2 = i32x2::new(1, 1);
25272        let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
25273        let c: i32x4 = i32x4::new(0, 2, 0, 0);
25274        let e: i32x2 = i32x2::new(-1, -1);
25275        let r: i32x2 = transmute(vqrdmlsh_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
25276        assert_eq!(r, e);
25277    }
25278
25279    #[simd_test(enable = "rdm")]
25280    unsafe fn test_vqrdmlshq_lane_s32() {
25281        let a: i32x4 = i32x4::new(1, 1, 1, 1);
25282        let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
25283        let c: i32x2 = i32x2::new(0, 2);
25284        let e: i32x4 = i32x4::new(-1, -1, -1, -1);
25285        let r: i32x4 = transmute(vqrdmlshq_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
25286        assert_eq!(r, e);
25287    }
25288
25289    #[simd_test(enable = "rdm")]
25290    unsafe fn test_vqrdmlshq_laneq_s32() {
25291        let a: i32x4 = i32x4::new(1, 1, 1, 1);
25292        let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
25293        let c: i32x4 = i32x4::new(0, 2, 0, 0);
25294        let e: i32x4 = i32x4::new(-1, -1, -1, -1);
25295        let r: i32x4 = transmute(vqrdmlshq_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
25296        assert_eq!(r, e);
25297    }
25298
25299    #[simd_test(enable = "rdm")]
25300    unsafe fn test_vqrdmlshh_lane_s16() {
25301        let a: i16 = 1;
25302        let b: i16 = 1;
25303        let c: i16x4 = i16x4::new(0, 2, 0, 0);
25304        let e: i16 = 1;
25305        let r: i16 = vqrdmlshh_lane_s16::<1>(a, b, transmute(c));
25306        assert_eq!(r, e);
25307    }
25308
25309    #[simd_test(enable = "rdm")]
25310    unsafe fn test_vqrdmlshh_laneq_s16() {
25311        let a: i16 = 1;
25312        let b: i16 = 1;
25313        let c: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
25314        let e: i16 = 1;
25315        let r: i16 = vqrdmlshh_laneq_s16::<1>(a, b, transmute(c));
25316        assert_eq!(r, e);
25317    }
25318
25319    #[simd_test(enable = "rdm")]
25320    unsafe fn test_vqrdmlshs_lane_s32() {
25321        let a: i32 = 1;
25322        let b: i32 = 1;
25323        let c: i32x2 = i32x2::new(0, 2);
25324        let e: i32 = 1;
25325        let r: i32 = vqrdmlshs_lane_s32::<1>(a, b, transmute(c));
25326        assert_eq!(r, e);
25327    }
25328
25329    #[simd_test(enable = "rdm")]
25330    unsafe fn test_vqrdmlshs_laneq_s32() {
25331        let a: i32 = 1;
25332        let b: i32 = 1;
25333        let c: i32x4 = i32x4::new(0, 2, 0, 0);
25334        let e: i32 = 1;
25335        let r: i32 = vqrdmlshs_laneq_s32::<1>(a, b, transmute(c));
25336        assert_eq!(r, e);
25337    }
25338
25339    #[simd_test(enable = "neon")]
25340    unsafe fn test_vqrshls_s32() {
25341        let a: i32 = 2;
25342        let b: i32 = 2;
25343        let e: i32 = 8;
25344        let r: i32 = vqrshls_s32(a, b);
25345        assert_eq!(r, e);
25346    }
25347
25348    #[simd_test(enable = "neon")]
25349    unsafe fn test_vqrshld_s64() {
25350        let a: i64 = 2;
25351        let b: i64 = 2;
25352        let e: i64 = 8;
25353        let r: i64 = vqrshld_s64(a, b);
25354        assert_eq!(r, e);
25355    }
25356
25357    #[simd_test(enable = "neon")]
25358    unsafe fn test_vqrshlb_s8() {
25359        let a: i8 = 1;
25360        let b: i8 = 2;
25361        let e: i8 = 4;
25362        let r: i8 = vqrshlb_s8(a, b);
25363        assert_eq!(r, e);
25364    }
25365
25366    #[simd_test(enable = "neon")]
25367    unsafe fn test_vqrshlh_s16() {
25368        let a: i16 = 1;
25369        let b: i16 = 2;
25370        let e: i16 = 4;
25371        let r: i16 = vqrshlh_s16(a, b);
25372        assert_eq!(r, e);
25373    }
25374
25375    #[simd_test(enable = "neon")]
25376    unsafe fn test_vqrshls_u32() {
25377        let a: u32 = 2;
25378        let b: i32 = 2;
25379        let e: u32 = 8;
25380        let r: u32 = vqrshls_u32(a, b);
25381        assert_eq!(r, e);
25382    }
25383
25384    #[simd_test(enable = "neon")]
25385    unsafe fn test_vqrshld_u64() {
25386        let a: u64 = 2;
25387        let b: i64 = 2;
25388        let e: u64 = 8;
25389        let r: u64 = vqrshld_u64(a, b);
25390        assert_eq!(r, e);
25391    }
25392
25393    #[simd_test(enable = "neon")]
25394    unsafe fn test_vqrshlb_u8() {
25395        let a: u8 = 1;
25396        let b: i8 = 2;
25397        let e: u8 = 4;
25398        let r: u8 = vqrshlb_u8(a, b);
25399        assert_eq!(r, e);
25400    }
25401
25402    #[simd_test(enable = "neon")]
25403    unsafe fn test_vqrshlh_u16() {
25404        let a: u16 = 1;
25405        let b: i16 = 2;
25406        let e: u16 = 4;
25407        let r: u16 = vqrshlh_u16(a, b);
25408        assert_eq!(r, e);
25409    }
25410
25411    #[simd_test(enable = "neon")]
25412    unsafe fn test_vqrshrnh_n_s16() {
25413        let a: i16 = 4;
25414        let e: i8 = 1;
25415        let r: i8 = vqrshrnh_n_s16::<2>(a);
25416        assert_eq!(r, e);
25417    }
25418
25419    #[simd_test(enable = "neon")]
25420    unsafe fn test_vqrshrns_n_s32() {
25421        let a: i32 = 4;
25422        let e: i16 = 1;
25423        let r: i16 = vqrshrns_n_s32::<2>(a);
25424        assert_eq!(r, e);
25425    }
25426
25427    #[simd_test(enable = "neon")]
25428    unsafe fn test_vqrshrnd_n_s64() {
25429        let a: i64 = 4;
25430        let e: i32 = 1;
25431        let r: i32 = vqrshrnd_n_s64::<2>(a);
25432        assert_eq!(r, e);
25433    }
25434
25435    #[simd_test(enable = "neon")]
25436    unsafe fn test_vqrshrn_high_n_s16() {
25437        let a: i8x8 = i8x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25438        let b: i16x8 = i16x8::new(8, 12, 24, 28, 48, 52, 56, 60);
25439        let e: i8x16 = i8x16::new(0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 12, 13, 14, 15);
25440        let r: i8x16 = transmute(vqrshrn_high_n_s16::<2>(transmute(a), transmute(b)));
25441        assert_eq!(r, e);
25442    }
25443
25444    #[simd_test(enable = "neon")]
25445    unsafe fn test_vqrshrn_high_n_s32() {
25446        let a: i16x4 = i16x4::new(0, 1, 2, 3);
25447        let b: i32x4 = i32x4::new(8, 12, 24, 28);
25448        let e: i16x8 = i16x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25449        let r: i16x8 = transmute(vqrshrn_high_n_s32::<2>(transmute(a), transmute(b)));
25450        assert_eq!(r, e);
25451    }
25452
25453    #[simd_test(enable = "neon")]
25454    unsafe fn test_vqrshrn_high_n_s64() {
25455        let a: i32x2 = i32x2::new(0, 1);
25456        let b: i64x2 = i64x2::new(8, 12);
25457        let e: i32x4 = i32x4::new(0, 1, 2, 3);
25458        let r: i32x4 = transmute(vqrshrn_high_n_s64::<2>(transmute(a), transmute(b)));
25459        assert_eq!(r, e);
25460    }
25461
25462    #[simd_test(enable = "neon")]
25463    unsafe fn test_vqrshrnh_n_u16() {
25464        let a: u16 = 4;
25465        let e: u8 = 1;
25466        let r: u8 = vqrshrnh_n_u16::<2>(a);
25467        assert_eq!(r, e);
25468    }
25469
25470    #[simd_test(enable = "neon")]
25471    unsafe fn test_vqrshrns_n_u32() {
25472        let a: u32 = 4;
25473        let e: u16 = 1;
25474        let r: u16 = vqrshrns_n_u32::<2>(a);
25475        assert_eq!(r, e);
25476    }
25477
25478    #[simd_test(enable = "neon")]
25479    unsafe fn test_vqrshrnd_n_u64() {
25480        let a: u64 = 4;
25481        let e: u32 = 1;
25482        let r: u32 = vqrshrnd_n_u64::<2>(a);
25483        assert_eq!(r, e);
25484    }
25485
25486    #[simd_test(enable = "neon")]
25487    unsafe fn test_vqrshrn_high_n_u16() {
25488        let a: u8x8 = u8x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25489        let b: u16x8 = u16x8::new(8, 12, 24, 28, 48, 52, 56, 60);
25490        let e: u8x16 = u8x16::new(0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 12, 13, 14, 15);
25491        let r: u8x16 = transmute(vqrshrn_high_n_u16::<2>(transmute(a), transmute(b)));
25492        assert_eq!(r, e);
25493    }
25494
25495    #[simd_test(enable = "neon")]
25496    unsafe fn test_vqrshrn_high_n_u32() {
25497        let a: u16x4 = u16x4::new(0, 1, 2, 3);
25498        let b: u32x4 = u32x4::new(8, 12, 24, 28);
25499        let e: u16x8 = u16x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25500        let r: u16x8 = transmute(vqrshrn_high_n_u32::<2>(transmute(a), transmute(b)));
25501        assert_eq!(r, e);
25502    }
25503
25504    #[simd_test(enable = "neon")]
25505    unsafe fn test_vqrshrn_high_n_u64() {
25506        let a: u32x2 = u32x2::new(0, 1);
25507        let b: u64x2 = u64x2::new(8, 12);
25508        let e: u32x4 = u32x4::new(0, 1, 2, 3);
25509        let r: u32x4 = transmute(vqrshrn_high_n_u64::<2>(transmute(a), transmute(b)));
25510        assert_eq!(r, e);
25511    }
25512
25513    #[simd_test(enable = "neon")]
25514    unsafe fn test_vqrshrunh_n_s16() {
25515        let a: i16 = 4;
25516        let e: u8 = 1;
25517        let r: u8 = vqrshrunh_n_s16::<2>(a);
25518        assert_eq!(r, e);
25519    }
25520
25521    #[simd_test(enable = "neon")]
25522    unsafe fn test_vqrshruns_n_s32() {
25523        let a: i32 = 4;
25524        let e: u16 = 1;
25525        let r: u16 = vqrshruns_n_s32::<2>(a);
25526        assert_eq!(r, e);
25527    }
25528
25529    #[simd_test(enable = "neon")]
25530    unsafe fn test_vqrshrund_n_s64() {
25531        let a: i64 = 4;
25532        let e: u32 = 1;
25533        let r: u32 = vqrshrund_n_s64::<2>(a);
25534        assert_eq!(r, e);
25535    }
25536
25537    #[simd_test(enable = "neon")]
25538    unsafe fn test_vqrshrun_high_n_s16() {
25539        let a: u8x8 = u8x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25540        let b: i16x8 = i16x8::new(8, 12, 24, 28, 48, 52, 56, 60);
25541        let e: u8x16 = u8x16::new(0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 12, 13, 14, 15);
25542        let r: u8x16 = transmute(vqrshrun_high_n_s16::<2>(transmute(a), transmute(b)));
25543        assert_eq!(r, e);
25544    }
25545
25546    #[simd_test(enable = "neon")]
25547    unsafe fn test_vqrshrun_high_n_s32() {
25548        let a: u16x4 = u16x4::new(0, 1, 2, 3);
25549        let b: i32x4 = i32x4::new(8, 12, 24, 28);
25550        let e: u16x8 = u16x8::new(0, 1, 2, 3, 2, 3, 6, 7);
25551        let r: u16x8 = transmute(vqrshrun_high_n_s32::<2>(transmute(a), transmute(b)));
25552        assert_eq!(r, e);
25553    }
25554
25555    #[simd_test(enable = "neon")]
25556    unsafe fn test_vqrshrun_high_n_s64() {
25557        let a: u32x2 = u32x2::new(0, 1);
25558        let b: i64x2 = i64x2::new(8, 12);
25559        let e: u32x4 = u32x4::new(0, 1, 2, 3);
25560        let r: u32x4 = transmute(vqrshrun_high_n_s64::<2>(transmute(a), transmute(b)));
25561        assert_eq!(r, e);
25562    }
25563
25564    #[simd_test(enable = "neon")]
25565    unsafe fn test_vqshld_s64() {
25566        let a: i64 = 0;
25567        let b: i64 = 2;
25568        let e: i64 = 0;
25569        let r: i64 = vqshld_s64(a, b);
25570        assert_eq!(r, e);
25571    }
25572
25573    #[simd_test(enable = "neon")]
25574    unsafe fn test_vqshlb_s8() {
25575        let a: i8 = 1;
25576        let b: i8 = 2;
25577        let e: i8 = 4;
25578        let r: i8 = vqshlb_s8(a, b);
25579        assert_eq!(r, e);
25580    }
25581
25582    #[simd_test(enable = "neon")]
25583    unsafe fn test_vqshlh_s16() {
25584        let a: i16 = 1;
25585        let b: i16 = 2;
25586        let e: i16 = 4;
25587        let r: i16 = vqshlh_s16(a, b);
25588        assert_eq!(r, e);
25589    }
25590
25591    #[simd_test(enable = "neon")]
25592    unsafe fn test_vqshls_s32() {
25593        let a: i32 = 1;
25594        let b: i32 = 2;
25595        let e: i32 = 4;
25596        let r: i32 = vqshls_s32(a, b);
25597        assert_eq!(r, e);
25598    }
25599
25600    #[simd_test(enable = "neon")]
25601    unsafe fn test_vqshld_u64() {
25602        let a: u64 = 0;
25603        let b: i64 = 2;
25604        let e: u64 = 0;
25605        let r: u64 = vqshld_u64(a, b);
25606        assert_eq!(r, e);
25607    }
25608
25609    #[simd_test(enable = "neon")]
25610    unsafe fn test_vqshlb_u8() {
25611        let a: u8 = 1;
25612        let b: i8 = 2;
25613        let e: u8 = 4;
25614        let r: u8 = vqshlb_u8(a, b);
25615        assert_eq!(r, e);
25616    }
25617
25618    #[simd_test(enable = "neon")]
25619    unsafe fn test_vqshlh_u16() {
25620        let a: u16 = 1;
25621        let b: i16 = 2;
25622        let e: u16 = 4;
25623        let r: u16 = vqshlh_u16(a, b);
25624        assert_eq!(r, e);
25625    }
25626
25627    #[simd_test(enable = "neon")]
25628    unsafe fn test_vqshls_u32() {
25629        let a: u32 = 1;
25630        let b: i32 = 2;
25631        let e: u32 = 4;
25632        let r: u32 = vqshls_u32(a, b);
25633        assert_eq!(r, e);
25634    }
25635
25636    #[simd_test(enable = "neon")]
25637    unsafe fn test_vqshlb_n_s8() {
25638        let a: i8 = 1;
25639        let e: i8 = 4;
25640        let r: i8 = vqshlb_n_s8::<2>(a);
25641        assert_eq!(r, e);
25642    }
25643
25644    #[simd_test(enable = "neon")]
25645    unsafe fn test_vqshlh_n_s16() {
25646        let a: i16 = 1;
25647        let e: i16 = 4;
25648        let r: i16 = vqshlh_n_s16::<2>(a);
25649        assert_eq!(r, e);
25650    }
25651
25652    #[simd_test(enable = "neon")]
25653    unsafe fn test_vqshls_n_s32() {
25654        let a: i32 = 1;
25655        let e: i32 = 4;
25656        let r: i32 = vqshls_n_s32::<2>(a);
25657        assert_eq!(r, e);
25658    }
25659
25660    #[simd_test(enable = "neon")]
25661    unsafe fn test_vqshld_n_s64() {
25662        let a: i64 = 1;
25663        let e: i64 = 4;
25664        let r: i64 = vqshld_n_s64::<2>(a);
25665        assert_eq!(r, e);
25666    }
25667
25668    #[simd_test(enable = "neon")]
25669    unsafe fn test_vqshlb_n_u8() {
25670        let a: u8 = 1;
25671        let e: u8 = 4;
25672        let r: u8 = vqshlb_n_u8::<2>(a);
25673        assert_eq!(r, e);
25674    }
25675
25676    #[simd_test(enable = "neon")]
25677    unsafe fn test_vqshlh_n_u16() {
25678        let a: u16 = 1;
25679        let e: u16 = 4;
25680        let r: u16 = vqshlh_n_u16::<2>(a);
25681        assert_eq!(r, e);
25682    }
25683
25684    #[simd_test(enable = "neon")]
25685    unsafe fn test_vqshls_n_u32() {
25686        let a: u32 = 1;
25687        let e: u32 = 4;
25688        let r: u32 = vqshls_n_u32::<2>(a);
25689        assert_eq!(r, e);
25690    }
25691
25692    #[simd_test(enable = "neon")]
25693    unsafe fn test_vqshld_n_u64() {
25694        let a: u64 = 1;
25695        let e: u64 = 4;
25696        let r: u64 = vqshld_n_u64::<2>(a);
25697        assert_eq!(r, e);
25698    }
25699
25700    #[simd_test(enable = "neon")]
25701    unsafe fn test_vqshlub_n_s8() {
25702        let a: i8 = 1;
25703        let e: u8 = 4;
25704        let r: u8 = vqshlub_n_s8::<2>(a);
25705        assert_eq!(r, e);
25706    }
25707
25708    #[simd_test(enable = "neon")]
25709    unsafe fn test_vqshluh_n_s16() {
25710        let a: i16 = 1;
25711        let e: u16 = 4;
25712        let r: u16 = vqshluh_n_s16::<2>(a);
25713        assert_eq!(r, e);
25714    }
25715
25716    #[simd_test(enable = "neon")]
25717    unsafe fn test_vqshlus_n_s32() {
25718        let a: i32 = 1;
25719        let e: u32 = 4;
25720        let r: u32 = vqshlus_n_s32::<2>(a);
25721        assert_eq!(r, e);
25722    }
25723
25724    #[simd_test(enable = "neon")]
25725    unsafe fn test_vqshlud_n_s64() {
25726        let a: i64 = 1;
25727        let e: u64 = 4;
25728        let r: u64 = vqshlud_n_s64::<2>(a);
25729        assert_eq!(r, e);
25730    }
25731
25732    #[simd_test(enable = "neon")]
25733    unsafe fn test_vqshrnd_n_s64() {
25734        let a: i64 = 0;
25735        let e: i32 = 0;
25736        let r: i32 = vqshrnd_n_s64::<2>(a);
25737        assert_eq!(r, e);
25738    }
25739
25740    #[simd_test(enable = "neon")]
25741    unsafe fn test_vqshrnh_n_s16() {
25742        let a: i16 = 4;
25743        let e: i8 = 1;
25744        let r: i8 = vqshrnh_n_s16::<2>(a);
25745        assert_eq!(r, e);
25746    }
25747
25748    #[simd_test(enable = "neon")]
25749    unsafe fn test_vqshrns_n_s32() {
25750        let a: i32 = 4;
25751        let e: i16 = 1;
25752        let r: i16 = vqshrns_n_s32::<2>(a);
25753        assert_eq!(r, e);
25754    }
25755
25756    #[simd_test(enable = "neon")]
25757    unsafe fn test_vqshrn_high_n_s16() {
25758        let a: i8x8 = i8x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25759        let b: i16x8 = i16x8::new(32, 36, 40, 44, 48, 52, 56, 60);
25760        let e: i8x16 = i8x16::new(0, 1, 8, 9, 8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15);
25761        let r: i8x16 = transmute(vqshrn_high_n_s16::<2>(transmute(a), transmute(b)));
25762        assert_eq!(r, e);
25763    }
25764
25765    #[simd_test(enable = "neon")]
25766    unsafe fn test_vqshrn_high_n_s32() {
25767        let a: i16x4 = i16x4::new(0, 1, 8, 9);
25768        let b: i32x4 = i32x4::new(32, 36, 40, 44);
25769        let e: i16x8 = i16x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25770        let r: i16x8 = transmute(vqshrn_high_n_s32::<2>(transmute(a), transmute(b)));
25771        assert_eq!(r, e);
25772    }
25773
25774    #[simd_test(enable = "neon")]
25775    unsafe fn test_vqshrn_high_n_s64() {
25776        let a: i32x2 = i32x2::new(0, 1);
25777        let b: i64x2 = i64x2::new(32, 36);
25778        let e: i32x4 = i32x4::new(0, 1, 8, 9);
25779        let r: i32x4 = transmute(vqshrn_high_n_s64::<2>(transmute(a), transmute(b)));
25780        assert_eq!(r, e);
25781    }
25782
25783    #[simd_test(enable = "neon")]
25784    unsafe fn test_vqshrnd_n_u64() {
25785        let a: u64 = 0;
25786        let e: u32 = 0;
25787        let r: u32 = vqshrnd_n_u64::<2>(a);
25788        assert_eq!(r, e);
25789    }
25790
25791    #[simd_test(enable = "neon")]
25792    unsafe fn test_vqshrnh_n_u16() {
25793        let a: u16 = 4;
25794        let e: u8 = 1;
25795        let r: u8 = vqshrnh_n_u16::<2>(a);
25796        assert_eq!(r, e);
25797    }
25798
25799    #[simd_test(enable = "neon")]
25800    unsafe fn test_vqshrns_n_u32() {
25801        let a: u32 = 4;
25802        let e: u16 = 1;
25803        let r: u16 = vqshrns_n_u32::<2>(a);
25804        assert_eq!(r, e);
25805    }
25806
25807    #[simd_test(enable = "neon")]
25808    unsafe fn test_vqshrn_high_n_u16() {
25809        let a: u8x8 = u8x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25810        let b: u16x8 = u16x8::new(32, 36, 40, 44, 48, 52, 56, 60);
25811        let e: u8x16 = u8x16::new(0, 1, 8, 9, 8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15);
25812        let r: u8x16 = transmute(vqshrn_high_n_u16::<2>(transmute(a), transmute(b)));
25813        assert_eq!(r, e);
25814    }
25815
25816    #[simd_test(enable = "neon")]
25817    unsafe fn test_vqshrn_high_n_u32() {
25818        let a: u16x4 = u16x4::new(0, 1, 8, 9);
25819        let b: u32x4 = u32x4::new(32, 36, 40, 44);
25820        let e: u16x8 = u16x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25821        let r: u16x8 = transmute(vqshrn_high_n_u32::<2>(transmute(a), transmute(b)));
25822        assert_eq!(r, e);
25823    }
25824
25825    #[simd_test(enable = "neon")]
25826    unsafe fn test_vqshrn_high_n_u64() {
25827        let a: u32x2 = u32x2::new(0, 1);
25828        let b: u64x2 = u64x2::new(32, 36);
25829        let e: u32x4 = u32x4::new(0, 1, 8, 9);
25830        let r: u32x4 = transmute(vqshrn_high_n_u64::<2>(transmute(a), transmute(b)));
25831        assert_eq!(r, e);
25832    }
25833
25834    #[simd_test(enable = "neon")]
25835    unsafe fn test_vqshrunh_n_s16() {
25836        let a: i16 = 4;
25837        let e: u8 = 1;
25838        let r: u8 = vqshrunh_n_s16::<2>(a);
25839        assert_eq!(r, e);
25840    }
25841
25842    #[simd_test(enable = "neon")]
25843    unsafe fn test_vqshruns_n_s32() {
25844        let a: i32 = 4;
25845        let e: u16 = 1;
25846        let r: u16 = vqshruns_n_s32::<2>(a);
25847        assert_eq!(r, e);
25848    }
25849
25850    #[simd_test(enable = "neon")]
25851    unsafe fn test_vqshrund_n_s64() {
25852        let a: i64 = 4;
25853        let e: u32 = 1;
25854        let r: u32 = vqshrund_n_s64::<2>(a);
25855        assert_eq!(r, e);
25856    }
25857
25858    #[simd_test(enable = "neon")]
25859    unsafe fn test_vqshrun_high_n_s16() {
25860        let a: u8x8 = u8x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25861        let b: i16x8 = i16x8::new(32, 36, 40, 44, 48, 52, 56, 60);
25862        let e: u8x16 = u8x16::new(0, 1, 8, 9, 8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15);
25863        let r: u8x16 = transmute(vqshrun_high_n_s16::<2>(transmute(a), transmute(b)));
25864        assert_eq!(r, e);
25865    }
25866
25867    #[simd_test(enable = "neon")]
25868    unsafe fn test_vqshrun_high_n_s32() {
25869        let a: u16x4 = u16x4::new(0, 1, 8, 9);
25870        let b: i32x4 = i32x4::new(32, 36, 40, 44);
25871        let e: u16x8 = u16x8::new(0, 1, 8, 9, 8, 9, 10, 11);
25872        let r: u16x8 = transmute(vqshrun_high_n_s32::<2>(transmute(a), transmute(b)));
25873        assert_eq!(r, e);
25874    }
25875
25876    #[simd_test(enable = "neon")]
25877    unsafe fn test_vqshrun_high_n_s64() {
25878        let a: u32x2 = u32x2::new(0, 1);
25879        let b: i64x2 = i64x2::new(32, 36);
25880        let e: u32x4 = u32x4::new(0, 1, 8, 9);
25881        let r: u32x4 = transmute(vqshrun_high_n_s64::<2>(transmute(a), transmute(b)));
25882        assert_eq!(r, e);
25883    }
25884
25885    #[simd_test(enable = "neon")]
25886    unsafe fn test_vsqaddb_u8() {
25887        let a: u8 = 2;
25888        let b: i8 = 2;
25889        let e: u8 = 4;
25890        let r: u8 = vsqaddb_u8(a, b);
25891        assert_eq!(r, e);
25892    }
25893
25894    #[simd_test(enable = "neon")]
25895    unsafe fn test_vsqaddh_u16() {
25896        let a: u16 = 2;
25897        let b: i16 = 2;
25898        let e: u16 = 4;
25899        let r: u16 = vsqaddh_u16(a, b);
25900        assert_eq!(r, e);
25901    }
25902
25903    #[simd_test(enable = "neon")]
25904    unsafe fn test_vsqadds_u32() {
25905        let a: u32 = 2;
25906        let b: i32 = 2;
25907        let e: u32 = 4;
25908        let r: u32 = vsqadds_u32(a, b);
25909        assert_eq!(r, e);
25910    }
25911
25912    #[simd_test(enable = "neon")]
25913    unsafe fn test_vsqaddd_u64() {
25914        let a: u64 = 2;
25915        let b: i64 = 2;
25916        let e: u64 = 4;
25917        let r: u64 = vsqaddd_u64(a, b);
25918        assert_eq!(r, e);
25919    }
25920
25921    #[simd_test(enable = "neon")]
25922    unsafe fn test_vsqrt_f32() {
25923        let a: f32x2 = f32x2::new(4.0, 9.0);
25924        let e: f32x2 = f32x2::new(2.0, 3.0);
25925        let r: f32x2 = transmute(vsqrt_f32(transmute(a)));
25926        assert_eq!(r, e);
25927    }
25928
25929    #[simd_test(enable = "neon")]
25930    unsafe fn test_vsqrtq_f32() {
25931        let a: f32x4 = f32x4::new(4.0, 9.0, 16.0, 25.0);
25932        let e: f32x4 = f32x4::new(2.0, 3.0, 4.0, 5.0);
25933        let r: f32x4 = transmute(vsqrtq_f32(transmute(a)));
25934        assert_eq!(r, e);
25935    }
25936
25937    #[simd_test(enable = "neon")]
25938    unsafe fn test_vsqrt_f64() {
25939        let a: f64 = 4.0;
25940        let e: f64 = 2.0;
25941        let r: f64 = transmute(vsqrt_f64(transmute(a)));
25942        assert_eq!(r, e);
25943    }
25944
25945    #[simd_test(enable = "neon")]
25946    unsafe fn test_vsqrtq_f64() {
25947        let a: f64x2 = f64x2::new(4.0, 9.0);
25948        let e: f64x2 = f64x2::new(2.0, 3.0);
25949        let r: f64x2 = transmute(vsqrtq_f64(transmute(a)));
25950        assert_eq!(r, e);
25951    }
25952
25953    #[simd_test(enable = "neon")]
25954    unsafe fn test_vrsqrte_f64() {
25955        let a: f64 = 1.0;
25956        let e: f64 = 0.998046875;
25957        let r: f64 = transmute(vrsqrte_f64(transmute(a)));
25958        assert_eq!(r, e);
25959    }
25960
25961    #[simd_test(enable = "neon")]
25962    unsafe fn test_vrsqrteq_f64() {
25963        let a: f64x2 = f64x2::new(1.0, 2.0);
25964        let e: f64x2 = f64x2::new(0.998046875, 0.705078125);
25965        let r: f64x2 = transmute(vrsqrteq_f64(transmute(a)));
25966        assert_eq!(r, e);
25967    }
25968
25969    #[simd_test(enable = "neon")]
25970    unsafe fn test_vrsqrtes_f32() {
25971        let a: f32 = 1.0;
25972        let e: f32 = 0.998046875;
25973        let r: f32 = vrsqrtes_f32(a);
25974        assert_eq!(r, e);
25975    }
25976
25977    #[simd_test(enable = "neon")]
25978    unsafe fn test_vrsqrted_f64() {
25979        let a: f64 = 1.0;
25980        let e: f64 = 0.998046875;
25981        let r: f64 = vrsqrted_f64(a);
25982        assert_eq!(r, e);
25983    }
25984
25985    #[simd_test(enable = "neon")]
25986    unsafe fn test_vrsqrts_f64() {
25987        let a: f64 = 1.0;
25988        let b: f64 = 1.0;
25989        let e: f64 = 1.;
25990        let r: f64 = transmute(vrsqrts_f64(transmute(a), transmute(b)));
25991        assert_eq!(r, e);
25992    }
25993
25994    #[simd_test(enable = "neon")]
25995    unsafe fn test_vrsqrtsq_f64() {
25996        let a: f64x2 = f64x2::new(1.0, 2.0);
25997        let b: f64x2 = f64x2::new(1.0, 2.0);
25998        let e: f64x2 = f64x2::new(1., -0.5);
25999        let r: f64x2 = transmute(vrsqrtsq_f64(transmute(a), transmute(b)));
26000        assert_eq!(r, e);
26001    }
26002
26003    #[simd_test(enable = "neon")]
26004    unsafe fn test_vrsqrtss_f32() {
26005        let a: f32 = 1.0;
26006        let b: f32 = 1.0;
26007        let e: f32 = 1.;
26008        let r: f32 = vrsqrtss_f32(a, b);
26009        assert_eq!(r, e);
26010    }
26011
26012    #[simd_test(enable = "neon")]
26013    unsafe fn test_vrsqrtsd_f64() {
26014        let a: f64 = 1.0;
26015        let b: f64 = 1.0;
26016        let e: f64 = 1.;
26017        let r: f64 = vrsqrtsd_f64(a, b);
26018        assert_eq!(r, e);
26019    }
26020
26021    #[simd_test(enable = "neon")]
26022    unsafe fn test_vrecpe_f64() {
26023        let a: f64 = 4.0;
26024        let e: f64 = 0.24951171875;
26025        let r: f64 = transmute(vrecpe_f64(transmute(a)));
26026        assert_eq!(r, e);
26027    }
26028
26029    #[simd_test(enable = "neon")]
26030    unsafe fn test_vrecpeq_f64() {
26031        let a: f64x2 = f64x2::new(4.0, 3.0);
26032        let e: f64x2 = f64x2::new(0.24951171875, 0.3330078125);
26033        let r: f64x2 = transmute(vrecpeq_f64(transmute(a)));
26034        assert_eq!(r, e);
26035    }
26036
26037    #[simd_test(enable = "neon")]
26038    unsafe fn test_vrecpes_f32() {
26039        let a: f32 = 4.0;
26040        let e: f32 = 0.24951171875;
26041        let r: f32 = vrecpes_f32(a);
26042        assert_eq!(r, e);
26043    }
26044
26045    #[simd_test(enable = "neon")]
26046    unsafe fn test_vrecped_f64() {
26047        let a: f64 = 4.0;
26048        let e: f64 = 0.24951171875;
26049        let r: f64 = vrecped_f64(a);
26050        assert_eq!(r, e);
26051    }
26052
26053    #[simd_test(enable = "neon")]
26054    unsafe fn test_vrecps_f64() {
26055        let a: f64 = 4.0;
26056        let b: f64 = 4.0;
26057        let e: f64 = -14.;
26058        let r: f64 = transmute(vrecps_f64(transmute(a), transmute(b)));
26059        assert_eq!(r, e);
26060    }
26061
26062    #[simd_test(enable = "neon")]
26063    unsafe fn test_vrecpsq_f64() {
26064        let a: f64x2 = f64x2::new(4.0, 3.0);
26065        let b: f64x2 = f64x2::new(4.0, 3.0);
26066        let e: f64x2 = f64x2::new(-14., -7.);
26067        let r: f64x2 = transmute(vrecpsq_f64(transmute(a), transmute(b)));
26068        assert_eq!(r, e);
26069    }
26070
26071    #[simd_test(enable = "neon")]
26072    unsafe fn test_vrecpss_f32() {
26073        let a: f32 = 4.0;
26074        let b: f32 = 4.0;
26075        let e: f32 = -14.;
26076        let r: f32 = vrecpss_f32(a, b);
26077        assert_eq!(r, e);
26078    }
26079
26080    #[simd_test(enable = "neon")]
26081    unsafe fn test_vrecpsd_f64() {
26082        let a: f64 = 4.0;
26083        let b: f64 = 4.0;
26084        let e: f64 = -14.;
26085        let r: f64 = vrecpsd_f64(a, b);
26086        assert_eq!(r, e);
26087    }
26088
26089    #[simd_test(enable = "neon")]
26090    unsafe fn test_vrecpxs_f32() {
26091        let a: f32 = 4.0;
26092        let e: f32 = 0.5;
26093        let r: f32 = vrecpxs_f32(a);
26094        assert_eq!(r, e);
26095    }
26096
26097    #[simd_test(enable = "neon")]
26098    unsafe fn test_vrecpxd_f64() {
26099        let a: f64 = 4.0;
26100        let e: f64 = 0.5;
26101        let r: f64 = vrecpxd_f64(a);
26102        assert_eq!(r, e);
26103    }
26104
26105    #[simd_test(enable = "neon")]
26106    unsafe fn test_vreinterpret_s64_p64() {
26107        let a: i64x1 = i64x1::new(0);
26108        let e: i64x1 = i64x1::new(0);
26109        let r: i64x1 = transmute(vreinterpret_s64_p64(transmute(a)));
26110        assert_eq!(r, e);
26111    }
26112
26113    #[simd_test(enable = "neon")]
26114    unsafe fn test_vreinterpret_u64_p64() {
26115        let a: i64x1 = i64x1::new(0);
26116        let e: u64x1 = u64x1::new(0);
26117        let r: u64x1 = transmute(vreinterpret_u64_p64(transmute(a)));
26118        assert_eq!(r, e);
26119    }
26120
26121    #[simd_test(enable = "neon")]
26122    unsafe fn test_vreinterpret_p64_s64() {
26123        let a: i64x1 = i64x1::new(0);
26124        let e: i64x1 = i64x1::new(0);
26125        let r: i64x1 = transmute(vreinterpret_p64_s64(transmute(a)));
26126        assert_eq!(r, e);
26127    }
26128
26129    #[simd_test(enable = "neon")]
26130    unsafe fn test_vreinterpret_p64_u64() {
26131        let a: u64x1 = u64x1::new(0);
26132        let e: i64x1 = i64x1::new(0);
26133        let r: i64x1 = transmute(vreinterpret_p64_u64(transmute(a)));
26134        assert_eq!(r, e);
26135    }
26136
26137    #[simd_test(enable = "neon")]
26138    unsafe fn test_vreinterpretq_s64_p64() {
26139        let a: i64x2 = i64x2::new(0, 1);
26140        let e: i64x2 = i64x2::new(0, 1);
26141        let r: i64x2 = transmute(vreinterpretq_s64_p64(transmute(a)));
26142        assert_eq!(r, e);
26143    }
26144
26145    #[simd_test(enable = "neon")]
26146    unsafe fn test_vreinterpretq_u64_p64() {
26147        let a: i64x2 = i64x2::new(0, 1);
26148        let e: u64x2 = u64x2::new(0, 1);
26149        let r: u64x2 = transmute(vreinterpretq_u64_p64(transmute(a)));
26150        assert_eq!(r, e);
26151    }
26152
26153    #[simd_test(enable = "neon")]
26154    unsafe fn test_vreinterpretq_p64_s64() {
26155        let a: i64x2 = i64x2::new(0, 1);
26156        let e: i64x2 = i64x2::new(0, 1);
26157        let r: i64x2 = transmute(vreinterpretq_p64_s64(transmute(a)));
26158        assert_eq!(r, e);
26159    }
26160
26161    #[simd_test(enable = "neon")]
26162    unsafe fn test_vreinterpretq_p64_u64() {
26163        let a: u64x2 = u64x2::new(0, 1);
26164        let e: i64x2 = i64x2::new(0, 1);
26165        let r: i64x2 = transmute(vreinterpretq_p64_u64(transmute(a)));
26166        assert_eq!(r, e);
26167    }
26168
26169    #[simd_test(enable = "neon")]
26170    unsafe fn test_vreinterpret_s8_f64() {
26171        let a: f64 = 0.;
26172        let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26173        let r: i8x8 = transmute(vreinterpret_s8_f64(transmute(a)));
26174        assert_eq!(r, e);
26175    }
26176
26177    #[simd_test(enable = "neon")]
26178    unsafe fn test_vreinterpret_s16_f64() {
26179        let a: f64 = 0.;
26180        let e: i16x4 = i16x4::new(0, 0, 0, 0);
26181        let r: i16x4 = transmute(vreinterpret_s16_f64(transmute(a)));
26182        assert_eq!(r, e);
26183    }
26184
26185    #[simd_test(enable = "neon")]
26186    unsafe fn test_vreinterpret_s32_f64() {
26187        let a: f64 = 0.;
26188        let e: i32x2 = i32x2::new(0, 0);
26189        let r: i32x2 = transmute(vreinterpret_s32_f64(transmute(a)));
26190        assert_eq!(r, e);
26191    }
26192
26193    #[simd_test(enable = "neon")]
26194    unsafe fn test_vreinterpret_s64_f64() {
26195        let a: f64 = 0.;
26196        let e: i64x1 = i64x1::new(0);
26197        let r: i64x1 = transmute(vreinterpret_s64_f64(transmute(a)));
26198        assert_eq!(r, e);
26199    }
26200
26201    #[simd_test(enable = "neon")]
26202    unsafe fn test_vreinterpretq_s8_f64() {
26203        let a: f64x2 = f64x2::new(0., 0.);
26204        let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26205        let r: i8x16 = transmute(vreinterpretq_s8_f64(transmute(a)));
26206        assert_eq!(r, e);
26207    }
26208
26209    #[simd_test(enable = "neon")]
26210    unsafe fn test_vreinterpretq_s16_f64() {
26211        let a: f64x2 = f64x2::new(0., 0.);
26212        let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26213        let r: i16x8 = transmute(vreinterpretq_s16_f64(transmute(a)));
26214        assert_eq!(r, e);
26215    }
26216
26217    #[simd_test(enable = "neon")]
26218    unsafe fn test_vreinterpretq_s32_f64() {
26219        let a: f64x2 = f64x2::new(0., 0.);
26220        let e: i32x4 = i32x4::new(0, 0, 0, 0);
26221        let r: i32x4 = transmute(vreinterpretq_s32_f64(transmute(a)));
26222        assert_eq!(r, e);
26223    }
26224
26225    #[simd_test(enable = "neon")]
26226    unsafe fn test_vreinterpretq_s64_f64() {
26227        let a: f64x2 = f64x2::new(0., 0.);
26228        let e: i64x2 = i64x2::new(0, 0);
26229        let r: i64x2 = transmute(vreinterpretq_s64_f64(transmute(a)));
26230        assert_eq!(r, e);
26231    }
26232
26233    #[simd_test(enable = "neon")]
26234    unsafe fn test_vreinterpret_u8_f64() {
26235        let a: f64 = 0.;
26236        let e: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26237        let r: u8x8 = transmute(vreinterpret_u8_f64(transmute(a)));
26238        assert_eq!(r, e);
26239    }
26240
26241    #[simd_test(enable = "neon")]
26242    unsafe fn test_vreinterpret_u16_f64() {
26243        let a: f64 = 0.;
26244        let e: u16x4 = u16x4::new(0, 0, 0, 0);
26245        let r: u16x4 = transmute(vreinterpret_u16_f64(transmute(a)));
26246        assert_eq!(r, e);
26247    }
26248
26249    #[simd_test(enable = "neon")]
26250    unsafe fn test_vreinterpret_u32_f64() {
26251        let a: f64 = 0.;
26252        let e: u32x2 = u32x2::new(0, 0);
26253        let r: u32x2 = transmute(vreinterpret_u32_f64(transmute(a)));
26254        assert_eq!(r, e);
26255    }
26256
26257    #[simd_test(enable = "neon")]
26258    unsafe fn test_vreinterpret_u64_f64() {
26259        let a: f64 = 0.;
26260        let e: u64x1 = u64x1::new(0);
26261        let r: u64x1 = transmute(vreinterpret_u64_f64(transmute(a)));
26262        assert_eq!(r, e);
26263    }
26264
26265    #[simd_test(enable = "neon")]
26266    unsafe fn test_vreinterpretq_u8_f64() {
26267        let a: f64x2 = f64x2::new(0., 0.);
26268        let e: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26269        let r: u8x16 = transmute(vreinterpretq_u8_f64(transmute(a)));
26270        assert_eq!(r, e);
26271    }
26272
26273    #[simd_test(enable = "neon")]
26274    unsafe fn test_vreinterpretq_u16_f64() {
26275        let a: f64x2 = f64x2::new(0., 0.);
26276        let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26277        let r: u16x8 = transmute(vreinterpretq_u16_f64(transmute(a)));
26278        assert_eq!(r, e);
26279    }
26280
26281    #[simd_test(enable = "neon")]
26282    unsafe fn test_vreinterpretq_u32_f64() {
26283        let a: f64x2 = f64x2::new(0., 0.);
26284        let e: u32x4 = u32x4::new(0, 0, 0, 0);
26285        let r: u32x4 = transmute(vreinterpretq_u32_f64(transmute(a)));
26286        assert_eq!(r, e);
26287    }
26288
26289    #[simd_test(enable = "neon")]
26290    unsafe fn test_vreinterpretq_u64_f64() {
26291        let a: f64x2 = f64x2::new(0., 0.);
26292        let e: u64x2 = u64x2::new(0, 0);
26293        let r: u64x2 = transmute(vreinterpretq_u64_f64(transmute(a)));
26294        assert_eq!(r, e);
26295    }
26296
26297    #[simd_test(enable = "neon")]
26298    unsafe fn test_vreinterpret_p8_f64() {
26299        let a: f64 = 0.;
26300        let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26301        let r: i8x8 = transmute(vreinterpret_p8_f64(transmute(a)));
26302        assert_eq!(r, e);
26303    }
26304
26305    #[simd_test(enable = "neon")]
26306    unsafe fn test_vreinterpret_p16_f64() {
26307        let a: f64 = 0.;
26308        let e: i16x4 = i16x4::new(0, 0, 0, 0);
26309        let r: i16x4 = transmute(vreinterpret_p16_f64(transmute(a)));
26310        assert_eq!(r, e);
26311    }
26312
26313    #[simd_test(enable = "neon")]
26314    unsafe fn test_vreinterpret_p64_f32() {
26315        let a: f32x2 = f32x2::new(0., 0.);
26316        let e: i64x1 = i64x1::new(0);
26317        let r: i64x1 = transmute(vreinterpret_p64_f32(transmute(a)));
26318        assert_eq!(r, e);
26319    }
26320
26321    #[simd_test(enable = "neon")]
26322    unsafe fn test_vreinterpret_p64_f64() {
26323        let a: f64 = 0.;
26324        let e: i64x1 = i64x1::new(0);
26325        let r: i64x1 = transmute(vreinterpret_p64_f64(transmute(a)));
26326        assert_eq!(r, e);
26327    }
26328
26329    #[simd_test(enable = "neon")]
26330    unsafe fn test_vreinterpretq_p8_f64() {
26331        let a: f64x2 = f64x2::new(0., 0.);
26332        let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26333        let r: i8x16 = transmute(vreinterpretq_p8_f64(transmute(a)));
26334        assert_eq!(r, e);
26335    }
26336
26337    #[simd_test(enable = "neon")]
26338    unsafe fn test_vreinterpretq_p16_f64() {
26339        let a: f64x2 = f64x2::new(0., 0.);
26340        let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26341        let r: i16x8 = transmute(vreinterpretq_p16_f64(transmute(a)));
26342        assert_eq!(r, e);
26343    }
26344
26345    #[simd_test(enable = "neon")]
26346    unsafe fn test_vreinterpretq_p64_f32() {
26347        let a: f32x4 = f32x4::new(0., 0., 0., 0.);
26348        let e: i64x2 = i64x2::new(0, 0);
26349        let r: i64x2 = transmute(vreinterpretq_p64_f32(transmute(a)));
26350        assert_eq!(r, e);
26351    }
26352
26353    #[simd_test(enable = "neon")]
26354    unsafe fn test_vreinterpretq_p64_f64() {
26355        let a: f64x2 = f64x2::new(0., 0.);
26356        let e: i64x2 = i64x2::new(0, 0);
26357        let r: i64x2 = transmute(vreinterpretq_p64_f64(transmute(a)));
26358        assert_eq!(r, e);
26359    }
26360
26361    #[simd_test(enable = "neon")]
26362    unsafe fn test_vreinterpretq_p128_f64() {
26363        let a: f64x2 = f64x2::new(0., 0.);
26364        let e: p128 = 0;
26365        let r: p128 = vreinterpretq_p128_f64(transmute(a));
26366        assert_eq!(r, e);
26367    }
26368
26369    #[simd_test(enable = "neon")]
26370    unsafe fn test_vreinterpret_f64_s8() {
26371        let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26372        let e: f64 = 0.;
26373        let r: f64 = transmute(vreinterpret_f64_s8(transmute(a)));
26374        assert_eq!(r, e);
26375    }
26376
26377    #[simd_test(enable = "neon")]
26378    unsafe fn test_vreinterpret_f64_s16() {
26379        let a: i16x4 = i16x4::new(0, 0, 0, 0);
26380        let e: f64 = 0.;
26381        let r: f64 = transmute(vreinterpret_f64_s16(transmute(a)));
26382        assert_eq!(r, e);
26383    }
26384
26385    #[simd_test(enable = "neon")]
26386    unsafe fn test_vreinterpret_f64_s32() {
26387        let a: i32x2 = i32x2::new(0, 0);
26388        let e: f64 = 0.;
26389        let r: f64 = transmute(vreinterpret_f64_s32(transmute(a)));
26390        assert_eq!(r, e);
26391    }
26392
26393    #[simd_test(enable = "neon")]
26394    unsafe fn test_vreinterpret_f64_s64() {
26395        let a: i64x1 = i64x1::new(0);
26396        let e: f64 = 0.;
26397        let r: f64 = transmute(vreinterpret_f64_s64(transmute(a)));
26398        assert_eq!(r, e);
26399    }
26400
26401    #[simd_test(enable = "neon")]
26402    unsafe fn test_vreinterpretq_f64_s8() {
26403        let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26404        let e: f64x2 = f64x2::new(0., 0.);
26405        let r: f64x2 = transmute(vreinterpretq_f64_s8(transmute(a)));
26406        assert_eq!(r, e);
26407    }
26408
26409    #[simd_test(enable = "neon")]
26410    unsafe fn test_vreinterpretq_f64_s16() {
26411        let a: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26412        let e: f64x2 = f64x2::new(0., 0.);
26413        let r: f64x2 = transmute(vreinterpretq_f64_s16(transmute(a)));
26414        assert_eq!(r, e);
26415    }
26416
26417    #[simd_test(enable = "neon")]
26418    unsafe fn test_vreinterpretq_f64_s32() {
26419        let a: i32x4 = i32x4::new(0, 0, 0, 0);
26420        let e: f64x2 = f64x2::new(0., 0.);
26421        let r: f64x2 = transmute(vreinterpretq_f64_s32(transmute(a)));
26422        assert_eq!(r, e);
26423    }
26424
26425    #[simd_test(enable = "neon")]
26426    unsafe fn test_vreinterpretq_f64_s64() {
26427        let a: i64x2 = i64x2::new(0, 0);
26428        let e: f64x2 = f64x2::new(0., 0.);
26429        let r: f64x2 = transmute(vreinterpretq_f64_s64(transmute(a)));
26430        assert_eq!(r, e);
26431    }
26432
26433    #[simd_test(enable = "neon")]
26434    unsafe fn test_vreinterpret_f64_p8() {
26435        let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26436        let e: f64 = 0.;
26437        let r: f64 = transmute(vreinterpret_f64_p8(transmute(a)));
26438        assert_eq!(r, e);
26439    }
26440
26441    #[simd_test(enable = "neon")]
26442    unsafe fn test_vreinterpret_f64_u16() {
26443        let a: u16x4 = u16x4::new(0, 0, 0, 0);
26444        let e: f64 = 0.;
26445        let r: f64 = transmute(vreinterpret_f64_u16(transmute(a)));
26446        assert_eq!(r, e);
26447    }
26448
26449    #[simd_test(enable = "neon")]
26450    unsafe fn test_vreinterpret_f64_u32() {
26451        let a: u32x2 = u32x2::new(0, 0);
26452        let e: f64 = 0.;
26453        let r: f64 = transmute(vreinterpret_f64_u32(transmute(a)));
26454        assert_eq!(r, e);
26455    }
26456
26457    #[simd_test(enable = "neon")]
26458    unsafe fn test_vreinterpret_f64_u64() {
26459        let a: u64x1 = u64x1::new(0);
26460        let e: f64 = 0.;
26461        let r: f64 = transmute(vreinterpret_f64_u64(transmute(a)));
26462        assert_eq!(r, e);
26463    }
26464
26465    #[simd_test(enable = "neon")]
26466    unsafe fn test_vreinterpretq_f64_p8() {
26467        let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26468        let e: f64x2 = f64x2::new(0., 0.);
26469        let r: f64x2 = transmute(vreinterpretq_f64_p8(transmute(a)));
26470        assert_eq!(r, e);
26471    }
26472
26473    #[simd_test(enable = "neon")]
26474    unsafe fn test_vreinterpretq_f64_u16() {
26475        let a: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26476        let e: f64x2 = f64x2::new(0., 0.);
26477        let r: f64x2 = transmute(vreinterpretq_f64_u16(transmute(a)));
26478        assert_eq!(r, e);
26479    }
26480
26481    #[simd_test(enable = "neon")]
26482    unsafe fn test_vreinterpretq_f64_u32() {
26483        let a: u32x4 = u32x4::new(0, 0, 0, 0);
26484        let e: f64x2 = f64x2::new(0., 0.);
26485        let r: f64x2 = transmute(vreinterpretq_f64_u32(transmute(a)));
26486        assert_eq!(r, e);
26487    }
26488
26489    #[simd_test(enable = "neon")]
26490    unsafe fn test_vreinterpretq_f64_u64() {
26491        let a: u64x2 = u64x2::new(0, 0);
26492        let e: f64x2 = f64x2::new(0., 0.);
26493        let r: f64x2 = transmute(vreinterpretq_f64_u64(transmute(a)));
26494        assert_eq!(r, e);
26495    }
26496
26497    #[simd_test(enable = "neon")]
26498    unsafe fn test_vreinterpret_f64_u8() {
26499        let a: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26500        let e: f64 = 0.;
26501        let r: f64 = transmute(vreinterpret_f64_u8(transmute(a)));
26502        assert_eq!(r, e);
26503    }
26504
26505    #[simd_test(enable = "neon")]
26506    unsafe fn test_vreinterpret_f64_p16() {
26507        let a: i16x4 = i16x4::new(0, 0, 0, 0);
26508        let e: f64 = 0.;
26509        let r: f64 = transmute(vreinterpret_f64_p16(transmute(a)));
26510        assert_eq!(r, e);
26511    }
26512
26513    #[simd_test(enable = "neon")]
26514    unsafe fn test_vreinterpret_f64_p64() {
26515        let a: i64x1 = i64x1::new(0);
26516        let e: f64 = 0.;
26517        let r: f64 = transmute(vreinterpret_f64_p64(transmute(a)));
26518        assert_eq!(r, e);
26519    }
26520
26521    #[simd_test(enable = "neon")]
26522    unsafe fn test_vreinterpret_f32_p64() {
26523        let a: i64x1 = i64x1::new(0);
26524        let e: f32x2 = f32x2::new(0., 0.);
26525        let r: f32x2 = transmute(vreinterpret_f32_p64(transmute(a)));
26526        assert_eq!(r, e);
26527    }
26528
26529    #[simd_test(enable = "neon")]
26530    unsafe fn test_vreinterpretq_f64_u8() {
26531        let a: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26532        let e: f64x2 = f64x2::new(0., 0.);
26533        let r: f64x2 = transmute(vreinterpretq_f64_u8(transmute(a)));
26534        assert_eq!(r, e);
26535    }
26536
26537    #[simd_test(enable = "neon")]
26538    unsafe fn test_vreinterpretq_f64_p16() {
26539        let a: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
26540        let e: f64x2 = f64x2::new(0., 0.);
26541        let r: f64x2 = transmute(vreinterpretq_f64_p16(transmute(a)));
26542        assert_eq!(r, e);
26543    }
26544
26545    #[simd_test(enable = "neon")]
26546    unsafe fn test_vreinterpretq_f64_p64() {
26547        let a: i64x2 = i64x2::new(0, 0);
26548        let e: f64x2 = f64x2::new(0., 0.);
26549        let r: f64x2 = transmute(vreinterpretq_f64_p64(transmute(a)));
26550        assert_eq!(r, e);
26551    }
26552
26553    #[simd_test(enable = "neon")]
26554    unsafe fn test_vreinterpretq_f32_p64() {
26555        let a: i64x2 = i64x2::new(0, 0);
26556        let e: f32x4 = f32x4::new(0., 0., 0., 0.);
26557        let r: f32x4 = transmute(vreinterpretq_f32_p64(transmute(a)));
26558        assert_eq!(r, e);
26559    }
26560
26561    #[simd_test(enable = "neon")]
26562    unsafe fn test_vreinterpretq_f64_p128() {
26563        let a: p128 = 0;
26564        let e: f64x2 = f64x2::new(0., 0.);
26565        let r: f64x2 = transmute(vreinterpretq_f64_p128(a));
26566        assert_eq!(r, e);
26567    }
26568
26569    #[simd_test(enable = "neon")]
26570    unsafe fn test_vreinterpret_f64_f32() {
26571        let a: f32x2 = f32x2::new(0., 0.);
26572        let e: f64 = 0.;
26573        let r: f64 = transmute(vreinterpret_f64_f32(transmute(a)));
26574        assert_eq!(r, e);
26575    }
26576
26577    #[simd_test(enable = "neon")]
26578    unsafe fn test_vreinterpret_f32_f64() {
26579        let a: f64 = 0.;
26580        let e: f32x2 = f32x2::new(0., 0.);
26581        let r: f32x2 = transmute(vreinterpret_f32_f64(transmute(a)));
26582        assert_eq!(r, e);
26583    }
26584
26585    #[simd_test(enable = "neon")]
26586    unsafe fn test_vreinterpretq_f64_f32() {
26587        let a: f32x4 = f32x4::new(0., 0., 0., 0.);
26588        let e: f64x2 = f64x2::new(0., 0.);
26589        let r: f64x2 = transmute(vreinterpretq_f64_f32(transmute(a)));
26590        assert_eq!(r, e);
26591    }
26592
26593    #[simd_test(enable = "neon")]
26594    unsafe fn test_vreinterpretq_f32_f64() {
26595        let a: f64x2 = f64x2::new(0., 0.);
26596        let e: f32x4 = f32x4::new(0., 0., 0., 0.);
26597        let r: f32x4 = transmute(vreinterpretq_f32_f64(transmute(a)));
26598        assert_eq!(r, e);
26599    }
26600
26601    #[simd_test(enable = "neon")]
26602    unsafe fn test_vrshld_s64() {
26603        let a: i64 = 1;
26604        let b: i64 = 2;
26605        let e: i64 = 4;
26606        let r: i64 = vrshld_s64(a, b);
26607        assert_eq!(r, e);
26608    }
26609
26610    #[simd_test(enable = "neon")]
26611    unsafe fn test_vrshld_u64() {
26612        let a: u64 = 1;
26613        let b: i64 = 2;
26614        let e: u64 = 4;
26615        let r: u64 = vrshld_u64(a, b);
26616        assert_eq!(r, e);
26617    }
26618
26619    #[simd_test(enable = "neon")]
26620    unsafe fn test_vrshrd_n_s64() {
26621        let a: i64 = 4;
26622        let e: i64 = 1;
26623        let r: i64 = vrshrd_n_s64::<2>(a);
26624        assert_eq!(r, e);
26625    }
26626
26627    #[simd_test(enable = "neon")]
26628    unsafe fn test_vrshrd_n_u64() {
26629        let a: u64 = 4;
26630        let e: u64 = 1;
26631        let r: u64 = vrshrd_n_u64::<2>(a);
26632        assert_eq!(r, e);
26633    }
26634
26635    #[simd_test(enable = "neon")]
26636    unsafe fn test_vrshrn_high_n_s16() {
26637        let a: i8x8 = i8x8::new(0, 1, 8, 9, 8, 9, 10, 11);
26638        let b: i16x8 = i16x8::new(32, 36, 40, 44, 48, 52, 56, 60);
26639        let e: i8x16 = i8x16::new(0, 1, 8, 9, 8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15);
26640        let r: i8x16 = transmute(vrshrn_high_n_s16::<2>(transmute(a), transmute(b)));
26641        assert_eq!(r, e);
26642    }
26643
26644    #[simd_test(enable = "neon")]
26645    unsafe fn test_vrshrn_high_n_s32() {
26646        let a: i16x4 = i16x4::new(0, 1, 8, 9);
26647        let b: i32x4 = i32x4::new(32, 36, 40, 44);
26648        let e: i16x8 = i16x8::new(0, 1, 8, 9, 8, 9, 10, 11);
26649        let r: i16x8 = transmute(vrshrn_high_n_s32::<2>(transmute(a), transmute(b)));
26650        assert_eq!(r, e);
26651    }
26652
26653    #[simd_test(enable = "neon")]
26654    unsafe fn test_vrshrn_high_n_s64() {
26655        let a: i32x2 = i32x2::new(0, 1);
26656        let b: i64x2 = i64x2::new(32, 36);
26657        let e: i32x4 = i32x4::new(0, 1, 8, 9);
26658        let r: i32x4 = transmute(vrshrn_high_n_s64::<2>(transmute(a), transmute(b)));
26659        assert_eq!(r, e);
26660    }
26661
26662    #[simd_test(enable = "neon")]
26663    unsafe fn test_vrshrn_high_n_u16() {
26664        let a: u8x8 = u8x8::new(0, 1, 8, 9, 8, 9, 10, 11);
26665        let b: u16x8 = u16x8::new(32, 36, 40, 44, 48, 52, 56, 60);
26666        let e: u8x16 = u8x16::new(0, 1, 8, 9, 8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15);
26667        let r: u8x16 = transmute(vrshrn_high_n_u16::<2>(transmute(a), transmute(b)));
26668        assert_eq!(r, e);
26669    }
26670
26671    #[simd_test(enable = "neon")]
26672    unsafe fn test_vrshrn_high_n_u32() {
26673        let a: u16x4 = u16x4::new(0, 1, 8, 9);
26674        let b: u32x4 = u32x4::new(32, 36, 40, 44);
26675        let e: u16x8 = u16x8::new(0, 1, 8, 9, 8, 9, 10, 11);
26676        let r: u16x8 = transmute(vrshrn_high_n_u32::<2>(transmute(a), transmute(b)));
26677        assert_eq!(r, e);
26678    }
26679
26680    #[simd_test(enable = "neon")]
26681    unsafe fn test_vrshrn_high_n_u64() {
26682        let a: u32x2 = u32x2::new(0, 1);
26683        let b: u64x2 = u64x2::new(32, 36);
26684        let e: u32x4 = u32x4::new(0, 1, 8, 9);
26685        let r: u32x4 = transmute(vrshrn_high_n_u64::<2>(transmute(a), transmute(b)));
26686        assert_eq!(r, e);
26687    }
26688
26689    #[simd_test(enable = "neon")]
26690    unsafe fn test_vrsrad_n_s64() {
26691        let a: i64 = 1;
26692        let b: i64 = 4;
26693        let e: i64 = 2;
26694        let r: i64 = vrsrad_n_s64::<2>(a, b);
26695        assert_eq!(r, e);
26696    }
26697
26698    #[simd_test(enable = "neon")]
26699    unsafe fn test_vrsrad_n_u64() {
26700        let a: u64 = 1;
26701        let b: u64 = 4;
26702        let e: u64 = 2;
26703        let r: u64 = vrsrad_n_u64::<2>(a, b);
26704        assert_eq!(r, e);
26705    }
26706
26707    #[simd_test(enable = "neon")]
26708    unsafe fn test_vrsubhn_high_s16() {
26709        let a: i8x8 = i8x8::new(1, 2, 0, 0, 0, 0, 0, 0);
26710        let b: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
26711        let c: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
26712        let e: i8x16 = i8x16::new(1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26713        let r: i8x16 = transmute(vrsubhn_high_s16(transmute(a), transmute(b), transmute(c)));
26714        assert_eq!(r, e);
26715    }
26716
26717    #[simd_test(enable = "neon")]
26718    unsafe fn test_vrsubhn_high_s32() {
26719        let a: i16x4 = i16x4::new(1, 2, 0, 0);
26720        let b: i32x4 = i32x4::new(1, 2, 3, 4);
26721        let c: i32x4 = i32x4::new(1, 2, 3, 4);
26722        let e: i16x8 = i16x8::new(1, 2, 0, 0, 0, 0, 0, 0);
26723        let r: i16x8 = transmute(vrsubhn_high_s32(transmute(a), transmute(b), transmute(c)));
26724        assert_eq!(r, e);
26725    }
26726
26727    #[simd_test(enable = "neon")]
26728    unsafe fn test_vrsubhn_high_s64() {
26729        let a: i32x2 = i32x2::new(1, 2);
26730        let b: i64x2 = i64x2::new(1, 2);
26731        let c: i64x2 = i64x2::new(1, 2);
26732        let e: i32x4 = i32x4::new(1, 2, 0, 0);
26733        let r: i32x4 = transmute(vrsubhn_high_s64(transmute(a), transmute(b), transmute(c)));
26734        assert_eq!(r, e);
26735    }
26736
26737    #[simd_test(enable = "neon")]
26738    unsafe fn test_vrsubhn_high_u16() {
26739        let a: u8x8 = u8x8::new(1, 2, 0, 0, 0, 0, 0, 0);
26740        let b: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
26741        let c: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
26742        let e: u8x16 = u8x16::new(1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
26743        let r: u8x16 = transmute(vrsubhn_high_u16(transmute(a), transmute(b), transmute(c)));
26744        assert_eq!(r, e);
26745    }
26746
26747    #[simd_test(enable = "neon")]
26748    unsafe fn test_vrsubhn_high_u32() {
26749        let a: u16x4 = u16x4::new(1, 2, 0, 0);
26750        let b: u32x4 = u32x4::new(1, 2, 3, 4);
26751        let c: u32x4 = u32x4::new(1, 2, 3, 4);
26752        let e: u16x8 = u16x8::new(1, 2, 0, 0, 0, 0, 0, 0);
26753        let r: u16x8 = transmute(vrsubhn_high_u32(transmute(a), transmute(b), transmute(c)));
26754        assert_eq!(r, e);
26755    }
26756
26757    #[simd_test(enable = "neon")]
26758    unsafe fn test_vrsubhn_high_u64() {
26759        let a: u32x2 = u32x2::new(1, 2);
26760        let b: u64x2 = u64x2::new(1, 2);
26761        let c: u64x2 = u64x2::new(1, 2);
26762        let e: u32x4 = u32x4::new(1, 2, 0, 0);
26763        let r: u32x4 = transmute(vrsubhn_high_u64(transmute(a), transmute(b), transmute(c)));
26764        assert_eq!(r, e);
26765    }
26766
26767    #[simd_test(enable = "neon")]
26768    unsafe fn test_vset_lane_f64() {
26769        let a: f64 = 1.;
26770        let b: f64 = 0.;
26771        let e: f64 = 1.;
26772        let r: f64 = transmute(vset_lane_f64::<0>(a, transmute(b)));
26773        assert_eq!(r, e);
26774    }
26775
26776    #[simd_test(enable = "neon")]
26777    unsafe fn test_vsetq_lane_f64() {
26778        let a: f64 = 1.;
26779        let b: f64x2 = f64x2::new(0., 2.);
26780        let e: f64x2 = f64x2::new(1., 2.);
26781        let r: f64x2 = transmute(vsetq_lane_f64::<0>(a, transmute(b)));
26782        assert_eq!(r, e);
26783    }
26784
26785    #[simd_test(enable = "neon")]
26786    unsafe fn test_vshld_s64() {
26787        let a: i64 = 1;
26788        let b: i64 = 2;
26789        let e: i64 = 4;
26790        let r: i64 = vshld_s64(a, b);
26791        assert_eq!(r, e);
26792    }
26793
26794    #[simd_test(enable = "neon")]
26795    unsafe fn test_vshld_u64() {
26796        let a: u64 = 1;
26797        let b: i64 = 2;
26798        let e: u64 = 4;
26799        let r: u64 = vshld_u64(a, b);
26800        assert_eq!(r, e);
26801    }
26802
26803    #[simd_test(enable = "neon")]
26804    unsafe fn test_vshll_high_n_s8() {
26805        let a: i8x16 = i8x16::new(0, 0, 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8);
26806        let e: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
26807        let r: i16x8 = transmute(vshll_high_n_s8::<2>(transmute(a)));
26808        assert_eq!(r, e);
26809    }
26810
26811    #[simd_test(enable = "neon")]
26812    unsafe fn test_vshll_high_n_s16() {
26813        let a: i16x8 = i16x8::new(0, 0, 1, 2, 1, 2, 3, 4);
26814        let e: i32x4 = i32x4::new(4, 8, 12, 16);
26815        let r: i32x4 = transmute(vshll_high_n_s16::<2>(transmute(a)));
26816        assert_eq!(r, e);
26817    }
26818
26819    #[simd_test(enable = "neon")]
26820    unsafe fn test_vshll_high_n_s32() {
26821        let a: i32x4 = i32x4::new(0, 0, 1, 2);
26822        let e: i64x2 = i64x2::new(4, 8);
26823        let r: i64x2 = transmute(vshll_high_n_s32::<2>(transmute(a)));
26824        assert_eq!(r, e);
26825    }
26826
26827    #[simd_test(enable = "neon")]
26828    unsafe fn test_vshll_high_n_u8() {
26829        let a: u8x16 = u8x16::new(0, 0, 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8);
26830        let e: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
26831        let r: u16x8 = transmute(vshll_high_n_u8::<2>(transmute(a)));
26832        assert_eq!(r, e);
26833    }
26834
26835    #[simd_test(enable = "neon")]
26836    unsafe fn test_vshll_high_n_u16() {
26837        let a: u16x8 = u16x8::new(0, 0, 1, 2, 1, 2, 3, 4);
26838        let e: u32x4 = u32x4::new(4, 8, 12, 16);
26839        let r: u32x4 = transmute(vshll_high_n_u16::<2>(transmute(a)));
26840        assert_eq!(r, e);
26841    }
26842
26843    #[simd_test(enable = "neon")]
26844    unsafe fn test_vshll_high_n_u32() {
26845        let a: u32x4 = u32x4::new(0, 0, 1, 2);
26846        let e: u64x2 = u64x2::new(4, 8);
26847        let r: u64x2 = transmute(vshll_high_n_u32::<2>(transmute(a)));
26848        assert_eq!(r, e);
26849    }
26850
26851    #[simd_test(enable = "neon")]
26852    unsafe fn test_vshrn_high_n_s16() {
26853        let a: i8x8 = i8x8::new(1, 2, 5, 6, 5, 6, 7, 8);
26854        let b: i16x8 = i16x8::new(20, 24, 28, 32, 52, 56, 60, 64);
26855        let e: i8x16 = i8x16::new(1, 2, 5, 6, 5, 6, 7, 8, 5, 6, 7, 8, 13, 14, 15, 16);
26856        let r: i8x16 = transmute(vshrn_high_n_s16::<2>(transmute(a), transmute(b)));
26857        assert_eq!(r, e);
26858    }
26859
26860    #[simd_test(enable = "neon")]
26861    unsafe fn test_vshrn_high_n_s32() {
26862        let a: i16x4 = i16x4::new(1, 2, 5, 6);
26863        let b: i32x4 = i32x4::new(20, 24, 28, 32);
26864        let e: i16x8 = i16x8::new(1, 2, 5, 6, 5, 6, 7, 8);
26865        let r: i16x8 = transmute(vshrn_high_n_s32::<2>(transmute(a), transmute(b)));
26866        assert_eq!(r, e);
26867    }
26868
26869    #[simd_test(enable = "neon")]
26870    unsafe fn test_vshrn_high_n_s64() {
26871        let a: i32x2 = i32x2::new(1, 2);
26872        let b: i64x2 = i64x2::new(20, 24);
26873        let e: i32x4 = i32x4::new(1, 2, 5, 6);
26874        let r: i32x4 = transmute(vshrn_high_n_s64::<2>(transmute(a), transmute(b)));
26875        assert_eq!(r, e);
26876    }
26877
26878    #[simd_test(enable = "neon")]
26879    unsafe fn test_vshrn_high_n_u16() {
26880        let a: u8x8 = u8x8::new(1, 2, 5, 6, 5, 6, 7, 8);
26881        let b: u16x8 = u16x8::new(20, 24, 28, 32, 52, 56, 60, 64);
26882        let e: u8x16 = u8x16::new(1, 2, 5, 6, 5, 6, 7, 8, 5, 6, 7, 8, 13, 14, 15, 16);
26883        let r: u8x16 = transmute(vshrn_high_n_u16::<2>(transmute(a), transmute(b)));
26884        assert_eq!(r, e);
26885    }
26886
26887    #[simd_test(enable = "neon")]
26888    unsafe fn test_vshrn_high_n_u32() {
26889        let a: u16x4 = u16x4::new(1, 2, 5, 6);
26890        let b: u32x4 = u32x4::new(20, 24, 28, 32);
26891        let e: u16x8 = u16x8::new(1, 2, 5, 6, 5, 6, 7, 8);
26892        let r: u16x8 = transmute(vshrn_high_n_u32::<2>(transmute(a), transmute(b)));
26893        assert_eq!(r, e);
26894    }
26895
26896    #[simd_test(enable = "neon")]
26897    unsafe fn test_vshrn_high_n_u64() {
26898        let a: u32x2 = u32x2::new(1, 2);
26899        let b: u64x2 = u64x2::new(20, 24);
26900        let e: u32x4 = u32x4::new(1, 2, 5, 6);
26901        let r: u32x4 = transmute(vshrn_high_n_u64::<2>(transmute(a), transmute(b)));
26902        assert_eq!(r, e);
26903    }
26904
26905    #[simd_test(enable = "neon,sm4")]
26906    unsafe fn test_vsm3partw1q_u32() {
26907        let a: u32x4 = u32x4::new(1, 2, 3, 4);
26908        let b: u32x4 = u32x4::new(1, 2, 3, 4);
26909        let c: u32x4 = u32x4::new(1, 2, 3, 4);
26910        let e: u32x4 = u32x4::new(2147549312, 3221323968, 131329, 2684362752);
26911        let r: u32x4 = transmute(vsm3partw1q_u32(transmute(a), transmute(b), transmute(c)));
26912        assert_eq!(r, e);
26913    }
26914
26915    #[simd_test(enable = "neon,sm4")]
26916    unsafe fn test_vsm3partw2q_u32() {
26917        let a: u32x4 = u32x4::new(1, 2, 3, 4);
26918        let b: u32x4 = u32x4::new(1, 2, 3, 4);
26919        let c: u32x4 = u32x4::new(1, 2, 3, 4);
26920        let e: u32x4 = u32x4::new(128, 256, 384, 1077977696);
26921        let r: u32x4 = transmute(vsm3partw2q_u32(transmute(a), transmute(b), transmute(c)));
26922        assert_eq!(r, e);
26923    }
26924
26925    #[simd_test(enable = "neon,sm4")]
26926    unsafe fn test_vsm3ss1q_u32() {
26927        let a: u32x4 = u32x4::new(1, 2, 3, 4);
26928        let b: u32x4 = u32x4::new(1, 2, 3, 4);
26929        let c: u32x4 = u32x4::new(1, 2, 3, 4);
26930        let e: u32x4 = u32x4::new(0, 0, 0, 2098176);
26931        let r: u32x4 = transmute(vsm3ss1q_u32(transmute(a), transmute(b), transmute(c)));
26932        assert_eq!(r, e);
26933    }
26934
26935    #[simd_test(enable = "neon,sm4")]
26936    unsafe fn test_vsm4ekeyq_u32() {
26937        let a: u32x4 = u32x4::new(1, 2, 3, 4);
26938        let b: u32x4 = u32x4::new(1, 2, 3, 4);
26939        let e: u32x4 = u32x4::new(1784948604, 136020997, 2940231695, 3789947679);
26940        let r: u32x4 = transmute(vsm4ekeyq_u32(transmute(a), transmute(b)));
26941        assert_eq!(r, e);
26942    }
26943
26944    #[simd_test(enable = "neon,sm4")]
26945    unsafe fn test_vsm4eq_u32() {
26946        let a: u32x4 = u32x4::new(1, 2, 3, 4);
26947        let b: u32x4 = u32x4::new(1, 2, 3, 4);
26948        let e: u32x4 = u32x4::new(1093874472, 3616769504, 3878330411, 2765298765);
26949        let r: u32x4 = transmute(vsm4eq_u32(transmute(a), transmute(b)));
26950        assert_eq!(r, e);
26951    }
26952
26953    #[simd_test(enable = "neon,sha3")]
26954    unsafe fn test_vrax1q_u64() {
26955        let a: u64x2 = u64x2::new(1, 2);
26956        let b: u64x2 = u64x2::new(3, 4);
26957        let e: u64x2 = u64x2::new(7, 10);
26958        let r: u64x2 = transmute(vrax1q_u64(transmute(a), transmute(b)));
26959        assert_eq!(r, e);
26960    }
26961
    // SHA512H hash-part-1: checks `vsha512hq_u64` against a precomputed vector.
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vsha512hq_u64() {
        let a: u64x2 = u64x2::new(1, 2);
        let b: u64x2 = u64x2::new(3, 4);
        let c: u64x2 = u64x2::new(5, 6);
        let e: u64x2 = u64x2::new(11189044327219203, 7177611956453380);
        let r: u64x2 = transmute(vsha512hq_u64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
26971
    // SHA512H2 hash-part-2: checks `vsha512h2q_u64` against a precomputed vector.
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vsha512h2q_u64() {
        let a: u64x2 = u64x2::new(1, 2);
        let b: u64x2 = u64x2::new(3, 4);
        let c: u64x2 = u64x2::new(5, 6);
        let e: u64x2 = u64x2::new(5770237651009406214, 349133864969);
        let r: u64x2 = transmute(vsha512h2q_u64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
26981
    // SHA512SU0 schedule-update-0: checks against a precomputed vector.
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vsha512su0q_u64() {
        let a: u64x2 = u64x2::new(1, 2);
        let b: u64x2 = u64x2::new(3, 4);
        let e: u64x2 = u64x2::new(144115188075855874, 9439544818968559619);
        let r: u64x2 = transmute(vsha512su0q_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
26990
    // SHA512SU1 schedule-update-1: checks against a precomputed vector.
    #[simd_test(enable = "neon,sha3")]
    unsafe fn test_vsha512su1q_u64() {
        let a: u64x2 = u64x2::new(1, 2);
        let b: u64x2 = u64x2::new(3, 4);
        let c: u64x2 = u64x2::new(5, 6);
        let e: u64x2 = u64x2::new(105553116266526, 140737488355368);
        let r: u64x2 = transmute(vsha512su1q_u64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
27000
    // FRINT32X round-to-nearest: -1.5 -> -2.0 (ties to even), 2.9 -> 3.0.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd32x_f32() {
        let a: f32x2 = f32x2::new(-1.5, 2.9);
        let e: f32x2 = f32x2::new(-2.0, 3.0);
        let r: f32x2 = transmute(vrnd32x_f32(transmute(a)));
        assert_eq!(r, e);
    }
27008
    // FRINT32X, 128-bit: half-way cases round to even (1.5 -> 2.0, -2.5 -> -2.0).
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd32xq_f32() {
        let a: f32x4 = f32x4::new(-1.5, 2.9, 1.5, -2.5);
        let e: f32x4 = f32x4::new(-2.0, 3.0, 2.0, -2.0);
        let r: f32x4 = transmute(vrnd32xq_f32(transmute(a)));
        assert_eq!(r, e);
    }
27016
    // FRINT32X on f64x2, four sub-cases: ordinary rounding, ties-to-even, and
    // the i32 boundary — per the expected vectors, inputs whose rounded value
    // falls outside the 32-bit range map to -2147483648.0.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd32xq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 2.9);
        let e: f64x2 = f64x2::new(-2.0, 3.0);
        let r: f64x2 = transmute(vrnd32xq_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64x2 = f64x2::new(1.5, -2.5);
        let e: f64x2 = f64x2::new(2.0, -2.0);
        let r: f64x2 = transmute(vrnd32xq_f64(transmute(a)));
        assert_eq!(r, e);

        // Just inside vs. just outside i32::MAX.
        let a: f64x2 = f64x2::new(2147483647.499999762, 2147483647.5);
        let e: f64x2 = f64x2::new(2147483647.0, -2147483648.0);
        let r: f64x2 = transmute(vrnd32xq_f64(transmute(a)));
        assert_eq!(r, e);

        // Negative side of the i32 range.
        let a: f64x2 = f64x2::new(-2147483647.499999762, -2147483648.500000477);
        let e: f64x2 = f64x2::new(-2147483647.0, -2147483648.0);
        let r: f64x2 = transmute(vrnd32xq_f64(transmute(a)));
        assert_eq!(r, e);
    }
27039
    // FRINT32X scalar f64: same cases as the vector test, one lane at a time —
    // ties-to-even rounding plus saturation of out-of-i32-range results to
    // -2147483648.0 (as encoded in the expected values).
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd32x_f64() {
        let a: f64 = -1.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrnd32x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 1.5;
        let e: f64 = 2.0;
        let r: f64 = transmute(vrnd32x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 2147483647.499999762;
        let e: f64 = 2147483647.0;
        let r: f64 = transmute(vrnd32x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -2147483647.499999762;
        let e: f64 = -2147483647.0;
        let r: f64 = transmute(vrnd32x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 2.9;
        let e: f64 = 3.0;
        let r: f64 = transmute(vrnd32x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -2.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrnd32x_f64(transmute(a)));
        assert_eq!(r, e);

        // Rounds above i32::MAX -> wraps to the 32-bit minimum.
        let a: f64 = 2147483647.5;
        let e: f64 = -2147483648.0;
        let r: f64 = transmute(vrnd32x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -2147483648.500000477;
        let e: f64 = -2147483648.0;
        let r: f64 = transmute(vrnd32x_f64(transmute(a)));
        assert_eq!(r, e);
    }
27082
    // FRINT32Z round-toward-zero: -1.5 -> -1.0, 2.9 -> 2.0.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd32z_f32() {
        let a: f32x2 = f32x2::new(-1.5, 2.9);
        let e: f32x2 = f32x2::new(-1.0, 2.0);
        let r: f32x2 = transmute(vrnd32z_f32(transmute(a)));
        assert_eq!(r, e);
    }
27090
    // FRINT32Z, 128-bit: truncation toward zero for both signs.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd32zq_f32() {
        let a: f32x4 = f32x4::new(-1.5, 2.9, 1.5, -2.5);
        let e: f32x4 = f32x4::new(-1.0, 2.0, 1.0, -2.0);
        let r: f32x4 = transmute(vrnd32zq_f32(transmute(a)));
        assert_eq!(r, e);
    }
27098
    // FRINT32Z on f64x2: truncation toward zero; values that truncate outside
    // the i32 range map to -2147483648.0 per the expected vectors.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd32zq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 2.9);
        let e: f64x2 = f64x2::new(-1.0, 2.0);
        let r: f64x2 = transmute(vrnd32zq_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64x2 = f64x2::new(1.5, -2.5);
        let e: f64x2 = f64x2::new(1.0, -2.0);
        let r: f64x2 = transmute(vrnd32zq_f64(transmute(a)));
        assert_eq!(r, e);

        // Just inside vs. exactly past i32::MAX.
        let a: f64x2 = f64x2::new(2147483647.999999762, 2147483648.0);
        let e: f64x2 = f64x2::new(2147483647.0, -2147483648.0);
        let r: f64x2 = transmute(vrnd32zq_f64(transmute(a)));
        assert_eq!(r, e);

        // Negative side of the i32 range.
        let a: f64x2 = f64x2::new(-2147483647.999999762, -2147483649.0);
        let e: f64x2 = f64x2::new(-2147483647.0, -2147483648.0);
        let r: f64x2 = transmute(vrnd32zq_f64(transmute(a)));
        assert_eq!(r, e);
    }
27121
    // FRINT32Z scalar f64: truncation toward zero, including i32-range
    // boundary behavior mirroring the vector test above.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd32z_f64() {
        let a: f64 = -1.5;
        let e: f64 = -1.0;
        let r: f64 = transmute(vrnd32z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 1.5;
        let e: f64 = 1.0;
        let r: f64 = transmute(vrnd32z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 2147483647.999999762;
        let e: f64 = 2147483647.0;
        let r: f64 = transmute(vrnd32z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -2147483647.999999762;
        let e: f64 = -2147483647.0;
        let r: f64 = transmute(vrnd32z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 2.9;
        let e: f64 = 2.0;
        let r: f64 = transmute(vrnd32z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -2.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrnd32z_f64(transmute(a)));
        assert_eq!(r, e);

        // Exactly one past i32::MAX -> maps to the 32-bit minimum.
        let a: f64 = 2147483648.0;
        let e: f64 = -2147483648.0;
        let r: f64 = transmute(vrnd32z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -2147483649.0;
        let e: f64 = -2147483648.0;
        let r: f64 = transmute(vrnd32z_f64(transmute(a)));
        assert_eq!(r, e);
    }
27164
    // FRINT64X round-to-nearest (64-bit bound): -1.5 -> -2.0, 2.9 -> 3.0.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd64x_f32() {
        let a: f32x2 = f32x2::new(-1.5, 2.9);
        let e: f32x2 = f32x2::new(-2.0, 3.0);
        let r: f32x2 = transmute(vrnd64x_f32(transmute(a)));
        assert_eq!(r, e);
    }
27172
    // FRINT64X, 128-bit: ties round to even (1.5 -> 2.0, -2.5 -> -2.0).
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd64xq_f32() {
        let a: f32x4 = f32x4::new(-1.5, 2.9, 1.5, -2.5);
        let e: f32x4 = f32x4::new(-2.0, 3.0, 2.0, -2.0);
        let r: f32x4 = transmute(vrnd64xq_f32(transmute(a)));
        assert_eq!(r, e);
    }
27180
    // FRINT64X on f64x2: rounding plus the i64 boundary — per the expected
    // vectors, results outside the 64-bit range map to -9223372036854775808.0.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd64xq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 2.9);
        let e: f64x2 = f64x2::new(-2.0, 3.0);
        let r: f64x2 = transmute(vrnd64xq_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64x2 = f64x2::new(1.5, -2.5);
        let e: f64x2 = f64x2::new(2.0, -2.0);
        let r: f64x2 = transmute(vrnd64xq_f64(transmute(a)));
        assert_eq!(r, e);

        // Largest representable f64 below i64::MAX vs. 2^63 itself.
        let a: f64x2 = f64x2::new(9223372036854774784.0, 9223372036854775808.0);
        let e: f64x2 = f64x2::new(9223372036854774784.0, -9223372036854775808.0);
        let r: f64x2 = transmute(vrnd64xq_f64(transmute(a)));
        assert_eq!(r, e);

        // Negative side of the i64 range.
        let a: f64x2 = f64x2::new(-9223372036854775808.0, -9223372036854777856.0);
        let e: f64x2 = f64x2::new(-9223372036854775808.0, -9223372036854775808.0);
        let r: f64x2 = transmute(vrnd64xq_f64(transmute(a)));
        assert_eq!(r, e);
    }
27203
    // FRINT64X scalar f64: same i64-range boundary cases as the vector test,
    // exercised one value at a time.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd64x_f64() {
        let a: f64 = -1.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrnd64x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 1.5;
        let e: f64 = 2.0;
        let r: f64 = transmute(vrnd64x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 9223372036854774784.0;
        let e: f64 = 9223372036854774784.0;
        let r: f64 = transmute(vrnd64x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -9223372036854775808.0;
        let e: f64 = -9223372036854775808.0;
        let r: f64 = transmute(vrnd64x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 2.9;
        let e: f64 = 3.0;
        let r: f64 = transmute(vrnd64x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -2.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrnd64x_f64(transmute(a)));
        assert_eq!(r, e);

        // 2^63 is outside the i64 range -> maps to the 64-bit minimum.
        let a: f64 = 9223372036854775808.0;
        let e: f64 = -9223372036854775808.0;
        let r: f64 = transmute(vrnd64x_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -9223372036854777856.0;
        let e: f64 = -9223372036854775808.0;
        let r: f64 = transmute(vrnd64x_f64(transmute(a)));
        assert_eq!(r, e);
    }
27246
    // FRINT64Z round-toward-zero: -1.5 -> -1.0, 2.9 -> 2.0.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd64z_f32() {
        let a: f32x2 = f32x2::new(-1.5, 2.9);
        let e: f32x2 = f32x2::new(-1.0, 2.0);
        let r: f32x2 = transmute(vrnd64z_f32(transmute(a)));
        assert_eq!(r, e);
    }
27254
    // FRINT64Z, 128-bit: truncation toward zero for both signs.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd64zq_f32() {
        let a: f32x4 = f32x4::new(-1.5, 2.9, 1.5, -2.5);
        let e: f32x4 = f32x4::new(-1.0, 2.0, 1.0, -2.0);
        let r: f32x4 = transmute(vrnd64zq_f32(transmute(a)));
        assert_eq!(r, e);
    }
27262
    // FRINT64Z on f64x2: truncation toward zero; out-of-i64-range values map
    // to -9223372036854775808.0 per the expected vectors.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd64zq_f64() {
        let a: f64x2 = f64x2::new(-1.5, 2.9);
        let e: f64x2 = f64x2::new(-1.0, 2.0);
        let r: f64x2 = transmute(vrnd64zq_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64x2 = f64x2::new(1.5, -2.5);
        let e: f64x2 = f64x2::new(1.0, -2.0);
        let r: f64x2 = transmute(vrnd64zq_f64(transmute(a)));
        assert_eq!(r, e);

        // Largest f64 below i64::MAX vs. 2^63 itself.
        let a: f64x2 = f64x2::new(9223372036854774784.0, 9223372036854775808.0);
        let e: f64x2 = f64x2::new(9223372036854774784.0, -9223372036854775808.0);
        let r: f64x2 = transmute(vrnd64zq_f64(transmute(a)));
        assert_eq!(r, e);

        // Negative side of the i64 range.
        let a: f64x2 = f64x2::new(-9223372036854775808.0, -9223372036854777856.0);
        let e: f64x2 = f64x2::new(-9223372036854775808.0, -9223372036854775808.0);
        let r: f64x2 = transmute(vrnd64zq_f64(transmute(a)));
        assert_eq!(r, e);
    }
27285
    // FRINT64Z scalar f64: truncation toward zero plus the i64 boundary
    // cases, mirroring the vector test above.
    #[simd_test(enable = "neon,frintts")]
    unsafe fn test_vrnd64z_f64() {
        let a: f64 = -1.5;
        let e: f64 = -1.0;
        let r: f64 = transmute(vrnd64z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 1.5;
        let e: f64 = 1.0;
        let r: f64 = transmute(vrnd64z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 9223372036854774784.0;
        let e: f64 = 9223372036854774784.0;
        let r: f64 = transmute(vrnd64z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -9223372036854775808.0;
        let e: f64 = -9223372036854775808.0;
        let r: f64 = transmute(vrnd64z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = 2.9;
        let e: f64 = 2.0;
        let r: f64 = transmute(vrnd64z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -2.5;
        let e: f64 = -2.0;
        let r: f64 = transmute(vrnd64z_f64(transmute(a)));
        assert_eq!(r, e);

        // 2^63 is outside the i64 range -> maps to the 64-bit minimum.
        let a: f64 = 9223372036854775808.0;
        let e: f64 = -9223372036854775808.0;
        let r: f64 = transmute(vrnd64z_f64(transmute(a)));
        assert_eq!(r, e);

        let a: f64 = -9223372036854777856.0;
        let e: f64 = -9223372036854775808.0;
        let r: f64 = transmute(vrnd64z_f64(transmute(a)));
        assert_eq!(r, e);
    }
27328
    // TRN1: expected vector interleaves the even-numbered lanes, a[0], b[0], a[2], b[2], ...
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1_s8() {
        let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: i8x8 = i8x8::new(0, 1, 4, 5, 8, 9, 12, 13);
        let r: i8x8 = transmute(vtrn1_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27337
    // TRN1, 128-bit: even-numbered lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_s8() {
        let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
        let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
        let e: i8x16 = i8x16::new(0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29);
        let r: i8x16 = transmute(vtrn1q_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27346
    // TRN1: even-numbered i16 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1_s16() {
        let a: i16x4 = i16x4::new(0, 2, 4, 6);
        let b: i16x4 = i16x4::new(1, 3, 5, 7);
        let e: i16x4 = i16x4::new(0, 1, 4, 5);
        let r: i16x4 = transmute(vtrn1_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27355
    // TRN1, 128-bit: even-numbered i16 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_s16() {
        let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: i16x8 = i16x8::new(0, 1, 4, 5, 8, 9, 12, 13);
        let r: i16x8 = transmute(vtrn1q_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27364
    // TRN1, 128-bit: even-numbered i32 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_s32() {
        let a: i32x4 = i32x4::new(0, 2, 4, 6);
        let b: i32x4 = i32x4::new(1, 3, 5, 7);
        let e: i32x4 = i32x4::new(0, 1, 4, 5);
        let r: i32x4 = transmute(vtrn1q_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27373
    // TRN1, unsigned variant: same even-lane interleave as the s8 test.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1_u8() {
        let a: u8x8 = u8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: u8x8 = u8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: u8x8 = u8x8::new(0, 1, 4, 5, 8, 9, 12, 13);
        let r: u8x8 = transmute(vtrn1_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27382
    // TRN1, 128-bit unsigned: even-numbered u8 lanes, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_u8() {
        let a: u8x16 = u8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
        let b: u8x16 = u8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
        let e: u8x16 = u8x16::new(0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29);
        let r: u8x16 = transmute(vtrn1q_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27391
    // TRN1: even-numbered u16 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1_u16() {
        let a: u16x4 = u16x4::new(0, 2, 4, 6);
        let b: u16x4 = u16x4::new(1, 3, 5, 7);
        let e: u16x4 = u16x4::new(0, 1, 4, 5);
        let r: u16x4 = transmute(vtrn1_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27400
    // TRN1, 128-bit: even-numbered u16 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_u16() {
        let a: u16x8 = u16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: u16x8 = u16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: u16x8 = u16x8::new(0, 1, 4, 5, 8, 9, 12, 13);
        let r: u16x8 = transmute(vtrn1q_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27409
    // TRN1, 128-bit: even-numbered u32 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_u32() {
        let a: u32x4 = u32x4::new(0, 2, 4, 6);
        let b: u32x4 = u32x4::new(1, 3, 5, 7);
        let e: u32x4 = u32x4::new(0, 1, 4, 5);
        let r: u32x4 = transmute(vtrn1q_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27418
    // TRN1, polynomial variant (tested via i8x8 bit patterns).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1_p8() {
        let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: i8x8 = i8x8::new(0, 1, 4, 5, 8, 9, 12, 13);
        let r: i8x8 = transmute(vtrn1_p8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27427
    // TRN1, 128-bit polynomial variant (tested via i8x16 bit patterns).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_p8() {
        let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
        let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
        let e: i8x16 = i8x16::new(0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29);
        let r: i8x16 = transmute(vtrn1q_p8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27436
    // TRN1, p16 variant (tested via i16x4 bit patterns).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1_p16() {
        let a: i16x4 = i16x4::new(0, 2, 4, 6);
        let b: i16x4 = i16x4::new(1, 3, 5, 7);
        let e: i16x4 = i16x4::new(0, 1, 4, 5);
        let r: i16x4 = transmute(vtrn1_p16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27445
    // TRN1, 128-bit p16 variant (tested via i16x8 bit patterns).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_p16() {
        let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: i16x8 = i16x8::new(0, 1, 4, 5, 8, 9, 12, 13);
        let r: i16x8 = transmute(vtrn1q_p16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27454
    // TRN1 on two-lane vectors: result is (a[0], b[0]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1_s32() {
        let a: i32x2 = i32x2::new(0, 2);
        let b: i32x2 = i32x2::new(1, 3);
        let e: i32x2 = i32x2::new(0, 1);
        let r: i32x2 = transmute(vtrn1_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27463
    // TRN1 on two-lane i64 vectors: result is (a[0], b[0]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_s64() {
        let a: i64x2 = i64x2::new(0, 2);
        let b: i64x2 = i64x2::new(1, 3);
        let e: i64x2 = i64x2::new(0, 1);
        let r: i64x2 = transmute(vtrn1q_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27472
    // TRN1 on two-lane u32 vectors: result is (a[0], b[0]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1_u32() {
        let a: u32x2 = u32x2::new(0, 2);
        let b: u32x2 = u32x2::new(1, 3);
        let e: u32x2 = u32x2::new(0, 1);
        let r: u32x2 = transmute(vtrn1_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27481
    // TRN1 on two-lane u64 vectors: result is (a[0], b[0]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_u64() {
        let a: u64x2 = u64x2::new(0, 2);
        let b: u64x2 = u64x2::new(1, 3);
        let e: u64x2 = u64x2::new(0, 1);
        let r: u64x2 = transmute(vtrn1q_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27490
    // TRN1, p64 variant (tested via i64x2 bit patterns): result is (a[0], b[0]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_p64() {
        let a: i64x2 = i64x2::new(0, 2);
        let b: i64x2 = i64x2::new(1, 3);
        let e: i64x2 = i64x2::new(0, 1);
        let r: i64x2 = transmute(vtrn1q_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27499
    // TRN1 on f32x4: even-numbered lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_f32() {
        let a: f32x4 = f32x4::new(0., 2., 4., 6.);
        let b: f32x4 = f32x4::new(1., 3., 5., 7.);
        let e: f32x4 = f32x4::new(0., 1., 4., 5.);
        let r: f32x4 = transmute(vtrn1q_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27508
    // TRN1 on two-lane f32 vectors: result is (a[0], b[0]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1_f32() {
        let a: f32x2 = f32x2::new(0., 2.);
        let b: f32x2 = f32x2::new(1., 3.);
        let e: f32x2 = f32x2::new(0., 1.);
        let r: f32x2 = transmute(vtrn1_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27517
    // TRN1 on two-lane f64 vectors: result is (a[0], b[0]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn1q_f64() {
        let a: f64x2 = f64x2::new(0., 2.);
        let b: f64x2 = f64x2::new(1., 3.);
        let e: f64x2 = f64x2::new(0., 1.);
        let r: f64x2 = transmute(vtrn1q_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27526
    // TRN2: expected vector interleaves the odd-numbered lanes, a[1], b[1], a[3], b[3], ...
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2_s8() {
        let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: i8x8 = i8x8::new(2, 3, 6, 7, 10, 11, 14, 15);
        let r: i8x8 = transmute(vtrn2_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27535
    // TRN2, 128-bit: odd-numbered lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_s8() {
        let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
        let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
        let e: i8x16 = i8x16::new(2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31);
        let r: i8x16 = transmute(vtrn2q_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27544
    // TRN2: odd-numbered i16 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2_s16() {
        let a: i16x4 = i16x4::new(0, 2, 4, 6);
        let b: i16x4 = i16x4::new(1, 3, 5, 7);
        let e: i16x4 = i16x4::new(2, 3, 6, 7);
        let r: i16x4 = transmute(vtrn2_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27553
    // TRN2, 128-bit: odd-numbered i16 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_s16() {
        let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: i16x8 = i16x8::new(2, 3, 6, 7, 10, 11, 14, 15);
        let r: i16x8 = transmute(vtrn2q_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27562
    // TRN2, 128-bit: odd-numbered i32 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_s32() {
        let a: i32x4 = i32x4::new(0, 2, 4, 6);
        let b: i32x4 = i32x4::new(1, 3, 5, 7);
        let e: i32x4 = i32x4::new(2, 3, 6, 7);
        let r: i32x4 = transmute(vtrn2q_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27571
    // TRN2, unsigned variant: same odd-lane interleave as the s8 test.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2_u8() {
        let a: u8x8 = u8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: u8x8 = u8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: u8x8 = u8x8::new(2, 3, 6, 7, 10, 11, 14, 15);
        let r: u8x8 = transmute(vtrn2_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27580
    // TRN2, 128-bit unsigned: odd-numbered u8 lanes, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_u8() {
        let a: u8x16 = u8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
        let b: u8x16 = u8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
        let e: u8x16 = u8x16::new(2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31);
        let r: u8x16 = transmute(vtrn2q_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27589
    // TRN2: odd-numbered u16 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2_u16() {
        let a: u16x4 = u16x4::new(0, 2, 4, 6);
        let b: u16x4 = u16x4::new(1, 3, 5, 7);
        let e: u16x4 = u16x4::new(2, 3, 6, 7);
        let r: u16x4 = transmute(vtrn2_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27598
    // TRN2, 128-bit: odd-numbered u16 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_u16() {
        let a: u16x8 = u16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: u16x8 = u16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: u16x8 = u16x8::new(2, 3, 6, 7, 10, 11, 14, 15);
        let r: u16x8 = transmute(vtrn2q_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27607
    // TRN2, 128-bit: odd-numbered u32 lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_u32() {
        let a: u32x4 = u32x4::new(0, 2, 4, 6);
        let b: u32x4 = u32x4::new(1, 3, 5, 7);
        let e: u32x4 = u32x4::new(2, 3, 6, 7);
        let r: u32x4 = transmute(vtrn2q_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27616
    // TRN2, polynomial variant (tested via i8x8 bit patterns).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2_p8() {
        let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: i8x8 = i8x8::new(2, 3, 6, 7, 10, 11, 14, 15);
        let r: i8x8 = transmute(vtrn2_p8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27625
    // TRN2, 128-bit polynomial variant (tested via i8x16 bit patterns).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_p8() {
        let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
        let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
        let e: i8x16 = i8x16::new(2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31);
        let r: i8x16 = transmute(vtrn2q_p8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27634
    // TRN2, p16 variant (tested via i16x4 bit patterns).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2_p16() {
        let a: i16x4 = i16x4::new(0, 2, 4, 6);
        let b: i16x4 = i16x4::new(1, 3, 5, 7);
        let e: i16x4 = i16x4::new(2, 3, 6, 7);
        let r: i16x4 = transmute(vtrn2_p16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27643
    // TRN2, 128-bit p16 variant (tested via i16x8 bit patterns).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_p16() {
        let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: i16x8 = i16x8::new(2, 3, 6, 7, 10, 11, 14, 15);
        let r: i16x8 = transmute(vtrn2q_p16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27652
    // TRN2 on two-lane vectors: result is (a[1], b[1]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2_s32() {
        let a: i32x2 = i32x2::new(0, 2);
        let b: i32x2 = i32x2::new(1, 3);
        let e: i32x2 = i32x2::new(2, 3);
        let r: i32x2 = transmute(vtrn2_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27661
    // TRN2 on two-lane i64 vectors: result is (a[1], b[1]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_s64() {
        let a: i64x2 = i64x2::new(0, 2);
        let b: i64x2 = i64x2::new(1, 3);
        let e: i64x2 = i64x2::new(2, 3);
        let r: i64x2 = transmute(vtrn2q_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27670
    // TRN2 on two-lane u32 vectors: result is (a[1], b[1]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2_u32() {
        let a: u32x2 = u32x2::new(0, 2);
        let b: u32x2 = u32x2::new(1, 3);
        let e: u32x2 = u32x2::new(2, 3);
        let r: u32x2 = transmute(vtrn2_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27679
    // TRN2 on two-lane u64 vectors: result is (a[1], b[1]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_u64() {
        let a: u64x2 = u64x2::new(0, 2);
        let b: u64x2 = u64x2::new(1, 3);
        let e: u64x2 = u64x2::new(2, 3);
        let r: u64x2 = transmute(vtrn2q_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27688
    // TRN2, p64 variant (tested via i64x2 bit patterns): result is (a[1], b[1]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_p64() {
        let a: i64x2 = i64x2::new(0, 2);
        let b: i64x2 = i64x2::new(1, 3);
        let e: i64x2 = i64x2::new(2, 3);
        let r: i64x2 = transmute(vtrn2q_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27697
    // TRN2 on f32x4: odd-numbered lanes of a and b, interleaved.
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_f32() {
        let a: f32x4 = f32x4::new(0., 2., 4., 6.);
        let b: f32x4 = f32x4::new(1., 3., 5., 7.);
        let e: f32x4 = f32x4::new(2., 3., 6., 7.);
        let r: f32x4 = transmute(vtrn2q_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27706
    // TRN2 on two-lane f32 vectors: result is (a[1], b[1]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2_f32() {
        let a: f32x2 = f32x2::new(0., 2.);
        let b: f32x2 = f32x2::new(1., 3.);
        let e: f32x2 = f32x2::new(2., 3.);
        let r: f32x2 = transmute(vtrn2_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27715
    // TRN2 on two-lane f64 vectors: result is (a[1], b[1]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vtrn2q_f64() {
        let a: f64x2 = f64x2::new(0., 2.);
        let b: f64x2 = f64x2::new(1., 3.);
        let e: f64x2 = f64x2::new(2., 3.);
        let r: f64x2 = transmute(vtrn2q_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27724
    // ZIP1: expected vector interleaves the low halves, a[0], b[0], a[1], b[1], ...
    #[simd_test(enable = "neon")]
    unsafe fn test_vzip1_s8() {
        let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
        let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
        let e: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
        let r: i8x8 = transmute(vzip1_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27733
    // ZIP1, 128-bit: low halves of a and b, interleaved lane by lane.
    #[simd_test(enable = "neon")]
    unsafe fn test_vzip1q_s8() {
        let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
        let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
        let e: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let r: i8x16 = transmute(vzip1q_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
27742
27743    #[simd_test(enable = "neon")]
27744    unsafe fn test_vzip1_s16() {
27745        let a: i16x4 = i16x4::new(0, 2, 4, 6);
27746        let b: i16x4 = i16x4::new(1, 3, 5, 7);
27747        let e: i16x4 = i16x4::new(0, 1, 2, 3);
27748        let r: i16x4 = transmute(vzip1_s16(transmute(a), transmute(b)));
27749        assert_eq!(r, e);
27750    }
27751
27752    #[simd_test(enable = "neon")]
27753    unsafe fn test_vzip1q_s16() {
27754        let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27755        let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27756        let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27757        let r: i16x8 = transmute(vzip1q_s16(transmute(a), transmute(b)));
27758        assert_eq!(r, e);
27759    }
27760
27761    #[simd_test(enable = "neon")]
27762    unsafe fn test_vzip1_s32() {
27763        let a: i32x2 = i32x2::new(0, 2);
27764        let b: i32x2 = i32x2::new(1, 3);
27765        let e: i32x2 = i32x2::new(0, 1);
27766        let r: i32x2 = transmute(vzip1_s32(transmute(a), transmute(b)));
27767        assert_eq!(r, e);
27768    }
27769
27770    #[simd_test(enable = "neon")]
27771    unsafe fn test_vzip1q_s32() {
27772        let a: i32x4 = i32x4::new(0, 2, 4, 6);
27773        let b: i32x4 = i32x4::new(1, 3, 5, 7);
27774        let e: i32x4 = i32x4::new(0, 1, 2, 3);
27775        let r: i32x4 = transmute(vzip1q_s32(transmute(a), transmute(b)));
27776        assert_eq!(r, e);
27777    }
27778
27779    #[simd_test(enable = "neon")]
27780    unsafe fn test_vzip1q_s64() {
27781        let a: i64x2 = i64x2::new(0, 2);
27782        let b: i64x2 = i64x2::new(1, 3);
27783        let e: i64x2 = i64x2::new(0, 1);
27784        let r: i64x2 = transmute(vzip1q_s64(transmute(a), transmute(b)));
27785        assert_eq!(r, e);
27786    }
27787
27788    #[simd_test(enable = "neon")]
27789    unsafe fn test_vzip1_u8() {
27790        let a: u8x8 = u8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27791        let b: u8x8 = u8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27792        let e: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27793        let r: u8x8 = transmute(vzip1_u8(transmute(a), transmute(b)));
27794        assert_eq!(r, e);
27795    }
27796
27797    #[simd_test(enable = "neon")]
27798    unsafe fn test_vzip1q_u8() {
27799        let a: u8x16 = u8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
27800        let b: u8x16 = u8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
27801        let e: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
27802        let r: u8x16 = transmute(vzip1q_u8(transmute(a), transmute(b)));
27803        assert_eq!(r, e);
27804    }
27805
27806    #[simd_test(enable = "neon")]
27807    unsafe fn test_vzip1_u16() {
27808        let a: u16x4 = u16x4::new(0, 2, 4, 6);
27809        let b: u16x4 = u16x4::new(1, 3, 5, 7);
27810        let e: u16x4 = u16x4::new(0, 1, 2, 3);
27811        let r: u16x4 = transmute(vzip1_u16(transmute(a), transmute(b)));
27812        assert_eq!(r, e);
27813    }
27814
27815    #[simd_test(enable = "neon")]
27816    unsafe fn test_vzip1q_u16() {
27817        let a: u16x8 = u16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27818        let b: u16x8 = u16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27819        let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27820        let r: u16x8 = transmute(vzip1q_u16(transmute(a), transmute(b)));
27821        assert_eq!(r, e);
27822    }
27823
27824    #[simd_test(enable = "neon")]
27825    unsafe fn test_vzip1_u32() {
27826        let a: u32x2 = u32x2::new(0, 2);
27827        let b: u32x2 = u32x2::new(1, 3);
27828        let e: u32x2 = u32x2::new(0, 1);
27829        let r: u32x2 = transmute(vzip1_u32(transmute(a), transmute(b)));
27830        assert_eq!(r, e);
27831    }
27832
27833    #[simd_test(enable = "neon")]
27834    unsafe fn test_vzip1q_u32() {
27835        let a: u32x4 = u32x4::new(0, 2, 4, 6);
27836        let b: u32x4 = u32x4::new(1, 3, 5, 7);
27837        let e: u32x4 = u32x4::new(0, 1, 2, 3);
27838        let r: u32x4 = transmute(vzip1q_u32(transmute(a), transmute(b)));
27839        assert_eq!(r, e);
27840    }
27841
27842    #[simd_test(enable = "neon")]
27843    unsafe fn test_vzip1q_u64() {
27844        let a: u64x2 = u64x2::new(0, 2);
27845        let b: u64x2 = u64x2::new(1, 3);
27846        let e: u64x2 = u64x2::new(0, 1);
27847        let r: u64x2 = transmute(vzip1q_u64(transmute(a), transmute(b)));
27848        assert_eq!(r, e);
27849    }
27850
27851    #[simd_test(enable = "neon")]
27852    unsafe fn test_vzip1_p8() {
27853        let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27854        let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27855        let e: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27856        let r: i8x8 = transmute(vzip1_p8(transmute(a), transmute(b)));
27857        assert_eq!(r, e);
27858    }
27859
27860    #[simd_test(enable = "neon")]
27861    unsafe fn test_vzip1q_p8() {
27862        let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
27863        let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
27864        let e: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
27865        let r: i8x16 = transmute(vzip1q_p8(transmute(a), transmute(b)));
27866        assert_eq!(r, e);
27867    }
27868
27869    #[simd_test(enable = "neon")]
27870    unsafe fn test_vzip1_p16() {
27871        let a: i16x4 = i16x4::new(0, 2, 4, 6);
27872        let b: i16x4 = i16x4::new(1, 3, 5, 7);
27873        let e: i16x4 = i16x4::new(0, 1, 2, 3);
27874        let r: i16x4 = transmute(vzip1_p16(transmute(a), transmute(b)));
27875        assert_eq!(r, e);
27876    }
27877
27878    #[simd_test(enable = "neon")]
27879    unsafe fn test_vzip1q_p16() {
27880        let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
27881        let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
27882        let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
27883        let r: i16x8 = transmute(vzip1q_p16(transmute(a), transmute(b)));
27884        assert_eq!(r, e);
27885    }
27886
27887    #[simd_test(enable = "neon")]
27888    unsafe fn test_vzip1q_p64() {
27889        let a: i64x2 = i64x2::new(0, 2);
27890        let b: i64x2 = i64x2::new(1, 3);
27891        let e: i64x2 = i64x2::new(0, 1);
27892        let r: i64x2 = transmute(vzip1q_p64(transmute(a), transmute(b)));
27893        assert_eq!(r, e);
27894    }
27895
27896    #[simd_test(enable = "neon")]
27897    unsafe fn test_vzip1_f32() {
27898        let a: f32x2 = f32x2::new(0., 2.);
27899        let b: f32x2 = f32x2::new(1., 3.);
27900        let e: f32x2 = f32x2::new(0., 1.);
27901        let r: f32x2 = transmute(vzip1_f32(transmute(a), transmute(b)));
27902        assert_eq!(r, e);
27903    }
27904
27905    #[simd_test(enable = "neon")]
27906    unsafe fn test_vzip1q_f32() {
27907        let a: f32x4 = f32x4::new(0., 2., 4., 6.);
27908        let b: f32x4 = f32x4::new(1., 3., 5., 7.);
27909        let e: f32x4 = f32x4::new(0., 1., 2., 3.);
27910        let r: f32x4 = transmute(vzip1q_f32(transmute(a), transmute(b)));
27911        assert_eq!(r, e);
27912    }
27913
27914    #[simd_test(enable = "neon")]
27915    unsafe fn test_vzip1q_f64() {
27916        let a: f64x2 = f64x2::new(0., 2.);
27917        let b: f64x2 = f64x2::new(1., 3.);
27918        let e: f64x2 = f64x2::new(0., 1.);
27919        let r: f64x2 = transmute(vzip1q_f64(transmute(a), transmute(b)));
27920        assert_eq!(r, e);
27921    }
27922
    // --- vzip2 (ZIP2) tests ---------------------------------------------------
    // NOTE(review): generated file — regenerate from neon.spec instead of hand-editing.
    // ZIP2 interleaves the HIGH halves of the two inputs: the expected vectors
    // are (a[n/2], b[n/2], a[n/2+1], b[n/2+1], ...). The upper halves of `a`/`b`
    // carry the distinguishing values; the low halves are ignored by the op.
    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2_s8() {
        let a: i8x8 = i8x8::new(0, 16, 16, 18, 16, 18, 20, 22);
        let b: i8x8 = i8x8::new(1, 17, 17, 19, 17, 19, 21, 23);
        let e: i8x8 = i8x8::new(16, 17, 18, 19, 20, 21, 22, 23);
        let r: i8x8 = transmute(vzip2_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_s8() {
        let a: i8x16 = i8x16::new(0, 16, 16, 18, 16, 18, 20, 22, 16, 18, 20, 22, 24, 26, 28, 30);
        let b: i8x16 = i8x16::new(1, 17, 17, 19, 17, 19, 21, 23, 17, 19, 21, 23, 25, 27, 29, 31);
        let e: i8x16 = i8x16::new(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
        let r: i8x16 = transmute(vzip2q_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2_s16() {
        let a: i16x4 = i16x4::new(0, 16, 16, 18);
        let b: i16x4 = i16x4::new(1, 17, 17, 19);
        let e: i16x4 = i16x4::new(16, 17, 18, 19);
        let r: i16x4 = transmute(vzip2_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_s16() {
        let a: i16x8 = i16x8::new(0, 16, 16, 18, 16, 18, 20, 22);
        let b: i16x8 = i16x8::new(1, 17, 17, 19, 17, 19, 21, 23);
        let e: i16x8 = i16x8::new(16, 17, 18, 19, 20, 21, 22, 23);
        let r: i16x8 = transmute(vzip2q_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2_s32() {
        let a: i32x2 = i32x2::new(0, 16);
        let b: i32x2 = i32x2::new(1, 17);
        let e: i32x2 = i32x2::new(16, 17);
        let r: i32x2 = transmute(vzip2_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_s32() {
        let a: i32x4 = i32x4::new(0, 16, 16, 18);
        let b: i32x4 = i32x4::new(1, 17, 17, 19);
        let e: i32x4 = i32x4::new(16, 17, 18, 19);
        let r: i32x4 = transmute(vzip2q_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_s64() {
        let a: i64x2 = i64x2::new(0, 16);
        let b: i64x2 = i64x2::new(1, 17);
        let e: i64x2 = i64x2::new(16, 17);
        let r: i64x2 = transmute(vzip2q_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2_u8() {
        let a: u8x8 = u8x8::new(0, 16, 16, 18, 16, 18, 20, 22);
        let b: u8x8 = u8x8::new(1, 17, 17, 19, 17, 19, 21, 23);
        let e: u8x8 = u8x8::new(16, 17, 18, 19, 20, 21, 22, 23);
        let r: u8x8 = transmute(vzip2_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_u8() {
        let a: u8x16 = u8x16::new(0, 16, 16, 18, 16, 18, 20, 22, 16, 18, 20, 22, 24, 26, 28, 30);
        let b: u8x16 = u8x16::new(1, 17, 17, 19, 17, 19, 21, 23, 17, 19, 21, 23, 25, 27, 29, 31);
        let e: u8x16 = u8x16::new(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
        let r: u8x16 = transmute(vzip2q_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2_u16() {
        let a: u16x4 = u16x4::new(0, 16, 16, 18);
        let b: u16x4 = u16x4::new(1, 17, 17, 19);
        let e: u16x4 = u16x4::new(16, 17, 18, 19);
        let r: u16x4 = transmute(vzip2_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_u16() {
        let a: u16x8 = u16x8::new(0, 16, 16, 18, 16, 18, 20, 22);
        let b: u16x8 = u16x8::new(1, 17, 17, 19, 17, 19, 21, 23);
        let e: u16x8 = u16x8::new(16, 17, 18, 19, 20, 21, 22, 23);
        let r: u16x8 = transmute(vzip2q_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2_u32() {
        let a: u32x2 = u32x2::new(0, 16);
        let b: u32x2 = u32x2::new(1, 17);
        let e: u32x2 = u32x2::new(16, 17);
        let r: u32x2 = transmute(vzip2_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_u32() {
        let a: u32x4 = u32x4::new(0, 16, 16, 18);
        let b: u32x4 = u32x4::new(1, 17, 17, 19);
        let e: u32x4 = u32x4::new(16, 17, 18, 19);
        let r: u32x4 = transmute(vzip2q_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_u64() {
        let a: u64x2 = u64x2::new(0, 16);
        let b: u64x2 = u64x2::new(1, 17);
        let e: u64x2 = u64x2::new(16, 17);
        let r: u64x2 = transmute(vzip2q_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    // Polynomial variants: expressed via signed vectors of the same width,
    // since only the lane movement (bit pattern) is under test.
    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2_p8() {
        let a: i8x8 = i8x8::new(0, 16, 16, 18, 16, 18, 20, 22);
        let b: i8x8 = i8x8::new(1, 17, 17, 19, 17, 19, 21, 23);
        let e: i8x8 = i8x8::new(16, 17, 18, 19, 20, 21, 22, 23);
        let r: i8x8 = transmute(vzip2_p8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_p8() {
        let a: i8x16 = i8x16::new(0, 16, 16, 18, 16, 18, 20, 22, 16, 18, 20, 22, 24, 26, 28, 30);
        let b: i8x16 = i8x16::new(1, 17, 17, 19, 17, 19, 21, 23, 17, 19, 21, 23, 25, 27, 29, 31);
        let e: i8x16 = i8x16::new(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
        let r: i8x16 = transmute(vzip2q_p8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2_p16() {
        let a: i16x4 = i16x4::new(0, 16, 16, 18);
        let b: i16x4 = i16x4::new(1, 17, 17, 19);
        let e: i16x4 = i16x4::new(16, 17, 18, 19);
        let r: i16x4 = transmute(vzip2_p16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_p16() {
        let a: i16x8 = i16x8::new(0, 16, 16, 18, 16, 18, 20, 22);
        let b: i16x8 = i16x8::new(1, 17, 17, 19, 17, 19, 21, 23);
        let e: i16x8 = i16x8::new(16, 17, 18, 19, 20, 21, 22, 23);
        let r: i16x8 = transmute(vzip2q_p16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_p64() {
        let a: i64x2 = i64x2::new(0, 16);
        let b: i64x2 = i64x2::new(1, 17);
        let e: i64x2 = i64x2::new(16, 17);
        let r: i64x2 = transmute(vzip2q_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2_f32() {
        let a: f32x2 = f32x2::new(0., 8.);
        let b: f32x2 = f32x2::new(1., 9.);
        let e: f32x2 = f32x2::new(8., 9.);
        let r: f32x2 = transmute(vzip2_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_f32() {
        let a: f32x4 = f32x4::new(0., 8., 8., 10.);
        let b: f32x4 = f32x4::new(1., 9., 9., 11.);
        let e: f32x4 = f32x4::new(8., 9., 10., 11.);
        let r: f32x4 = transmute(vzip2q_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vzip2q_f64() {
        let a: f64x2 = f64x2::new(0., 8.);
        let b: f64x2 = f64x2::new(1., 9.);
        let e: f64x2 = f64x2::new(8., 9.);
        let r: f64x2 = transmute(vzip2q_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
28120
    // --- vuzp1 (UZP1) tests ---------------------------------------------------
    // NOTE(review): generated file — regenerate from neon.spec instead of hand-editing.
    // UZP1 concatenates the EVEN-indexed lanes of both inputs: the expected
    // vectors are (a[0], a[2], ..., b[0], b[2], ...); the odd lanes (zeros here)
    // are dropped by the op.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1_s8() {
        let a: i8x8 = i8x8::new(1, 0, 2, 0, 2, 0, 3, 0);
        let b: i8x8 = i8x8::new(2, 0, 3, 0, 7, 0, 8, 0);
        let e: i8x8 = i8x8::new(1, 2, 2, 3, 2, 3, 7, 8);
        let r: i8x8 = transmute(vuzp1_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_s8() {
        let a: i8x16 = i8x16::new(1, 0, 2, 0, 2, 0, 3, 0, 2, 0, 3, 0, 7, 0, 8, 0);
        let b: i8x16 = i8x16::new(2, 0, 3, 0, 7, 0, 8, 0, 13, 0, 14, 0, 15, 0, 16, 0);
        let e: i8x16 = i8x16::new(1, 2, 2, 3, 2, 3, 7, 8, 2, 3, 7, 8, 13, 14, 15, 16);
        let r: i8x16 = transmute(vuzp1q_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1_s16() {
        let a: i16x4 = i16x4::new(1, 0, 2, 0);
        let b: i16x4 = i16x4::new(2, 0, 3, 0);
        let e: i16x4 = i16x4::new(1, 2, 2, 3);
        let r: i16x4 = transmute(vuzp1_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_s16() {
        let a: i16x8 = i16x8::new(1, 0, 2, 0, 2, 0, 3, 0);
        let b: i16x8 = i16x8::new(2, 0, 3, 0, 7, 0, 8, 0);
        let e: i16x8 = i16x8::new(1, 2, 2, 3, 2, 3, 7, 8);
        let r: i16x8 = transmute(vuzp1q_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_s32() {
        let a: i32x4 = i32x4::new(1, 0, 2, 0);
        let b: i32x4 = i32x4::new(2, 0, 3, 0);
        let e: i32x4 = i32x4::new(1, 2, 2, 3);
        let r: i32x4 = transmute(vuzp1q_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1_u8() {
        let a: u8x8 = u8x8::new(1, 0, 2, 0, 2, 0, 3, 0);
        let b: u8x8 = u8x8::new(2, 0, 3, 0, 7, 0, 8, 0);
        let e: u8x8 = u8x8::new(1, 2, 2, 3, 2, 3, 7, 8);
        let r: u8x8 = transmute(vuzp1_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_u8() {
        let a: u8x16 = u8x16::new(1, 0, 2, 0, 2, 0, 3, 0, 2, 0, 3, 0, 7, 0, 8, 0);
        let b: u8x16 = u8x16::new(2, 0, 3, 0, 7, 0, 8, 0, 13, 0, 14, 0, 15, 0, 16, 0);
        let e: u8x16 = u8x16::new(1, 2, 2, 3, 2, 3, 7, 8, 2, 3, 7, 8, 13, 14, 15, 16);
        let r: u8x16 = transmute(vuzp1q_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1_u16() {
        let a: u16x4 = u16x4::new(1, 0, 2, 0);
        let b: u16x4 = u16x4::new(2, 0, 3, 0);
        let e: u16x4 = u16x4::new(1, 2, 2, 3);
        let r: u16x4 = transmute(vuzp1_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_u16() {
        let a: u16x8 = u16x8::new(1, 0, 2, 0, 2, 0, 3, 0);
        let b: u16x8 = u16x8::new(2, 0, 3, 0, 7, 0, 8, 0);
        let e: u16x8 = u16x8::new(1, 2, 2, 3, 2, 3, 7, 8);
        let r: u16x8 = transmute(vuzp1q_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_u32() {
        let a: u32x4 = u32x4::new(1, 0, 2, 0);
        let b: u32x4 = u32x4::new(2, 0, 3, 0);
        let e: u32x4 = u32x4::new(1, 2, 2, 3);
        let r: u32x4 = transmute(vuzp1q_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    // Polynomial variants: expressed via signed vectors of the same width,
    // since only the lane movement (bit pattern) is under test.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1_p8() {
        let a: i8x8 = i8x8::new(1, 0, 2, 0, 2, 0, 3, 0);
        let b: i8x8 = i8x8::new(2, 0, 3, 0, 7, 0, 8, 0);
        let e: i8x8 = i8x8::new(1, 2, 2, 3, 2, 3, 7, 8);
        let r: i8x8 = transmute(vuzp1_p8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_p8() {
        let a: i8x16 = i8x16::new(1, 0, 2, 0, 2, 0, 3, 0, 2, 0, 3, 0, 7, 0, 8, 0);
        let b: i8x16 = i8x16::new(2, 0, 3, 0, 7, 0, 8, 0, 13, 0, 14, 0, 15, 0, 16, 0);
        let e: i8x16 = i8x16::new(1, 2, 2, 3, 2, 3, 7, 8, 2, 3, 7, 8, 13, 14, 15, 16);
        let r: i8x16 = transmute(vuzp1q_p8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1_p16() {
        let a: i16x4 = i16x4::new(1, 0, 2, 0);
        let b: i16x4 = i16x4::new(2, 0, 3, 0);
        let e: i16x4 = i16x4::new(1, 2, 2, 3);
        let r: i16x4 = transmute(vuzp1_p16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_p16() {
        let a: i16x8 = i16x8::new(1, 0, 2, 0, 2, 0, 3, 0);
        let b: i16x8 = i16x8::new(2, 0, 3, 0, 7, 0, 8, 0);
        let e: i16x8 = i16x8::new(1, 2, 2, 3, 2, 3, 7, 8);
        let r: i16x8 = transmute(vuzp1q_p16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    // For 2-lane vectors UZP1 degenerates to (a[0], b[0]).
    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1_s32() {
        let a: i32x2 = i32x2::new(1, 0);
        let b: i32x2 = i32x2::new(2, 0);
        let e: i32x2 = i32x2::new(1, 2);
        let r: i32x2 = transmute(vuzp1_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_s64() {
        let a: i64x2 = i64x2::new(1, 0);
        let b: i64x2 = i64x2::new(2, 0);
        let e: i64x2 = i64x2::new(1, 2);
        let r: i64x2 = transmute(vuzp1q_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1_u32() {
        let a: u32x2 = u32x2::new(1, 0);
        let b: u32x2 = u32x2::new(2, 0);
        let e: u32x2 = u32x2::new(1, 2);
        let r: u32x2 = transmute(vuzp1_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_u64() {
        let a: u64x2 = u64x2::new(1, 0);
        let b: u64x2 = u64x2::new(2, 0);
        let e: u64x2 = u64x2::new(1, 2);
        let r: u64x2 = transmute(vuzp1q_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_p64() {
        let a: i64x2 = i64x2::new(1, 0);
        let b: i64x2 = i64x2::new(2, 0);
        let e: i64x2 = i64x2::new(1, 2);
        let r: i64x2 = transmute(vuzp1q_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_f32() {
        let a: f32x4 = f32x4::new(0., 8., 1., 9.);
        let b: f32x4 = f32x4::new(1., 10., 3., 11.);
        let e: f32x4 = f32x4::new(0., 1., 1., 3.);
        let r: f32x4 = transmute(vuzp1q_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1_f32() {
        let a: f32x2 = f32x2::new(0., 8.);
        let b: f32x2 = f32x2::new(1., 10.);
        let e: f32x2 = f32x2::new(0., 1.);
        let r: f32x2 = transmute(vuzp1_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp1q_f64() {
        let a: f64x2 = f64x2::new(0., 8.);
        let b: f64x2 = f64x2::new(1., 10.);
        let e: f64x2 = f64x2::new(0., 1.);
        let r: f64x2 = transmute(vuzp1q_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
28318
28319    #[simd_test(enable = "neon")]
28320    unsafe fn test_vuzp2_s8() {
28321        let a: i8x8 = i8x8::new(0, 17, 0, 18, 0, 18, 0, 19);
28322        let b: i8x8 = i8x8::new(0, 18, 0, 19, 0, 23, 0, 24);
28323        let e: i8x8 = i8x8::new(17, 18, 18, 19, 18, 19, 23, 24);
28324        let r: i8x8 = transmute(vuzp2_s8(transmute(a), transmute(b)));
28325        assert_eq!(r, e);
28326    }
28327
28328    #[simd_test(enable = "neon")]
28329    unsafe fn test_vuzp2q_s8() {
28330        let a: i8x16 = i8x16::new(0, 17, 0, 18, 0, 18, 0, 19, 0, 18, 0, 19, 0, 23, 0, 24);
28331        let b: i8x16 = i8x16::new(0, 18, 0, 19, 0, 23, 0, 24, 0, 29, 0, 30, 0, 31, 0, 32);
28332        let e: i8x16 = i8x16::new(17, 18, 18, 19, 18, 19, 23, 24, 18, 19, 23, 24, 29, 30, 31, 32);
28333        let r: i8x16 = transmute(vuzp2q_s8(transmute(a), transmute(b)));
28334        assert_eq!(r, e);
28335    }
28336
28337    #[simd_test(enable = "neon")]
28338    unsafe fn test_vuzp2_s16() {
28339        let a: i16x4 = i16x4::new(0, 17, 0, 18);
28340        let b: i16x4 = i16x4::new(0, 18, 0, 19);
28341        let e: i16x4 = i16x4::new(17, 18, 18, 19);
28342        let r: i16x4 = transmute(vuzp2_s16(transmute(a), transmute(b)));
28343        assert_eq!(r, e);
28344    }
28345
28346    #[simd_test(enable = "neon")]
28347    unsafe fn test_vuzp2q_s16() {
28348        let a: i16x8 = i16x8::new(0, 17, 0, 18, 0, 18, 0, 19);
28349        let b: i16x8 = i16x8::new(0, 18, 0, 19, 0, 23, 0, 24);
28350        let e: i16x8 = i16x8::new(17, 18, 18, 19, 18, 19, 23, 24);
28351        let r: i16x8 = transmute(vuzp2q_s16(transmute(a), transmute(b)));
28352        assert_eq!(r, e);
28353    }
28354
28355    #[simd_test(enable = "neon")]
28356    unsafe fn test_vuzp2q_s32() {
28357        let a: i32x4 = i32x4::new(0, 17, 0, 18);
28358        let b: i32x4 = i32x4::new(0, 18, 0, 19);
28359        let e: i32x4 = i32x4::new(17, 18, 18, 19);
28360        let r: i32x4 = transmute(vuzp2q_s32(transmute(a), transmute(b)));
28361        assert_eq!(r, e);
28362    }
28363
28364    #[simd_test(enable = "neon")]
28365    unsafe fn test_vuzp2_u8() {
28366        let a: u8x8 = u8x8::new(0, 17, 0, 18, 0, 18, 0, 19);
28367        let b: u8x8 = u8x8::new(0, 18, 0, 19, 0, 23, 0, 24);
28368        let e: u8x8 = u8x8::new(17, 18, 18, 19, 18, 19, 23, 24);
28369        let r: u8x8 = transmute(vuzp2_u8(transmute(a), transmute(b)));
28370        assert_eq!(r, e);
28371    }
28372
28373    #[simd_test(enable = "neon")]
28374    unsafe fn test_vuzp2q_u8() {
28375        let a: u8x16 = u8x16::new(0, 17, 0, 18, 0, 18, 0, 19, 0, 18, 0, 19, 0, 23, 0, 24);
28376        let b: u8x16 = u8x16::new(0, 18, 0, 19, 0, 23, 0, 24, 0, 29, 0, 30, 0, 31, 0, 32);
28377        let e: u8x16 = u8x16::new(17, 18, 18, 19, 18, 19, 23, 24, 18, 19, 23, 24, 29, 30, 31, 32);
28378        let r: u8x16 = transmute(vuzp2q_u8(transmute(a), transmute(b)));
28379        assert_eq!(r, e);
28380    }
28381
28382    #[simd_test(enable = "neon")]
28383    unsafe fn test_vuzp2_u16() {
28384        let a: u16x4 = u16x4::new(0, 17, 0, 18);
28385        let b: u16x4 = u16x4::new(0, 18, 0, 19);
28386        let e: u16x4 = u16x4::new(17, 18, 18, 19);
28387        let r: u16x4 = transmute(vuzp2_u16(transmute(a), transmute(b)));
28388        assert_eq!(r, e);
28389    }
28390
28391    #[simd_test(enable = "neon")]
28392    unsafe fn test_vuzp2q_u16() {
28393        let a: u16x8 = u16x8::new(0, 17, 0, 18, 0, 18, 0, 19);
28394        let b: u16x8 = u16x8::new(0, 18, 0, 19, 0, 23, 0, 24);
28395        let e: u16x8 = u16x8::new(17, 18, 18, 19, 18, 19, 23, 24);
28396        let r: u16x8 = transmute(vuzp2q_u16(transmute(a), transmute(b)));
28397        assert_eq!(r, e);
28398    }
28399
28400    #[simd_test(enable = "neon")]
28401    unsafe fn test_vuzp2q_u32() {
28402        let a: u32x4 = u32x4::new(0, 17, 0, 18);
28403        let b: u32x4 = u32x4::new(0, 18, 0, 19);
28404        let e: u32x4 = u32x4::new(17, 18, 18, 19);
28405        let r: u32x4 = transmute(vuzp2q_u32(transmute(a), transmute(b)));
28406        assert_eq!(r, e);
28407    }
28408
28409    #[simd_test(enable = "neon")]
28410    unsafe fn test_vuzp2_p8() {
28411        let a: i8x8 = i8x8::new(0, 17, 0, 18, 0, 18, 0, 19);
28412        let b: i8x8 = i8x8::new(0, 18, 0, 19, 0, 23, 0, 24);
28413        let e: i8x8 = i8x8::new(17, 18, 18, 19, 18, 19, 23, 24);
28414        let r: i8x8 = transmute(vuzp2_p8(transmute(a), transmute(b)));
28415        assert_eq!(r, e);
28416    }
28417
28418    #[simd_test(enable = "neon")]
28419    unsafe fn test_vuzp2q_p8() {
28420        let a: i8x16 = i8x16::new(0, 17, 0, 18, 0, 18, 0, 19, 0, 18, 0, 19, 0, 23, 0, 24);
28421        let b: i8x16 = i8x16::new(0, 18, 0, 19, 0, 23, 0, 24, 0, 29, 0, 30, 0, 31, 0, 32);
28422        let e: i8x16 = i8x16::new(17, 18, 18, 19, 18, 19, 23, 24, 18, 19, 23, 24, 29, 30, 31, 32);
28423        let r: i8x16 = transmute(vuzp2q_p8(transmute(a), transmute(b)));
28424        assert_eq!(r, e);
28425    }
28426
28427    #[simd_test(enable = "neon")]
28428    unsafe fn test_vuzp2_p16() {
28429        let a: i16x4 = i16x4::new(0, 17, 0, 18);
28430        let b: i16x4 = i16x4::new(0, 18, 0, 19);
28431        let e: i16x4 = i16x4::new(17, 18, 18, 19);
28432        let r: i16x4 = transmute(vuzp2_p16(transmute(a), transmute(b)));
28433        assert_eq!(r, e);
28434    }
28435
28436    #[simd_test(enable = "neon")]
28437    unsafe fn test_vuzp2q_p16() {
28438        let a: i16x8 = i16x8::new(0, 17, 0, 18, 0, 18, 0, 19);
28439        let b: i16x8 = i16x8::new(0, 18, 0, 19, 0, 23, 0, 24);
28440        let e: i16x8 = i16x8::new(17, 18, 18, 19, 18, 19, 23, 24);
28441        let r: i16x8 = transmute(vuzp2q_p16(transmute(a), transmute(b)));
28442        assert_eq!(r, e);
28443    }
28444
28445    #[simd_test(enable = "neon")]
28446    unsafe fn test_vuzp2_s32() {
28447        let a: i32x2 = i32x2::new(0, 17);
28448        let b: i32x2 = i32x2::new(0, 18);
28449        let e: i32x2 = i32x2::new(17, 18);
28450        let r: i32x2 = transmute(vuzp2_s32(transmute(a), transmute(b)));
28451        assert_eq!(r, e);
28452    }
28453
28454    #[simd_test(enable = "neon")]
28455    unsafe fn test_vuzp2q_s64() {
28456        let a: i64x2 = i64x2::new(0, 17);
28457        let b: i64x2 = i64x2::new(0, 18);
28458        let e: i64x2 = i64x2::new(17, 18);
28459        let r: i64x2 = transmute(vuzp2q_s64(transmute(a), transmute(b)));
28460        assert_eq!(r, e);
28461    }
28462
28463    #[simd_test(enable = "neon")]
28464    unsafe fn test_vuzp2_u32() {
28465        let a: u32x2 = u32x2::new(0, 17);
28466        let b: u32x2 = u32x2::new(0, 18);
28467        let e: u32x2 = u32x2::new(17, 18);
28468        let r: u32x2 = transmute(vuzp2_u32(transmute(a), transmute(b)));
28469        assert_eq!(r, e);
28470    }
28471
    // vuzp2q_u64 takes the odd lane of each 2-element input: e = [a1, b1].
    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp2q_u64() {
        let a: u64x2 = u64x2::new(0, 17);
        let b: u64x2 = u64x2::new(0, 18);
        let e: u64x2 = u64x2::new(17, 18);
        let r: u64x2 = transmute(vuzp2q_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
28480
    // vuzp2q_p64 takes the odd lane of each 2-element input: e = [a1, b1].
    // poly64 lanes are driven through i64x2 via transmute.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp2q_p64() {
        let a: i64x2 = i64x2::new(0, 17);
        let b: i64x2 = i64x2::new(0, 18);
        let e: i64x2 = i64x2::new(17, 18);
        let r: i64x2 = transmute(vuzp2q_p64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
28489
    // vuzp2q_f32 de-interleaves odd lanes: e = [a1, a3, b1, b3].
    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp2q_f32() {
        let a: f32x4 = f32x4::new(0., 8., 1., 9.);
        let b: f32x4 = f32x4::new(2., 9., 3., 11.);
        let e: f32x4 = f32x4::new(8., 9., 9., 11.);
        let r: f32x4 = transmute(vuzp2q_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
28498
    // vuzp2_f32 takes the odd lane of each 2-element input: e = [a1, b1].
    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp2_f32() {
        let a: f32x2 = f32x2::new(0., 8.);
        let b: f32x2 = f32x2::new(2., 9.);
        let e: f32x2 = f32x2::new(8., 9.);
        let r: f32x2 = transmute(vuzp2_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
28507
    // vuzp2q_f64 takes the odd lane of each 2-element input: e = [a1, b1].
    #[simd_test(enable = "neon")]
    unsafe fn test_vuzp2q_f64() {
        let a: f64x2 = f64x2::new(0., 8.);
        let b: f64x2 = f64x2::new(2., 9.);
        let e: f64x2 = f64x2::new(8., 9.);
        let r: f64x2 = transmute(vuzp2q_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
28516
    // vabal_high_u8: widening absolute-difference-accumulate over the HIGH
    // halves of b and c: e[i] = a[i] + |b[i+8] - c[i+8]|.
    // Here a[i] = 9+i and |b_hi[i] - c_hi[i]| = 11-i, so every lane is 20.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabal_high_u8() {
        let a: u16x8 = u16x8::new(9, 10, 11, 12, 13, 14, 15, 16);
        let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
        let c: u8x16 = u8x16::new(10, 10, 10, 10, 10, 10, 10, 10, 20, 0, 2, 4, 6, 8, 10, 12);
        let e: u16x8 = u16x8::new(20, 20, 20, 20, 20, 20, 20, 20);
        let r: u16x8 = transmute(vabal_high_u8(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
28526
    // vabal_high_u16: e[i] = a[i] + |b[i+4] - c[i+4]| (high halves of b, c).
    // a = 9..12 and the abs-diffs are 11..8, so every lane is 20.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabal_high_u16() {
        let a: u32x4 = u32x4::new(9, 10, 11, 12);
        let b: u16x8 = u16x8::new(1, 2, 3, 4, 9, 10, 11, 12);
        let c: u16x8 = u16x8::new(10, 10, 10, 10, 20, 0, 2, 4);
        let e: u32x4 = u32x4::new(20, 20, 20, 20);
        let r: u32x4 = transmute(vabal_high_u16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
28536
    // vabal_high_u32: e[i] = a[i] + |b[i+2] - c[i+2]| (high halves of b, c).
    // 15 + |15-10| = 20 and 16 + |16-12| = 20.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabal_high_u32() {
        let a: u64x2 = u64x2::new(15, 16);
        let b: u32x4 = u32x4::new(1, 2, 15, 16);
        let c: u32x4 = u32x4::new(10, 10, 10, 12);
        let e: u64x2 = u64x2::new(20, 20);
        let r: u64x2 = transmute(vabal_high_u32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
28546
    // vabal_high_s8: signed variant of the widening abs-diff-accumulate on
    // high halves: e[i] = a[i] + |b[i+8] - c[i+8]|; all lanes come to 20.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabal_high_s8() {
        let a: i16x8 = i16x8::new(9, 10, 11, 12, 13, 14, 15, 16);
        let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
        let c: i8x16 = i8x16::new(10, 10, 10, 10, 10, 10, 10, 10, 20, 0, 2, 4, 6, 8, 10, 12);
        let e: i16x8 = i16x8::new(20, 20, 20, 20, 20, 20, 20, 20);
        let r: i16x8 = transmute(vabal_high_s8(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
28556
    // vabal_high_s16: e[i] = a[i] + |b[i+4] - c[i+4]|; all lanes come to 20.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabal_high_s16() {
        let a: i32x4 = i32x4::new(9, 10, 11, 12);
        let b: i16x8 = i16x8::new(1, 2, 3, 4, 9, 10, 11, 12);
        let c: i16x8 = i16x8::new(10, 10, 10, 10, 20, 0, 2, 4);
        let e: i32x4 = i32x4::new(20, 20, 20, 20);
        let r: i32x4 = transmute(vabal_high_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
28566
    // vabal_high_s32: e[i] = a[i] + |b[i+2] - c[i+2]|;
    // 15 + |15-10| = 20 and 16 + |16-12| = 20.
    #[simd_test(enable = "neon")]
    unsafe fn test_vabal_high_s32() {
        let a: i64x2 = i64x2::new(15, 16);
        let b: i32x4 = i32x4::new(1, 2, 15, 16);
        let c: i32x4 = i32x4::new(10, 10, 10, 12);
        let e: i64x2 = i64x2::new(20, 20);
        let r: i64x2 = transmute(vabal_high_s32(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
28576
    // vqabs_s64: saturating absolute value. |i64::MIN| does not fit in i64,
    // so the result saturates to i64::MAX instead of wrapping.
    #[simd_test(enable = "neon")]
    unsafe fn test_vqabs_s64() {
        let a: i64x1 = i64x1::new(-9223372036854775808);
        let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
        let r: i64x1 = transmute(vqabs_s64(transmute(a)));
        assert_eq!(r, e);
    }
28584
    // vqabsq_s64: saturating absolute value per lane. i64::MIN saturates to
    // i64::MAX; the ordinary case |-7| = 7 is covered in the second lane.
    #[simd_test(enable = "neon")]
    unsafe fn test_vqabsq_s64() {
        let a: i64x2 = i64x2::new(-9223372036854775808, -7);
        let e: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 7);
        let r: i64x2 = transmute(vqabsq_s64(transmute(a)));
        assert_eq!(r, e);
    }
28592
    // vqabsb_s8: scalar saturating absolute value; |-7| = 7 (no saturation).
    #[simd_test(enable = "neon")]
    unsafe fn test_vqabsb_s8() {
        let a: i8 = -7;
        let e: i8 = 7;
        let r: i8 = vqabsb_s8(a);
        assert_eq!(r, e);
    }
28600
    // vqabsh_s16: scalar saturating absolute value; |-7| = 7 (no saturation).
    #[simd_test(enable = "neon")]
    unsafe fn test_vqabsh_s16() {
        let a: i16 = -7;
        let e: i16 = 7;
        let r: i16 = vqabsh_s16(a);
        assert_eq!(r, e);
    }
28608
    // vqabss_s32: scalar saturating absolute value; |-7| = 7 (no saturation).
    #[simd_test(enable = "neon")]
    unsafe fn test_vqabss_s32() {
        let a: i32 = -7;
        let e: i32 = 7;
        let r: i32 = vqabss_s32(a);
        assert_eq!(r, e);
    }
28616
    // vqabsd_s64: scalar saturating absolute value; |-7| = 7 (no saturation).
    #[simd_test(enable = "neon")]
    unsafe fn test_vqabsd_s64() {
        let a: i64 = -7;
        let e: i64 = 7;
        let r: i64 = vqabsd_s64(a);
        assert_eq!(r, e);
    }
28624
    // vslid_n_s64::<2>: shift-left-insert — result is (b << 2) with the low
    // 2 bits kept from a: (2042 << 2) = 8168, a's low bits 0b01 -> 8169.
    #[simd_test(enable = "neon")]
    unsafe fn test_vslid_n_s64() {
        let a: i64 = 333;
        let b: i64 = 2042;
        let e: i64 = 8169;
        let r: i64 = vslid_n_s64::<2>(a, b);
        assert_eq!(r, e);
    }
28633
    // vslid_n_u64::<2>: unsigned shift-left-insert — (2042 << 2) = 8168 with
    // a's low 2 bits (0b01) preserved -> 8169.
    #[simd_test(enable = "neon")]
    unsafe fn test_vslid_n_u64() {
        let a: u64 = 333;
        let b: u64 = 2042;
        let e: u64 = 8169;
        let r: u64 = vslid_n_u64::<2>(a, b);
        assert_eq!(r, e);
    }
28642
    // vsrid_n_s64::<2>: shift-right-insert — result is (b >> 2) with the top
    // 2 bits kept from a: (2042 >> 2) = 510; a's top bits are 0 -> 510.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsrid_n_s64() {
        let a: i64 = 333;
        let b: i64 = 2042;
        let e: i64 = 510;
        let r: i64 = vsrid_n_s64::<2>(a, b);
        assert_eq!(r, e);
    }
28651
    // vsrid_n_u64::<2>: unsigned shift-right-insert — (2042 >> 2) = 510 with
    // a's top 2 bits (0) preserved -> 510.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsrid_n_u64() {
        let a: u64 = 333;
        let b: u64 = 2042;
        let e: u64 = 510;
        let r: u64 = vsrid_n_u64::<2>(a, b);
        assert_eq!(r, e);
    }
28660}