From 1acfdbb3cd12854736df4ce7bd76aaa14f465f8f Mon Sep 17 00:00:00 2001 From: Boxy Uwu Date: Wed, 19 Nov 2025 19:30:09 +0000 Subject: [PATCH 01/90] Make `ValTree` recurse through `ty::Const` --- src/intrinsics/simd.rs | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/intrinsics/simd.rs b/src/intrinsics/simd.rs index 0bce31beb8b87..38e8d2fa9368b 100644 --- a/src/intrinsics/simd.rs +++ b/src/intrinsics/simd.rs @@ -143,7 +143,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( let total_len = lane_count * 2; - let indexes = idx.iter().map(|idx| idx.unwrap_leaf().to_u32()).collect::>(); + let indexes = idx.iter().map(|idx| idx.to_leaf().to_u32()).collect::>(); for &idx in &indexes { assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len); @@ -961,9 +961,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( let lane_clif_ty = fx.clif_type(val_lane_ty).unwrap(); let ptr_val = ptr.load_scalar(fx); - let alignment = generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0] - .unwrap_leaf() - .to_simd_alignment(); + let alignment = + generic_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment(); let memflags = match alignment { SimdAlign::Unaligned => MemFlags::new().with_notrap(), @@ -1006,9 +1005,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( let lane_clif_ty = fx.clif_type(val_lane_ty).unwrap(); let ret_lane_layout = fx.layout_of(ret_lane_ty); - let alignment = generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0] - .unwrap_leaf() - .to_simd_alignment(); + let alignment = + generic_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment(); let memflags = match alignment { SimdAlign::Unaligned => MemFlags::new().with_notrap(), @@ -1059,9 +1057,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( let ret_lane_layout = fx.layout_of(ret_lane_ty); let ptr_val = ptr.load_scalar(fx); - let alignment = 
generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0] - .unwrap_leaf() - .to_simd_alignment(); + let alignment = + generic_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment(); let memflags = match alignment { SimdAlign::Unaligned => MemFlags::new().with_notrap(), From 50ef284125101fbacd8ed16e86a084e6b6083304 Mon Sep 17 00:00:00 2001 From: Boxy Uwu Date: Wed, 19 Nov 2025 23:28:50 +0000 Subject: [PATCH 02/90] Fix tools --- src/intrinsics/simd.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/intrinsics/simd.rs b/src/intrinsics/simd.rs index 38e8d2fa9368b..15aef60c5af37 100644 --- a/src/intrinsics/simd.rs +++ b/src/intrinsics/simd.rs @@ -130,7 +130,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( return; } - let idx = generic_args[2].expect_const().to_value().valtree.unwrap_branch(); + let idx = generic_args[2].expect_const().to_branch(); assert_eq!(x.layout(), y.layout()); let layout = x.layout(); From 4e375db44aeefdfcaf374d1a47d9102b6d6260af Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Tue, 23 Dec 2025 17:47:42 +0000 Subject: [PATCH 03/90] Merge commit '6f3f6bdacb75571a87f08e0920d9c191b3d65ded' into sync_cg_clif-2025-12-23 --- .github/workflows/audit.yml | 2 +- Cargo.lock | 173 +++++++++++------------------------- Cargo.toml | 24 ++--- rust-toolchain.toml | 2 +- 4 files changed, 65 insertions(+), 136 deletions(-) diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index 27c95572ef879..274b9504beb04 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -12,7 +12,7 @@ jobs: steps: - uses: actions/checkout@v4 - run: | - sed -i 's/components.*/components = []/' rust-toolchain + sed -i 's/components.*/components = []/' rust-toolchain.toml - uses: rustsec/audit-check@v1.4.1 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/Cargo.lock b/Cargo.lock index 617c7f0e34cd4..3d13b5540e196 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ 
-43,42 +43,42 @@ checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cranelift-assembler-x64" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7631e609c97f063f9777aae405e8492abf9bf92336d7aa3f875403dd4ffd7d" +checksum = "8bd963a645179fa33834ba61fa63353998543b07f877e208da9eb47d4a70d1e7" dependencies = [ "cranelift-assembler-x64-meta", ] [[package]] name = "cranelift-assembler-x64-meta" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c030edccdc4a5bbf28fbfe7701b5cd1f9854b4445184dd34af2a7e8f8db6f45" +checksum = "3f6d5739c9dc6b5553ca758d78d87d127dd19f397f776efecf817b8ba8d0bb01" dependencies = [ "cranelift-srcgen", ] [[package]] name = "cranelift-bforest" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb544c1242d0ca98baf01873ebba96c79d5df155d5108d9bb699aefc741f5e6d" +checksum = "ff402c11bb1c9652b67a3e885e84b1b8d00c13472c8fd85211e06a41a63c3e03" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-bitset" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0325aecbafec053d3d3f082edfdca7937e2945e7f09c5ff9672e05198312282" +checksum = "769a0d88c2f5539e9c5536a93a7bf164b0dc68d91e3d00723e5b4ffc1440afdc" [[package]] name = "cranelift-codegen" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb3236fd319ae897ba00c8a25105081de5c1348576def0e96c062ad259f87a7" +checksum = "d4351f721fb3b26add1c180f0a75c7474bab2f903c8b777c6ca65238ded59a78" dependencies = [ "bumpalo", "cranelift-assembler-x64", @@ -102,9 +102,9 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.126.0" +version = "0.127.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b8791c911a361c539130ace34fb726b16aca4216470ec75d75264b1495c8a3a" +checksum = "61f86c0ba5b96713643f4dd0de0df12844de9c7bb137d6829b174b706939aa74" dependencies = [ "cranelift-assembler-x64-meta", "cranelift-codegen-shared", @@ -114,33 +114,33 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ead718c2a10990870c19b2497b5a04b8aae6024485e33da25b5d02e35819e0" +checksum = "f08605eee8d51fd976a970bd5b16c9529b51b624f8af68f80649ffb172eb85a4" [[package]] name = "cranelift-control" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0a57fc972b5651047efddccb99440d103d9d8c13393ccebde15ddd5b6a1181b" +checksum = "623aab0a09e40f0cf0b5d35eb7832bae4c4f13e3768228e051a6c1a60e88ef5f" dependencies = [ "arbitrary", ] [[package]] name = "cranelift-entity" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aae980b4a1678b601eab2f52e372ed0b3c9565a31c17f380008cb97b3a699c5" +checksum = "ea0f066e07e3bcbe38884cc5c94c32c7a90267d69df80f187d9dfe421adaa7c4" dependencies = [ "cranelift-bitset", ] [[package]] name = "cranelift-frontend" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78877016b607982ca1708c0dd4ce23bde04581a39854c9b43a1dca43625b54c" +checksum = "40865b02a0e52ca8e580ad64feef530cb1d05f6bb4972b4eef05e3eaeae81701" dependencies = [ "cranelift-codegen", "log", @@ -150,15 +150,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc46a68b46d4f53f9f2f02ab8d3a34b00f03a21c124a7a965b8cbf5fdb6773b" +checksum = 
"104b3c117ae513e9af1d90679842101193a5ccb96ac9f997966d85ea25be2852" [[package]] name = "cranelift-jit" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7df920009af919ad9df52eb7b47b1895145822e0c29da9b715a876fc8ecc6d82" +checksum = "3aa5f855cfb8e4253ed2d0dfc1a0b6ebe4912e67aa8b7ee14026ff55ca17f1fe" dependencies = [ "anyhow", "cranelift-codegen", @@ -171,14 +171,14 @@ dependencies = [ "region", "target-lexicon", "wasmtime-internal-jit-icache-coherence", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] name = "cranelift-module" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddcf313629071ce74de8e59f02092f5453d1a01047607fc4ad36886b8bd1486c" +checksum = "b1d01806b191b59f4fc4680293dd5f554caf2de5b62f95eff5beef7acb46c29c" dependencies = [ "anyhow", "cranelift-codegen", @@ -187,9 +187,9 @@ dependencies = [ [[package]] name = "cranelift-native" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03faa07ec8cf373250a8252eb773d098ff88259fa1c19ee1ecde8012839f4097" +checksum = "e5c54e0a358bc05b48f2032e1c320e7f468da068604f2869b77052eab68eb0fe" dependencies = [ "cranelift-codegen", "libc", @@ -198,9 +198,9 @@ dependencies = [ [[package]] name = "cranelift-object" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cca62c14f3c2e4f438192562bbf82d1a98a59543cc66ba04fb658ba99f515a6" +checksum = "3d17e0216be5daabab616647c1918e06dae0708474ba5f7b7762ac24ea5eb126" dependencies = [ "anyhow", "cranelift-codegen", @@ -213,9 +213,9 @@ dependencies = [ [[package]] name = "cranelift-srcgen" -version = "0.126.0" +version = "0.127.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0484cb32c527a742e1bba09ef174acac0afb1dcf623ef1adda42849200edcd2e" +checksum = 
"cc6f4b039f453b66c75e9f7886e5a2af96276e151f44dc19b24b58f9a0c98009" [[package]] name = "crc32fast" @@ -293,7 +293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "754ca22de805bb5744484a5b151a9e1a8e837d5dc232c2d7d8c2e3492edc8b60" dependencies = [ "cfg-if", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -469,31 +469,25 @@ checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "wasmtime-internal-jit-icache-coherence" -version = "39.0.0" +version = "40.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f67986f5c499274ae5b2ba5b173bba0b95d1381f5ca70d8eec657f2392117d8" +checksum = "0858b470463f3e7c73acd6049046049e64be17b98901c2db5047450cf83df1fe" dependencies = [ "anyhow", "cfg-if", "libc", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] name = "wasmtime-internal-math" -version = "39.0.0" +version = "40.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a681733e9b5d5d8804ee6cacd59f92c0d87ba2274f42ee1d4e5a943828d0075d" +checksum = "222e1a590ece4e898f20af1e541b61d2cb803f2557e7eaff23e6c1db5434454a" dependencies = [ "libm", ] -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - [[package]] name = "windows-link" version = "0.2.1" @@ -506,16 +500,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.6", + "windows-targets", ] [[package]] name = "windows-sys" -version = "0.60.2" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" 
dependencies = [ - "windows-targets 0.53.3", + "windows-link", ] [[package]] @@ -524,31 +518,14 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.53.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" -dependencies = [ - "windows-link 0.1.3", - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", ] [[package]] @@ -557,92 +534,44 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" - [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" - [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_i686_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" - [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" - [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" - [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" diff --git a/Cargo.toml b/Cargo.toml index 58e61cd0b9d7d..ee4bde477c477 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,12 +8,12 @@ crate-type = ["dylib"] [dependencies] # These have to be in sync with each other -cranelift-codegen = { version = "0.126.0", default-features = false, features = ["std", "timing", "unwind", "all-native-arch"] } -cranelift-frontend = { version = "0.126.0" } -cranelift-module = { version = "0.126.0" } -cranelift-native = { version = "0.126.0" } -cranelift-jit = { version = "0.126.0", optional = true } -cranelift-object = { version = "0.126.0" } +cranelift-codegen = { version = "0.127.0", default-features = false, features = ["std", "timing", "unwind", "all-native-arch"] } +cranelift-frontend = { version = "0.127.0" } +cranelift-module = { version = "0.127.0" } +cranelift-native = { version = "0.127.0" } +cranelift-jit = { version = "0.127.0", optional = true } +cranelift-object = { version = "0.127.0" } target-lexicon = "0.13" gimli = { version = "0.32", default-features = false, features = ["write"] } object = { version = "0.37.3", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] } @@ -24,12 +24,12 @@ smallvec = "1.8.1" [patch.crates-io] # Uncomment to use an unreleased version of cranelift -#cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" } -#cranelift-frontend = { git = 
"https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" } -#cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" } -#cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" } -#cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" } -#cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" } +#cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } +#cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } +#cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } +#cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } +#cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } +#cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } # Uncomment to use local checkout of cranelift #cranelift-codegen = { path = "../wasmtime/cranelift/codegen" } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 6ce49eb4ccf07..b157c5879ba7e 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2025-12-18" +channel = "nightly-2025-12-23" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From 6da10482364d1159ecff4b9eb11443852fbe9586 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Wed, 24 Dec 2025 12:25:30 +0000 Subject: [PATCH 04/90] Fix some divergences with the cg_clif subtree For some reason git-subtree incorrectly synced those changes. 
--- src/intrinsics/simd.rs | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/intrinsics/simd.rs b/src/intrinsics/simd.rs index 15aef60c5af37..bef9c67474577 100644 --- a/src/intrinsics/simd.rs +++ b/src/intrinsics/simd.rs @@ -1005,14 +1005,6 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( let lane_clif_ty = fx.clif_type(val_lane_ty).unwrap(); let ret_lane_layout = fx.layout_of(ret_lane_ty); - let alignment = - generic_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment(); - - let memflags = match alignment { - SimdAlign::Unaligned => MemFlags::new().with_notrap(), - _ => MemFlags::trusted(), - }; - for lane_idx in 0..ptr_lane_count { let val_lane = val.value_lane(fx, lane_idx).load_scalar(fx); let ptr_lane = ptr.value_lane(fx, lane_idx).load_scalar(fx); @@ -1028,7 +1020,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( fx.bcx.seal_block(if_disabled); fx.bcx.switch_to_block(if_enabled); - let res = fx.bcx.ins().load(lane_clif_ty, memflags, ptr_lane, 0); + let res = fx.bcx.ins().load(lane_clif_ty, MemFlags::trusted(), ptr_lane, 0); fx.bcx.ins().jump(next, &[res.into()]); fx.bcx.switch_to_block(if_disabled); From 9c369a43ceaa25cfd3c914a4dd429779e7488283 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Fri, 2 Jan 2026 17:21:03 +0000 Subject: [PATCH 05/90] Rustup to rustc 1.94.0-nightly (fcd630976 2026-01-01) --- config.txt | 2 +- rust-toolchain.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config.txt b/config.txt index 85748a4f8a789..72631355733c4 100644 --- a/config.txt +++ b/config.txt @@ -20,7 +20,7 @@ aot.mini_core_hello_world testsuite.base_sysroot aot.arbitrary_self_types_pointers_and_wrappers -jit.std_example +#jit.std_example # FIXME(#1619) broken for some reason aot.std_example aot.dst_field_align aot.subslice-patterns-const-eval diff --git a/rust-toolchain.toml b/rust-toolchain.toml index b157c5879ba7e..de0eb1d7322a1 100644 --- 
a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2025-12-23" +channel = "nightly-2026-01-02" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From 0cba97b3b4e9e4255ce52ec399ceffe2fcebe3ae Mon Sep 17 00:00:00 2001 From: Martin Nordholts Date: Thu, 1 Jan 2026 10:47:22 +0100 Subject: [PATCH 06/90] Finish transition from `semitransparent` to `semiopaque` for `rustc_macro_transparency` --- example/mini_core.rs | 14 +++++++------- src/global_asm.rs | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/example/mini_core.rs b/example/mini_core.rs index b522ea1937166..301547cadaf7c 100644 --- a/example/mini_core.rs +++ b/example/mini_core.rs @@ -744,43 +744,43 @@ unsafe extern "C" { pub struct VaList<'a>(&'a mut VaListImpl); #[rustc_builtin_macro] -#[rustc_macro_transparency = "semitransparent"] +#[rustc_macro_transparency = "semiopaque"] pub macro stringify($($t:tt)*) { /* compiler built-in */ } #[rustc_builtin_macro] -#[rustc_macro_transparency = "semitransparent"] +#[rustc_macro_transparency = "semiopaque"] pub macro file() { /* compiler built-in */ } #[rustc_builtin_macro] -#[rustc_macro_transparency = "semitransparent"] +#[rustc_macro_transparency = "semiopaque"] pub macro line() { /* compiler built-in */ } #[rustc_builtin_macro] -#[rustc_macro_transparency = "semitransparent"] +#[rustc_macro_transparency = "semiopaque"] pub macro cfg() { /* compiler built-in */ } #[rustc_builtin_macro] -#[rustc_macro_transparency = "semitransparent"] +#[rustc_macro_transparency = "semiopaque"] pub macro asm() { /* compiler built-in */ } #[rustc_builtin_macro] -#[rustc_macro_transparency = "semitransparent"] +#[rustc_macro_transparency = "semiopaque"] pub macro global_asm() { /* compiler built-in */ } #[rustc_builtin_macro] -#[rustc_macro_transparency = "semitransparent"] +#[rustc_macro_transparency = "semiopaque"] pub macro naked_asm() { /* compiler built-in */ } diff --git 
a/src/global_asm.rs b/src/global_asm.rs index 8d8cdb14dbc6b..97d6cecf68481 100644 --- a/src/global_asm.rs +++ b/src/global_asm.rs @@ -233,7 +233,7 @@ pub(crate) fn compile_global_asm( #![allow(internal_features)] #![no_core] #[rustc_builtin_macro] - #[rustc_macro_transparency = "semitransparent"] + #[rustc_macro_transparency = "semiopaque"] macro global_asm() { /* compiler built-in */ } global_asm!(r###" "####, From b86553f808fae4dce7b7cac419e8a382399008d5 Mon Sep 17 00:00:00 2001 From: Pavel Grigorenko Date: Sun, 9 Nov 2025 23:17:09 +0300 Subject: [PATCH 07/90] Stabilize `alloc_layout_extra` --- patches/0027-sysroot_tests-128bit-atomic-operations.patch | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/patches/0027-sysroot_tests-128bit-atomic-operations.patch b/patches/0027-sysroot_tests-128bit-atomic-operations.patch index f3d1d5c43ea10..6ed0b17f679ca 100644 --- a/patches/0027-sysroot_tests-128bit-atomic-operations.patch +++ b/patches/0027-sysroot_tests-128bit-atomic-operations.patch @@ -14,11 +14,10 @@ diff --git a/coretests/tests/lib.rs b/coretests/tests/lib.rs index 1e336bf..35e6f54 100644 --- a/coretests/tests/lib.rs +++ b/coretests/tests/lib.rs -@@ -2,5 +2,4 @@ +@@ -2,4 +2,3 @@ // tidy-alphabetical-start -#![cfg_attr(target_has_atomic = "128", feature(integer_atomics))] #![cfg_attr(test, feature(cfg_select))] - #![feature(alloc_layout_extra)] #![feature(array_ptr_get)] diff --git a/coretests/tests/atomic.rs b/coretests/tests/atomic.rs index b735957..ea728b6 100644 From c1569cc0078bc9fae97d22d3050fbe90f3976c3f Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sun, 11 Jan 2026 17:25:20 +0000 Subject: [PATCH 08/90] Lower extern "rust-cold" to default calling convention The next Cranelift release will support for CallConv::Cold as it was already effectively equivalent to the default calling convention. 
--- src/abi/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/abi/mod.rs b/src/abi/mod.rs index 09d71f5dd5579..94891b632f566 100644 --- a/src/abi/mod.rs +++ b/src/abi/mod.rs @@ -53,8 +53,7 @@ pub(crate) fn conv_to_call_conv( default_call_conv: CallConv, ) -> CallConv { match c { - CanonAbi::Rust | CanonAbi::C => default_call_conv, - CanonAbi::RustCold => CallConv::Cold, + CanonAbi::Rust | CanonAbi::RustCold | CanonAbi::C => default_call_conv, // Functions with this calling convention can only be called from assembly, but it is // possible to declare an `extern "custom"` block, so the backend still needs a calling From 4d0b725e06c2012442b26b0ad4f65c28e2c4f4b3 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sun, 11 Jan 2026 17:27:53 +0000 Subject: [PATCH 09/90] Add a gdb helper for jit backtraces --- scripts/jit-helpers.py | 52 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 scripts/jit-helpers.py diff --git a/scripts/jit-helpers.py b/scripts/jit-helpers.py new file mode 100644 index 0000000000000..1128521c0dfb3 --- /dev/null +++ b/scripts/jit-helpers.py @@ -0,0 +1,52 @@ +import gdb + +def jitmap_raw(): + pid = gdb.selected_inferior().pid + jitmap_file = open("/tmp/perf-%d.map" % (pid,), "r") + jitmap = jitmap_file.read() + jitmap_file.close() + return jitmap + +def jit_functions(): + jitmap = jitmap_raw() + + functions = [] + for line in jitmap.strip().split("\n"): + [addr, size, name] = line.split(" ") + functions.append((int(addr, 16), int(size, 16), name)) + + return functions + +class JitDecorator(gdb.FrameDecorator.FrameDecorator): + def __init__(self, fobj, name): + super(JitDecorator, self).__init__(fobj) + self.name = name + + def function(self): + return self.name + +class JitFilter: + """ + A backtrace filter which reads perf map files produced by cranelift-jit. 
+ """ + + def __init__(self): + self.name = 'JitFilter' + self.enabled = True + self.priority = 0 + + gdb.current_progspace().frame_filters[self.name] = self + + # FIXME add an actual unwinder or somehow register JITed .eh_frame with gdb to avoid relying on + # gdb unwinder heuristics. + def filter(self, frame_iter): + for frame in frame_iter: + frame_addr = frame.inferior_frame().pc() + for (addr, size, name) in jit_functions(): + if frame_addr >= addr and frame_addr < addr + size: + yield JitDecorator(frame, name) + break + else: + yield frame + +JitFilter() From a03e769410bbd4bb710f64cb399deceaa4cf342e Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sun, 11 Jan 2026 17:58:51 +0000 Subject: [PATCH 10/90] Improve standard library source remapping --- build_system/build_sysroot.rs | 6 +----- scripts/test_rustc_tests.sh | 11 ++--------- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/build_system/build_sysroot.rs b/build_system/build_sysroot.rs index 7b4c604580c11..2cc93713c6647 100644 --- a/build_system/build_sysroot.rs +++ b/build_system/build_sysroot.rs @@ -235,11 +235,7 @@ fn build_clif_sysroot_for_triple( if let Some(prefix) = env::var_os("CG_CLIF_STDLIB_REMAP_PATH_PREFIX") { rustflags.push("--remap-path-prefix".to_owned()); - rustflags.push(format!( - "{}={}", - STDLIB_SRC.to_path(dirs).to_str().unwrap(), - prefix.to_str().unwrap() - )); + rustflags.push(format!("library/={}/library", prefix.to_str().unwrap())); } compiler.rustflags.extend(rustflags); let mut build_cmd = STANDARD_LIBRARY.build(&compiler, dirs); diff --git a/scripts/test_rustc_tests.sh b/scripts/test_rustc_tests.sh index b25269d1430ae..288141d345b7f 100755 --- a/scripts/test_rustc_tests.sh +++ b/scripts/test_rustc_tests.sh @@ -152,15 +152,8 @@ rm tests/ui/errors/remap-path-prefix-sysroot.rs # different sysroot source path rm -r tests/run-make/export/extern-opt # something about rustc version mismatches rm -r tests/run-make/export # same rm 
-r tests/ui/compiletest-self-test/compile-flags-incremental.rs # needs compiletest compiled with panic=unwind -rm tests/ui/async-await/in-trait/dont-project-to-specializable-projection.rs # something going wrong with stdlib source remapping -rm tests/ui/consts/miri_unleashed/drop.rs # same -rm tests/ui/error-emitter/multiline-removal-suggestion.rs # same -rm tests/ui/lint/lint-const-item-mutation.rs # same -rm tests/ui/lint/use-redundant/use-redundant-issue-71450.rs # same -rm tests/ui/lint/use-redundant/use-redundant-prelude-rust-2021.rs # same -rm tests/ui/specialization/const_trait_impl.rs # same -rm tests/ui/thir-print/offset_of.rs # same -rm tests/ui/traits/const-traits/const_closure-const_trait_impl-ice-113381.rs # same +rm -r tests/ui/extern/extern-types-field-offset.rs # expects /rustc/ rather than /rustc/FAKE_PREFIX +rm -r tests/ui/process/println-with-broken-pipe.rs # same # genuine bugs # ============ From c71353854cc0f146dbb345707e8c7aa468b342a9 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Tue, 13 Jan 2026 14:08:47 +0100 Subject: [PATCH 11/90] x86: support passing `u128`/`i128` to inline assembly --- compiler/rustc_target/src/asm/mod.rs | 2 +- compiler/rustc_target/src/asm/x86.rs | 62 +++- .../language-features/asm-experimental-reg.md | 3 + tests/auxiliary/minicore.rs | 4 + ...stderr => bad-reg.experimental_reg.stderr} | 60 ++-- tests/ui/asm/x86_64/bad-reg.rs | 30 +- tests/ui/asm/x86_64/bad-reg.stable.stderr | 281 ++++++++++++++++++ 7 files changed, 394 insertions(+), 48 deletions(-) rename tests/ui/asm/x86_64/{bad-reg.stderr => bad-reg.experimental_reg.stderr} (89%) create mode 100644 tests/ui/asm/x86_64/bad-reg.stable.stderr diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs index 0078866ab9503..9e41a187a3f9b 100644 --- a/compiler/rustc_target/src/asm/mod.rs +++ b/compiler/rustc_target/src/asm/mod.rs @@ -614,7 +614,7 @@ impl InlineAsmRegClass { allow_experimental_reg: bool, ) -> &'static [(InlineAsmType, 
Option)] { match self { - Self::X86(r) => r.supported_types(arch), + Self::X86(r) => r.supported_types(arch, allow_experimental_reg), Self::Arm(r) => r.supported_types(arch), Self::AArch64(r) => r.supported_types(arch), Self::RiscV(r) => r.supported_types(arch), diff --git a/compiler/rustc_target/src/asm/x86.rs b/compiler/rustc_target/src/asm/x86.rs index 15c1925bcdafd..03e7fd5969608 100644 --- a/compiler/rustc_target/src/asm/x86.rs +++ b/compiler/rustc_target/src/asm/x86.rs @@ -105,6 +105,7 @@ impl X86InlineAsmRegClass { pub fn supported_types( self, arch: InlineAsmArch, + allow_experimental_reg: bool, ) -> &'static [(InlineAsmType, Option)] { match self { Self::reg | Self::reg_abcd => { @@ -115,21 +116,52 @@ impl X86InlineAsmRegClass { } } Self::reg_byte => types! { _: I8; }, - Self::xmm_reg => types! { - sse: I32, I64, F16, F32, F64, F128, - VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2); - }, - Self::ymm_reg => types! { - avx: I32, I64, F16, F32, F64, F128, - VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2), - VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF16(16), VecF32(8), VecF64(4); - }, - Self::zmm_reg => types! { - avx512f: I32, I64, F16, F32, F64, F128, - VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2), - VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF16(16), VecF32(8), VecF64(4), - VecI8(64), VecI16(32), VecI32(16), VecI64(8), VecF16(32), VecF32(16), VecF64(8); - }, + Self::xmm_reg => { + if allow_experimental_reg { + types! { + sse: I32, I64, I128, F16, F32, F64, F128, + VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2); + } + } else { + types! { + sse: I32, I64, F16, F32, F64, F128, + VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2); + } + } + } + Self::ymm_reg => { + if allow_experimental_reg { + types! 
{ + avx: I32, I64, I128, F16, F32, F64, F128, + VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2), + VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF16(16), VecF32(8), VecF64(4); + } + } else { + types! { + avx: I32, I64, F16, F32, F64, F128, + VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2), + VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF16(16), VecF32(8), VecF64(4); + } + } + } + Self::zmm_reg => { + if allow_experimental_reg { + types! { + avx512f: I32, I64, I128, F16, F32, F64, F128, + VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2), + VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF16(16), VecF32(8), VecF64(4), + VecI8(64), VecI16(32), VecI32(16), VecI64(8), VecF16(32), VecF32(16), VecF64(8); + } + } else { + types! { + avx512f: I32, I64, F16, F32, F64, F128, + VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2), + VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF16(16), VecF32(8), VecF64(4), + VecI8(64), VecI16(32), VecI32(16), VecI64(8), VecF16(32), VecF32(16), VecF64(8); + } + } + } + Self::kreg => types! 
{ avx512f: I8, I16; avx512bw: I32, I64; diff --git a/src/doc/unstable-book/src/language-features/asm-experimental-reg.md b/src/doc/unstable-book/src/language-features/asm-experimental-reg.md index a251573d276cb..5f695c90e4f77 100644 --- a/src/doc/unstable-book/src/language-features/asm-experimental-reg.md +++ b/src/doc/unstable-book/src/language-features/asm-experimental-reg.md @@ -22,6 +22,9 @@ This tracks support for additional registers in architectures where inline assem | Architecture | Register class | Target feature | Allowed types | | ------------ | -------------- | -------------- | ------------- | | s390x | `vreg` | `vector` | `i32`, `f32`, `i64`, `f64`, `i128`, `f128`, `i8x16`, `i16x8`, `i32x4`, `i64x2`, `f32x4`, `f64x2` | +| x86 | `xmm_reg` | `sse` | `i128` | +| x86 | `ymm_reg` | `avx` | `i128` | +| x86 | `zmm_reg` | `avx512f` | `i128` | ## Register aliases diff --git a/tests/auxiliary/minicore.rs b/tests/auxiliary/minicore.rs index 95b217c130317..39d9017b4e56f 100644 --- a/tests/auxiliary/minicore.rs +++ b/tests/auxiliary/minicore.rs @@ -62,6 +62,10 @@ pub trait MetaSized: PointeeSized {} )] pub trait Sized: MetaSized {} +#[lang = "destruct"] +#[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)] +pub trait Destruct: PointeeSized {} + #[lang = "legacy_receiver"] pub trait LegacyReceiver {} impl LegacyReceiver for &T {} diff --git a/tests/ui/asm/x86_64/bad-reg.stderr b/tests/ui/asm/x86_64/bad-reg.experimental_reg.stderr similarity index 89% rename from tests/ui/asm/x86_64/bad-reg.stderr rename to tests/ui/asm/x86_64/bad-reg.experimental_reg.stderr index 6a02957210bb9..7c9b211337694 100644 --- a/tests/ui/asm/x86_64/bad-reg.stderr +++ b/tests/ui/asm/x86_64/bad-reg.experimental_reg.stderr @@ -1,5 +1,5 @@ error: invalid register class `foo`: unknown register class - --> $DIR/bad-reg.rs:12:20 + --> $DIR/bad-reg.rs:20:20 | LL | asm!("{}", in(foo) foo); | ^^^^^^^^^^^ @@ -7,13 +7,13 @@ LL | asm!("{}", in(foo) foo); = note: the following 
register classes are supported on this target: `reg`, `reg_abcd`, `reg_byte`, `xmm_reg`, `ymm_reg`, `zmm_reg`, `kreg`, `kreg0`, `mmx_reg`, `x87_reg`, `tmm_reg` error: invalid register `foo`: unknown register - --> $DIR/bad-reg.rs:14:18 + --> $DIR/bad-reg.rs:22:18 | LL | asm!("", in("foo") foo); | ^^^^^^^^^^^^^ error: invalid asm template modifier for this register class - --> $DIR/bad-reg.rs:16:15 + --> $DIR/bad-reg.rs:24:15 | LL | asm!("{:z}", in(reg) foo); | ^^^^ ----------- argument @@ -23,7 +23,7 @@ LL | asm!("{:z}", in(reg) foo); = note: the `reg` register class supports the following template modifiers: `l`, `x`, `e`, `r` error: invalid asm template modifier for this register class - --> $DIR/bad-reg.rs:18:15 + --> $DIR/bad-reg.rs:26:15 | LL | asm!("{:r}", in(xmm_reg) foo); | ^^^^ --------------- argument @@ -33,7 +33,7 @@ LL | asm!("{:r}", in(xmm_reg) foo); = note: the `xmm_reg` register class supports the following template modifiers: `x`, `y`, `z` error: asm template modifiers are not allowed for `const` arguments - --> $DIR/bad-reg.rs:20:15 + --> $DIR/bad-reg.rs:28:15 | LL | asm!("{:a}", const 0); | ^^^^ ------- argument @@ -41,7 +41,7 @@ LL | asm!("{:a}", const 0); | template modifier error: asm template modifiers are not allowed for `sym` arguments - --> $DIR/bad-reg.rs:22:15 + --> $DIR/bad-reg.rs:30:15 | LL | asm!("{:a}", sym main); | ^^^^ -------- argument @@ -49,67 +49,67 @@ LL | asm!("{:a}", sym main); | template modifier error: invalid register `ebp`: the frame pointer cannot be used as an operand for inline asm - --> $DIR/bad-reg.rs:24:18 + --> $DIR/bad-reg.rs:32:18 | LL | asm!("", in("ebp") foo); | ^^^^^^^^^^^^^ error: invalid register `rsp`: the stack pointer cannot be used as an operand for inline asm - --> $DIR/bad-reg.rs:26:18 + --> $DIR/bad-reg.rs:34:18 | LL | asm!("", in("rsp") foo); | ^^^^^^^^^^^^^ error: invalid register `ip`: the instruction pointer cannot be used as an operand for inline asm - --> $DIR/bad-reg.rs:28:18 + --> 
$DIR/bad-reg.rs:36:18 | LL | asm!("", in("ip") foo); | ^^^^^^^^^^^^ error: register class `x87_reg` can only be used as a clobber, not as an input or output - --> $DIR/bad-reg.rs:31:18 + --> $DIR/bad-reg.rs:39:18 | LL | asm!("", in("st(2)") foo); | ^^^^^^^^^^^^^^^ error: register class `mmx_reg` can only be used as a clobber, not as an input or output - --> $DIR/bad-reg.rs:34:18 + --> $DIR/bad-reg.rs:42:18 | LL | asm!("", in("mm0") foo); | ^^^^^^^^^^^^^ error: register class `kreg0` can only be used as a clobber, not as an input or output - --> $DIR/bad-reg.rs:37:18 + --> $DIR/bad-reg.rs:45:18 | LL | asm!("", in("k0") foo); | ^^^^^^^^^^^^ error: register class `x87_reg` can only be used as a clobber, not as an input or output - --> $DIR/bad-reg.rs:42:20 + --> $DIR/bad-reg.rs:50:20 | LL | asm!("{}", in(x87_reg) foo); | ^^^^^^^^^^^^^^^ error: register class `mmx_reg` can only be used as a clobber, not as an input or output - --> $DIR/bad-reg.rs:45:20 + --> $DIR/bad-reg.rs:53:20 | LL | asm!("{}", in(mmx_reg) foo); | ^^^^^^^^^^^^^^^ error: register class `x87_reg` can only be used as a clobber, not as an input or output - --> $DIR/bad-reg.rs:48:20 + --> $DIR/bad-reg.rs:56:20 | LL | asm!("{}", out(x87_reg) _); | ^^^^^^^^^^^^^^ error: register class `mmx_reg` can only be used as a clobber, not as an input or output - --> $DIR/bad-reg.rs:50:20 + --> $DIR/bad-reg.rs:58:20 | LL | asm!("{}", out(mmx_reg) _); | ^^^^^^^^^^^^^^ error: register `al` conflicts with register `eax` - --> $DIR/bad-reg.rs:56:33 + --> $DIR/bad-reg.rs:64:33 | LL | asm!("", in("eax") foo, in("al") bar); | ------------- ^^^^^^^^^^^^ register `al` @@ -117,7 +117,7 @@ LL | asm!("", in("eax") foo, in("al") bar); | register `eax` error: register `rax` conflicts with register `rax` - --> $DIR/bad-reg.rs:59:33 + --> $DIR/bad-reg.rs:67:33 | LL | asm!("", in("rax") foo, out("rax") bar); | ------------- ^^^^^^^^^^^^^^ register `rax` @@ -125,13 +125,13 @@ LL | asm!("", in("rax") foo, out("rax") bar); | register 
`rax` | help: use `lateout` instead of `out` to avoid conflict - --> $DIR/bad-reg.rs:59:18 + --> $DIR/bad-reg.rs:67:18 | LL | asm!("", in("rax") foo, out("rax") bar); | ^^^^^^^^^^^^^ error: register `ymm0` conflicts with register `xmm0` - --> $DIR/bad-reg.rs:64:34 + --> $DIR/bad-reg.rs:72:34 | LL | asm!("", in("xmm0") foo, in("ymm0") bar); | -------------- ^^^^^^^^^^^^^^ register `ymm0` @@ -139,7 +139,7 @@ LL | asm!("", in("xmm0") foo, in("ymm0") bar); | register `xmm0` error: register `ymm0` conflicts with register `xmm0` - --> $DIR/bad-reg.rs:66:34 + --> $DIR/bad-reg.rs:74:34 | LL | asm!("", in("xmm0") foo, out("ymm0") bar); | -------------- ^^^^^^^^^^^^^^^ register `ymm0` @@ -147,13 +147,13 @@ LL | asm!("", in("xmm0") foo, out("ymm0") bar); | register `xmm0` | help: use `lateout` instead of `out` to avoid conflict - --> $DIR/bad-reg.rs:66:18 + --> $DIR/bad-reg.rs:74:18 | LL | asm!("", in("xmm0") foo, out("ymm0") bar); | ^^^^^^^^^^^^^^ error: type `i32` cannot be used with this register class - --> $DIR/bad-reg.rs:31:30 + --> $DIR/bad-reg.rs:39:30 | LL | asm!("", in("st(2)") foo); | ^^^ @@ -161,7 +161,7 @@ LL | asm!("", in("st(2)") foo); = note: register class `x87_reg` supports these types: error: type `i32` cannot be used with this register class - --> $DIR/bad-reg.rs:34:28 + --> $DIR/bad-reg.rs:42:28 | LL | asm!("", in("mm0") foo); | ^^^ @@ -169,7 +169,7 @@ LL | asm!("", in("mm0") foo); = note: register class `mmx_reg` supports these types: error: type `i32` cannot be used with this register class - --> $DIR/bad-reg.rs:37:27 + --> $DIR/bad-reg.rs:45:27 | LL | asm!("", in("k0") foo); | ^^^ @@ -177,7 +177,7 @@ LL | asm!("", in("k0") foo); = note: register class `kreg0` supports these types: error: type `i32` cannot be used with this register class - --> $DIR/bad-reg.rs:42:32 + --> $DIR/bad-reg.rs:50:32 | LL | asm!("{}", in(x87_reg) foo); | ^^^ @@ -185,7 +185,7 @@ LL | asm!("{}", in(x87_reg) foo); = note: register class `x87_reg` supports these types: error: type 
`i32` cannot be used with this register class - --> $DIR/bad-reg.rs:45:32 + --> $DIR/bad-reg.rs:53:32 | LL | asm!("{}", in(mmx_reg) foo); | ^^^ @@ -193,7 +193,7 @@ LL | asm!("{}", in(mmx_reg) foo); = note: register class `mmx_reg` supports these types: error: type `i32` cannot be used with this register class - --> $DIR/bad-reg.rs:56:42 + --> $DIR/bad-reg.rs:64:42 | LL | asm!("", in("eax") foo, in("al") bar); | ^^^ @@ -201,7 +201,7 @@ LL | asm!("", in("eax") foo, in("al") bar); = note: register class `reg_byte` supports these types: i8 error: type `i32` cannot be used with this register class - --> $DIR/bad-reg.rs:61:27 + --> $DIR/bad-reg.rs:69:27 | LL | asm!("", in("al") foo, lateout("al") bar); | ^^^ @@ -209,7 +209,7 @@ LL | asm!("", in("al") foo, lateout("al") bar); = note: register class `reg_byte` supports these types: i8 error: type `i32` cannot be used with this register class - --> $DIR/bad-reg.rs:61:46 + --> $DIR/bad-reg.rs:69:46 | LL | asm!("", in("al") foo, lateout("al") bar); | ^^^ diff --git a/tests/ui/asm/x86_64/bad-reg.rs b/tests/ui/asm/x86_64/bad-reg.rs index 2a189a91c5a47..994dcfd77ccde 100644 --- a/tests/ui/asm/x86_64/bad-reg.rs +++ b/tests/ui/asm/x86_64/bad-reg.rs @@ -1,7 +1,15 @@ +//@ add-minicore //@ only-x86_64 -//@ compile-flags: -C target-feature=+avx2 +//@ revisions: stable experimental_reg +//@ compile-flags: -C target-feature=+avx2,+avx512f +#![cfg_attr(experimental_reg, feature(asm_experimental_reg))] -use std::arch::asm; +#![crate_type = "lib"] +#![feature(no_core)] +#![no_core] + +extern crate minicore; +use minicore::*; fn main() { let mut foo = 0; @@ -66,5 +74,23 @@ fn main() { asm!("", in("xmm0") foo, out("ymm0") bar); //~^ ERROR register `ymm0` conflicts with register `xmm0` asm!("", in("xmm0") foo, lateout("ymm0") bar); + + // Passing u128/i128 is currently experimental. 
+ let mut xmmword = 0u128; + + asm!("/* {:x} */", in(xmm_reg) xmmword); // requires asm_experimental_reg + //[stable]~^ ERROR type `u128` cannot be used with this register class in stable + asm!("/* {:x} */", out(xmm_reg) xmmword); // requires asm_experimental_reg + //[stable]~^ ERROR type `u128` cannot be used with this register class in stable + + asm!("/* {:y} */", in(ymm_reg) xmmword); // requires asm_experimental_reg + //[stable]~^ ERROR type `u128` cannot be used with this register class in stable + asm!("/* {:y} */", out(ymm_reg) xmmword); // requires asm_experimental_reg + //[stable]~^ ERROR type `u128` cannot be used with this register class in stable + + asm!("/* {:z} */", in(zmm_reg) xmmword); // requires asm_experimental_reg + //[stable]~^ ERROR type `u128` cannot be used with this register class in stable + asm!("/* {:z} */", out(zmm_reg) xmmword); // requires asm_experimental_reg + //[stable]~^ ERROR type `u128` cannot be used with this register class in stable } } diff --git a/tests/ui/asm/x86_64/bad-reg.stable.stderr b/tests/ui/asm/x86_64/bad-reg.stable.stderr new file mode 100644 index 0000000000000..2b5c453d70c63 --- /dev/null +++ b/tests/ui/asm/x86_64/bad-reg.stable.stderr @@ -0,0 +1,281 @@ +error: invalid register class `foo`: unknown register class + --> $DIR/bad-reg.rs:20:20 + | +LL | asm!("{}", in(foo) foo); + | ^^^^^^^^^^^ + | + = note: the following register classes are supported on this target: `reg`, `reg_abcd`, `reg_byte`, `xmm_reg`, `ymm_reg`, `zmm_reg`, `kreg`, `kreg0`, `mmx_reg`, `x87_reg`, `tmm_reg` + +error: invalid register `foo`: unknown register + --> $DIR/bad-reg.rs:22:18 + | +LL | asm!("", in("foo") foo); + | ^^^^^^^^^^^^^ + +error: invalid asm template modifier for this register class + --> $DIR/bad-reg.rs:24:15 + | +LL | asm!("{:z}", in(reg) foo); + | ^^^^ ----------- argument + | | + | template modifier + | + = note: the `reg` register class supports the following template modifiers: `l`, `x`, `e`, `r` + +error: invalid asm 
template modifier for this register class + --> $DIR/bad-reg.rs:26:15 + | +LL | asm!("{:r}", in(xmm_reg) foo); + | ^^^^ --------------- argument + | | + | template modifier + | + = note: the `xmm_reg` register class supports the following template modifiers: `x`, `y`, `z` + +error: asm template modifiers are not allowed for `const` arguments + --> $DIR/bad-reg.rs:28:15 + | +LL | asm!("{:a}", const 0); + | ^^^^ ------- argument + | | + | template modifier + +error: asm template modifiers are not allowed for `sym` arguments + --> $DIR/bad-reg.rs:30:15 + | +LL | asm!("{:a}", sym main); + | ^^^^ -------- argument + | | + | template modifier + +error: invalid register `ebp`: the frame pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:32:18 + | +LL | asm!("", in("ebp") foo); + | ^^^^^^^^^^^^^ + +error: invalid register `rsp`: the stack pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:34:18 + | +LL | asm!("", in("rsp") foo); + | ^^^^^^^^^^^^^ + +error: invalid register `ip`: the instruction pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:36:18 + | +LL | asm!("", in("ip") foo); + | ^^^^^^^^^^^^ + +error: register class `x87_reg` can only be used as a clobber, not as an input or output + --> $DIR/bad-reg.rs:39:18 + | +LL | asm!("", in("st(2)") foo); + | ^^^^^^^^^^^^^^^ + +error: register class `mmx_reg` can only be used as a clobber, not as an input or output + --> $DIR/bad-reg.rs:42:18 + | +LL | asm!("", in("mm0") foo); + | ^^^^^^^^^^^^^ + +error: register class `kreg0` can only be used as a clobber, not as an input or output + --> $DIR/bad-reg.rs:45:18 + | +LL | asm!("", in("k0") foo); + | ^^^^^^^^^^^^ + +error: register class `x87_reg` can only be used as a clobber, not as an input or output + --> $DIR/bad-reg.rs:50:20 + | +LL | asm!("{}", in(x87_reg) foo); + | ^^^^^^^^^^^^^^^ + +error: register class `mmx_reg` can only be used as a clobber, not as an input or output + --> $DIR/bad-reg.rs:53:20 + 
| +LL | asm!("{}", in(mmx_reg) foo); + | ^^^^^^^^^^^^^^^ + +error: register class `x87_reg` can only be used as a clobber, not as an input or output + --> $DIR/bad-reg.rs:56:20 + | +LL | asm!("{}", out(x87_reg) _); + | ^^^^^^^^^^^^^^ + +error: register class `mmx_reg` can only be used as a clobber, not as an input or output + --> $DIR/bad-reg.rs:58:20 + | +LL | asm!("{}", out(mmx_reg) _); + | ^^^^^^^^^^^^^^ + +error: register `al` conflicts with register `eax` + --> $DIR/bad-reg.rs:64:33 + | +LL | asm!("", in("eax") foo, in("al") bar); + | ------------- ^^^^^^^^^^^^ register `al` + | | + | register `eax` + +error: register `rax` conflicts with register `rax` + --> $DIR/bad-reg.rs:67:33 + | +LL | asm!("", in("rax") foo, out("rax") bar); + | ------------- ^^^^^^^^^^^^^^ register `rax` + | | + | register `rax` + | +help: use `lateout` instead of `out` to avoid conflict + --> $DIR/bad-reg.rs:67:18 + | +LL | asm!("", in("rax") foo, out("rax") bar); + | ^^^^^^^^^^^^^ + +error: register `ymm0` conflicts with register `xmm0` + --> $DIR/bad-reg.rs:72:34 + | +LL | asm!("", in("xmm0") foo, in("ymm0") bar); + | -------------- ^^^^^^^^^^^^^^ register `ymm0` + | | + | register `xmm0` + +error: register `ymm0` conflicts with register `xmm0` + --> $DIR/bad-reg.rs:74:34 + | +LL | asm!("", in("xmm0") foo, out("ymm0") bar); + | -------------- ^^^^^^^^^^^^^^^ register `ymm0` + | | + | register `xmm0` + | +help: use `lateout` instead of `out` to avoid conflict + --> $DIR/bad-reg.rs:74:18 + | +LL | asm!("", in("xmm0") foo, out("ymm0") bar); + | ^^^^^^^^^^^^^^ + +error: type `i32` cannot be used with this register class + --> $DIR/bad-reg.rs:39:30 + | +LL | asm!("", in("st(2)") foo); + | ^^^ + | + = note: register class `x87_reg` supports these types: + +error: type `i32` cannot be used with this register class + --> $DIR/bad-reg.rs:42:28 + | +LL | asm!("", in("mm0") foo); + | ^^^ + | + = note: register class `mmx_reg` supports these types: + +error: type `i32` cannot be used with this 
register class + --> $DIR/bad-reg.rs:45:27 + | +LL | asm!("", in("k0") foo); + | ^^^ + | + = note: register class `kreg0` supports these types: + +error: type `i32` cannot be used with this register class + --> $DIR/bad-reg.rs:50:32 + | +LL | asm!("{}", in(x87_reg) foo); + | ^^^ + | + = note: register class `x87_reg` supports these types: + +error: type `i32` cannot be used with this register class + --> $DIR/bad-reg.rs:53:32 + | +LL | asm!("{}", in(mmx_reg) foo); + | ^^^ + | + = note: register class `mmx_reg` supports these types: + +error: type `i32` cannot be used with this register class + --> $DIR/bad-reg.rs:64:42 + | +LL | asm!("", in("eax") foo, in("al") bar); + | ^^^ + | + = note: register class `reg_byte` supports these types: i8 + +error: type `i32` cannot be used with this register class + --> $DIR/bad-reg.rs:69:27 + | +LL | asm!("", in("al") foo, lateout("al") bar); + | ^^^ + | + = note: register class `reg_byte` supports these types: i8 + +error: type `i32` cannot be used with this register class + --> $DIR/bad-reg.rs:69:46 + | +LL | asm!("", in("al") foo, lateout("al") bar); + | ^^^ + | + = note: register class `reg_byte` supports these types: i8 + +error[E0658]: type `u128` cannot be used with this register class in stable + --> $DIR/bad-reg.rs:81:40 + | +LL | asm!("/* {:x} */", in(xmm_reg) xmmword); // requires asm_experimental_reg + | ^^^^^^^ + | + = note: see issue #133416 for more information + = help: add `#![feature(asm_experimental_reg)]` to the crate attributes to enable + = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date + +error[E0658]: type `u128` cannot be used with this register class in stable + --> $DIR/bad-reg.rs:83:41 + | +LL | asm!("/* {:x} */", out(xmm_reg) xmmword); // requires asm_experimental_reg + | ^^^^^^^ + | + = note: see issue #133416 for more information + = help: add `#![feature(asm_experimental_reg)]` to the crate attributes to enable + = note: this compiler was built on 
YYYY-MM-DD; consider upgrading it if it is out of date + +error[E0658]: type `u128` cannot be used with this register class in stable + --> $DIR/bad-reg.rs:86:40 + | +LL | asm!("/* {:y} */", in(ymm_reg) xmmword); // requires asm_experimental_reg + | ^^^^^^^ + | + = note: see issue #133416 for more information + = help: add `#![feature(asm_experimental_reg)]` to the crate attributes to enable + = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date + +error[E0658]: type `u128` cannot be used with this register class in stable + --> $DIR/bad-reg.rs:88:41 + | +LL | asm!("/* {:y} */", out(ymm_reg) xmmword); // requires asm_experimental_reg + | ^^^^^^^ + | + = note: see issue #133416 for more information + = help: add `#![feature(asm_experimental_reg)]` to the crate attributes to enable + = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date + +error[E0658]: type `u128` cannot be used with this register class in stable + --> $DIR/bad-reg.rs:91:40 + | +LL | asm!("/* {:z} */", in(zmm_reg) xmmword); // requires asm_experimental_reg + | ^^^^^^^ + | + = note: see issue #133416 for more information + = help: add `#![feature(asm_experimental_reg)]` to the crate attributes to enable + = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date + +error[E0658]: type `u128` cannot be used with this register class in stable + --> $DIR/bad-reg.rs:93:41 + | +LL | asm!("/* {:z} */", out(zmm_reg) xmmword); // requires asm_experimental_reg + | ^^^^^^^ + | + = note: see issue #133416 for more information + = help: add `#![feature(asm_experimental_reg)]` to the crate attributes to enable + = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date + +error: aborting due to 34 previous errors + +For more information about this error, try `rustc --explain E0658`. 
From 0b05e69bce4bdb0a76a4dab622e1c1844d9cb2f0 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Wed, 14 Jan 2026 10:59:12 +0000 Subject: [PATCH 12/90] Rustup to rustc 1.94.0-nightly (2850ca829 2026-01-13) --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index de0eb1d7322a1..1a2b59db089fd 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2026-01-02" +channel = "nightly-2026-01-14" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From e4e41b74262e589258fd697f034fe158b5de17bf Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Wed, 14 Jan 2026 11:18:07 +0000 Subject: [PATCH 13/90] Use -Zno-embed-metadata for the codegen backend and sysroot This saves about 10MB on the dist size and about 240MB on the build dir size. --- build_system/build_backend.rs | 2 ++ build_system/build_sysroot.rs | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/build_system/build_backend.rs b/build_system/build_backend.rs index c0a8cc95614f6..6b14727cd153e 100644 --- a/build_system/build_backend.rs +++ b/build_system/build_backend.rs @@ -43,6 +43,8 @@ pub(crate) fn build_backend( cmd.arg("--release"); + cmd.arg("-Zno-embed-metadata"); + eprintln!("[BUILD] rustc_codegen_cranelift"); crate::utils::spawn_and_wait(cmd); diff --git a/build_system/build_sysroot.rs b/build_system/build_sysroot.rs index 2cc93713c6647..5205ec1e8aaa1 100644 --- a/build_system/build_sysroot.rs +++ b/build_system/build_sysroot.rs @@ -242,6 +242,7 @@ fn build_clif_sysroot_for_triple( build_cmd.arg("--release"); build_cmd.arg("--features").arg("backtrace panic-unwind"); build_cmd.arg(format!("-Zroot-dir={}", STDLIB_SRC.to_path(dirs).display())); + build_cmd.arg("-Zno-embed-metadata"); build_cmd.env("CARGO_PROFILE_RELEASE_DEBUG", "true"); 
build_cmd.env("__CARGO_DEFAULT_LIB_METADATA", "cg_clif"); if compiler.triple.contains("apple") { @@ -256,7 +257,7 @@ fn build_clif_sysroot_for_triple( for entry in fs::read_dir(build_dir.join("deps")).unwrap() { let entry = entry.unwrap(); if let Some(ext) = entry.path().extension() { - if ext == "rmeta" || ext == "d" || ext == "dSYM" || ext == "clif" { + if ext == "d" || ext == "dSYM" || ext == "clif" { continue; } } else { From 4d4fb2784f59700f9bc35f2a3a4c60dbf3464504 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Sat, 17 Jan 2026 16:03:18 +0000 Subject: [PATCH 14/90] Pass -Zbinary-dep-depinfo when testing rustc tests --- scripts/setup_rust_fork.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/scripts/setup_rust_fork.sh b/scripts/setup_rust_fork.sh index c16cb4e538fe4..9d618554cb4ff 100644 --- a/scripts/setup_rust_fork.sh +++ b/scripts/setup_rust_fork.sh @@ -49,6 +49,19 @@ std-features = ["panic-unwind"] EOF cat < Date: Sat, 17 Jan 2026 16:13:30 +0000 Subject: [PATCH 15/90] Rustup to rustc 1.94.0-nightly (f6a07efc8 2026-01-16) --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 1a2b59db089fd..b6abd825943e8 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2026-01-14" +channel = "nightly-2026-01-17" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From ef9002c3d7a6f48e053f55c4be5c9b8c13e3049f Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Mon, 19 Jan 2026 00:42:38 +0100 Subject: [PATCH 16/90] add `simd_splat` intrinsic --- src/intrinsics/simd.rs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/intrinsics/simd.rs b/src/intrinsics/simd.rs index bef9c67474577..200cedf0f6ae0 100644 --- a/src/intrinsics/simd.rs +++ b/src/intrinsics/simd.rs @@ -348,6 +348,31 @@ pub(super) fn 
codegen_simd_intrinsic_call<'tcx>( ret.write_cvalue(fx, ret_lane); } + sym::simd_splat => { + intrinsic_args!(fx, args => (value); intrinsic); + + if !ret.layout().ty.is_simd() { + report_simd_type_validation_error(fx, intrinsic, span, ret.layout().ty); + return; + } + let (lane_count, lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx); + + if value.layout().ty != lane_ty { + fx.tcx.dcx().span_fatal( + span, + format!( + "[simd_splat] expected element type {lane_ty:?}, got {got:?}", + got = value.layout().ty + ), + ); + } + + for i in 0..lane_count { + let ret_lane = ret.place_lane(fx, i.into()); + ret_lane.write_cvalue(fx, value); + } + } + sym::simd_neg | sym::simd_bswap | sym::simd_bitreverse From 9949a536d329977366e66904e18fcb2fbb76185b Mon Sep 17 00:00:00 2001 From: Jonathan Brouwer Date: Fri, 9 Jan 2026 15:47:02 +0100 Subject: [PATCH 17/90] Remove all allows for `diagnostic_outside_of_impl` and `untranslatable_diagnostic` throughout the codebase This PR was mostly made by search&replacing --- src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 5fdecd014ac05..7361a6af41784 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,4 @@ // tidy-alphabetical-start -#![allow(rustc::diagnostic_outside_of_impl)] -#![allow(rustc::untranslatable_diagnostic)] // Note: please avoid adding other feature gates where possible #![feature(rustc_private)] // Only used to define intrinsics in `compiler_builtins.rs`. 
From 0b71ff60f8f905bbc2e10415ab045cfbabf05793 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sat, 27 Dec 2025 17:17:54 +0100 Subject: [PATCH 18/90] `c_variadic`: use `Clone` instead of LLVM `va_copy` --- src/intrinsics/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/intrinsics/mod.rs b/src/intrinsics/mod.rs index a78c6e0a4e7ac..ab9a11305baa3 100644 --- a/src/intrinsics/mod.rs +++ b/src/intrinsics/mod.rs @@ -1506,7 +1506,7 @@ fn codegen_regular_intrinsic_call<'tcx>( } // FIXME implement variadics in cranelift - sym::va_copy | sym::va_arg | sym::va_end => { + sym::va_arg | sym::va_end => { fx.tcx.dcx().span_fatal( source_info.span, "Defining variadic functions is not yet supported by Cranelift", From 7b8c532d9acae9856682d899891690da5061e88b Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Wed, 21 Jan 2026 12:32:38 +0000 Subject: [PATCH 19/90] Rustup to rustc 1.95.0-nightly (5c49c4f7c 2026-01-20) --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index b6abd825943e8..1dc900ca66baf 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2026-01-17" +channel = "nightly-2026-01-21" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From fe61cd6fa3bac333ab44e16674dae160d820ad0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Le=C3=B3n=20Orell=20Valerian=20Liehr?= Date: Tue, 20 Jan 2026 22:14:41 +0100 Subject: [PATCH 20/90] Support debuginfo for assoc const bindings --- src/debuginfo/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/debuginfo/mod.rs b/src/debuginfo/mod.rs index 0cd510037293d..756f86a7d0111 100644 --- a/src/debuginfo/mod.rs +++ b/src/debuginfo/mod.rs @@ -242,7 +242,7 @@ impl DebugContext { let generics = tcx.generics_of(enclosing_fn_def_id); let args = instance.args.truncate_to(tcx, generics); - 
type_names::push_generic_params( + type_names::push_generic_args( tcx, tcx.normalize_erasing_regions(ty::TypingEnv::fully_monomorphized(), args), &mut name, From 41b7226b85a48ab0c5fe09cd5b7ba99a14696634 Mon Sep 17 00:00:00 2001 From: Pratham Jaiswal <100998543+pantha704@users.noreply.github.com> Date: Sat, 24 Jan 2026 21:01:40 +0530 Subject: [PATCH 21/90] Fix CI disk space issue for abi-cafe tests (#1625) * Fix CI disk space issue for abi-cafe tests Port disk space cleanup script from rust-lang/rust to free space on GitHub Actions runners before running abi-cafe tests. * ci: update free-disk-space.sh to match rust-lang/rust@d29e478 * ci: set RUNNER_ENVIRONMENT=github-hosted for free-disk-space script * Revert indentation change --------- Co-authored-by: bjorn3 <17426603+bjorn3@users.noreply.github.com> --- .github/scripts/free-disk-space.sh | 259 +++++++++++++++++++++++++++++ .github/workflows/abi-cafe.yml | 6 + 2 files changed, 265 insertions(+) create mode 100755 .github/scripts/free-disk-space.sh diff --git a/.github/scripts/free-disk-space.sh b/.github/scripts/free-disk-space.sh new file mode 100755 index 0000000000000..06afdaad619f3 --- /dev/null +++ b/.github/scripts/free-disk-space.sh @@ -0,0 +1,259 @@ +#!/bin/bash +# Ported from rust-lang/rust commit d29e4783dff30f9526eeba3929ebfe86c00c9dad in src/ci/scripts/free-disk-space-linux.sh +set -euo pipefail + +# Script inspired by https://github.com/jlumbroso/free-disk-space +isX86() { + local arch + arch=$(uname -m) + if [ "$arch" = "x86_64" ]; then + return 0 + else + return 1 + fi +} + +# In aws codebuild, the variable RUNNER_ENVIRONMENT is "self-hosted". +isGitHubRunner() { + # `:-` means "use the value of RUNNER_ENVIRONMENT if it exists, otherwise use an empty string". 
+ if [[ "${RUNNER_ENVIRONMENT:-}" == "github-hosted" ]]; then + return 0 + else + return 1 + fi +} + +# print a line of the specified character +printSeparationLine() { + for ((i = 0; i < 80; i++)); do + printf "%s" "$1" + done + printf "\n" +} + +# REF: https://stackoverflow.com/a/450821/408734 +getAvailableSpace() { + df -a | awk 'NR > 1 {avail+=$4} END {print avail}' +} + +# REF: https://unix.stackexchange.com/a/44087/60849 +formatByteCount() { + numfmt --to=iec-i --suffix=B --padding=7 "${1}000" +} + +# macro to output saved space +printSavedSpace() { + # Disk space before the operation + local before=${1} + local title=${2:-} + + local after + after=$(getAvailableSpace) + local saved=$((after - before)) + + if [ "$saved" -lt 0 ]; then + echo "::warning::Saved space is negative: $saved. Using '0' as saved space." + saved=0 + fi + + echo "" + printSeparationLine "*" + if [ -n "${title}" ]; then + echo "=> ${title}: Saved $(formatByteCount "$saved")" + else + echo "=> Saved $(formatByteCount "$saved")" + fi + printSeparationLine "*" + echo "" +} + +# macro to print output of df with caption +printDF() { + local caption=${1} + + printSeparationLine "=" + echo "${caption}" + echo "" + echo "$ df -h" + echo "" + df -h + printSeparationLine "=" +} + +removeUnusedFilesAndDirs() { + local to_remove=( + "/usr/share/java" + ) + + if isGitHubRunner; then + to_remove+=( + "/usr/local/aws-sam-cli" + "/usr/local/doc/cmake" + "/usr/local/julia"* + "/usr/local/lib/android" + "/usr/local/share/chromedriver-"* + "/usr/local/share/chromium" + "/usr/local/share/cmake-"* + "/usr/local/share/edge_driver" + "/usr/local/share/emacs" + "/usr/local/share/gecko_driver" + "/usr/local/share/icons" + "/usr/local/share/powershell" + "/usr/local/share/vcpkg" + "/usr/local/share/vim" + "/usr/share/apache-maven-"* + "/usr/share/gradle-"* + "/usr/share/kotlinc" + "/usr/share/miniconda" + "/usr/share/php" + "/usr/share/ri" + "/usr/share/swift" + + # binaries + "/usr/local/bin/azcopy" + 
"/usr/local/bin/bicep" + "/usr/local/bin/ccmake" + "/usr/local/bin/cmake-"* + "/usr/local/bin/cmake" + "/usr/local/bin/cpack" + "/usr/local/bin/ctest" + "/usr/local/bin/helm" + "/usr/local/bin/kind" + "/usr/local/bin/kustomize" + "/usr/local/bin/minikube" + "/usr/local/bin/packer" + "/usr/local/bin/phpunit" + "/usr/local/bin/pulumi-"* + "/usr/local/bin/pulumi" + "/usr/local/bin/stack" + + # Haskell runtime + "/usr/local/.ghcup" + + # Azure + "/opt/az" + "/usr/share/az_"* + ) + + if [ -n "${AGENT_TOOLSDIRECTORY:-}" ]; then + # Environment variable set by GitHub Actions + to_remove+=( + "${AGENT_TOOLSDIRECTORY}" + ) + else + echo "::warning::AGENT_TOOLSDIRECTORY is not set. Skipping removal." + fi + else + # Remove folders and files present in AWS CodeBuild + to_remove+=( + # binaries + "/usr/local/bin/ecs-cli" + "/usr/local/bin/eksctl" + "/usr/local/bin/kubectl" + + "${HOME}/.gradle" + "${HOME}/.dotnet" + "${HOME}/.goenv" + "${HOME}/.phpenv" + + ) + fi + + for element in "${to_remove[@]}"; do + if [ ! -e "$element" ]; then + # The file or directory doesn't exist. + # Maybe it was removed in a newer version of the runner or it's not present in a + # specific architecture (e.g. ARM). + echo "::warning::Directory or file $element does not exist, skipping." + fi + done + + # Remove all files and directories at once to save time. 
+ sudo rm -rf "${to_remove[@]}" +} + +execAndMeasureSpaceChange() { + local operation=${1} # Function to execute + local title=${2} + + local before + before=$(getAvailableSpace) + $operation + + printSavedSpace "$before" "$title" +} + +# REF: https://github.com/apache/flink/blob/master/tools/azure-pipelines/free_disk_space.sh +cleanPackages() { + local packages=( + '^aspnetcore-.*' + '^dotnet-.*' + '^llvm-.*' + '^mongodb-.*' + 'firefox' + 'libgl1-mesa-dri' + 'mono-devel' + 'php.*' + ) + + if isGitHubRunner; then + packages+=( + azure-cli + ) + + if isX86; then + packages+=( + 'google-chrome-stable' + 'google-cloud-cli' + 'google-cloud-sdk' + 'powershell' + ) + fi + else + packages+=( + 'google-chrome-stable' + ) + fi + + WAIT_DPKG_LOCK="-o DPkg::Lock::Timeout=60" + sudo apt-get ${WAIT_DPKG_LOCK} -qq remove -y --fix-missing "${packages[@]}" + + sudo apt-get ${WAIT_DPKG_LOCK} autoremove -y \ + || echo "::warning::The command [sudo apt-get autoremove -y] failed" + sudo apt-get ${WAIT_DPKG_LOCK} clean \ + || echo "::warning::The command [sudo apt-get clean] failed" +} + +# They aren't present in ubuntu 24 runners. +cleanDocker() { + echo "=> Removing the following docker images:" + sudo docker image ls + echo "=> Removing docker images..." 
+ sudo docker image prune --all --force || true +} + +# Remove Swap storage +cleanSwap() { + sudo swapoff -a || true + sudo rm -rf /mnt/swapfile || true + free -h +} + +# Display initial disk space stats +AVAILABLE_INITIAL=$(getAvailableSpace) + +printDF "BEFORE CLEAN-UP:" +echo "" +execAndMeasureSpaceChange cleanPackages "Unused packages" +execAndMeasureSpaceChange cleanDocker "Docker images" +execAndMeasureSpaceChange cleanSwap "Swap storage" +execAndMeasureSpaceChange removeUnusedFilesAndDirs "Unused files and directories" + +# Output saved space statistic +echo "" +printDF "AFTER CLEAN-UP:" + +echo "" +echo "" + +printSavedSpace "$AVAILABLE_INITIAL" "Total saved" diff --git a/.github/workflows/abi-cafe.yml b/.github/workflows/abi-cafe.yml index 170c7126c296b..3367562f26838 100644 --- a/.github/workflows/abi-cafe.yml +++ b/.github/workflows/abi-cafe.yml @@ -49,6 +49,12 @@ jobs: if: matrix.os == 'ubuntu-latest' run: cat /proc/cpuinfo + - name: Free disk space + if: runner.os == 'Linux' + env: + RUNNER_ENVIRONMENT: github-hosted + run: .github/scripts/free-disk-space.sh + - name: Cache cargo target dir uses: actions/cache@v4 with: From 11d0889dc0532c287cb7b2b5b8cfc6269a36e5f7 Mon Sep 17 00:00:00 2001 From: Simonas Kazlauskas Date: Tue, 13 Jan 2026 16:52:02 +0200 Subject: [PATCH 22/90] abi: add a rust-preserve-none calling convention This is the conceptual opposite of the rust-cold calling convention and is particularly useful in combination with the new `explicit_tail_calls` feature. For relatively tight loops implemented with tail calling (`become`) each of the function with the regular calling convention is still responsible for restoring the initial value of the preserved registers. 
So it is not unusual to end up with a situation where each step in the tail call loop is spilling and reloading registers, along the lines of: foo: push r12 ; do things pop r12 jmp next_step This adds up quickly, especially when most of the clobberable registers are already used to pass arguments or other uses. I was thinking of making the name of this ABI a little less LLVM-derived and more like a conceptual inverse of `rust-cold`, but could not come with a great name (`rust-cold` is itself not a great name: cold in what context? from which perspective? is it supposed to mean that the function is rarely called?) --- src/abi/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/abi/mod.rs b/src/abi/mod.rs index 09d71f5dd5579..5a46f79e2ba00 100644 --- a/src/abi/mod.rs +++ b/src/abi/mod.rs @@ -56,6 +56,9 @@ pub(crate) fn conv_to_call_conv( CanonAbi::Rust | CanonAbi::C => default_call_conv, CanonAbi::RustCold => CallConv::Cold, + // Cranelift doesn't currently have anything for this. + CanonAbi::RustPreserveNone => default_call_conv, + // Functions with this calling convention can only be called from assembly, but it is // possible to declare an `extern "custom"` block, so the backend still needs a calling // convention for declaring foreign functions. 
From 82fc4ec5950be12b340b6cfe8c37c36f79b69f62 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Mon, 26 Jan 2026 16:24:35 +0000 Subject: [PATCH 23/90] Rustup to rustc 1.95.0-nightly (873d4682c 2026-01-25) --- ...esn-t-get-emitted-unless-VaList-is-a.patch | 25 +++++++++++++++++++ rust-toolchain.toml | 2 +- 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch diff --git a/patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch b/patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch new file mode 100644 index 0000000000000..34a95dadf0ef0 --- /dev/null +++ b/patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch @@ -0,0 +1,25 @@ +From 116abc64add4d617104993a7a3011f20bcf31ef2 Mon Sep 17 00:00:00 2001 +From: bjorn3 <17426603+bjorn3@users.noreply.github.com> +Date: Mon, 26 Jan 2026 16:20:58 +0000 +Subject: [PATCH] Ensure va_end doesn't get emitted unless VaList is actually + used + +--- + library/core/src/ffi/va_list.rs | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/library/core/src/ffi/va_list.rs b/library/core/src/ffi/va_list.rs +index d0f1553..75129af 100644 +--- a/library/core/src/ffi/va_list.rs ++++ b/library/core/src/ffi/va_list.rs +@@ -217,6 +217,7 @@ impl Clone for VaList<'_> { + } + + impl<'f> Drop for VaList<'f> { ++ #[inline] + fn drop(&mut self) { + // SAFETY: this variable argument list is being dropped, so won't be read from again. 
+ unsafe { va_end(self) } +-- +2.43.0 + diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 1dc900ca66baf..726c690ed0698 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2026-01-21" +channel = "nightly-2026-01-26" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From 2859fde18af84f07c886d523f3747176da775ed9 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Mon, 26 Jan 2026 16:38:55 +0000 Subject: [PATCH 24/90] Fix rustc test suite --- scripts/test_rustc_tests.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/test_rustc_tests.sh b/scripts/test_rustc_tests.sh index 288141d345b7f..8d7da8a6a008a 100755 --- a/scripts/test_rustc_tests.sh +++ b/scripts/test_rustc_tests.sh @@ -50,6 +50,7 @@ rm -r tests/run-make/c-link-to-rust-va-list-fn # requires callee side vararg sup rm tests/ui/c-variadic/valid.rs # same rm tests/ui/c-variadic/trait-method.rs # same rm tests/ui/c-variadic/inherent-method.rs # same +rm tests/ui/c-variadic/copy.rs # same rm tests/ui/sanitizer/kcfi-c-variadic.rs # same rm tests/ui/c-variadic/same-program-multiple-abis-x86_64.rs # variadics for calling conventions other than C unsupported rm tests/ui/delegation/fn-header.rs @@ -147,6 +148,7 @@ rm -r tests/run-make-cargo/panic-immediate-abort-codegen # same rm -r tests/run-make/missing-unstable-trait-bound # This disables support for unstable features, but running cg_clif needs some unstable features rm -r tests/run-make/const-trait-stable-toolchain # same rm -r tests/run-make/print-request-help-stable-unstable # same +rm -r tests/run-make/issue-149402-suggest-unresolve # same rm -r tests/run-make/incr-add-rust-src-component rm tests/ui/errors/remap-path-prefix-sysroot.rs # different sysroot source path rm -r tests/run-make/export/extern-opt # something about rustc version mismatches From c73f662595ba6bf6e157848d36b3ad46608eb090 Mon Sep 17 00:00:00 2001 
From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Tue, 27 Jan 2026 10:14:50 +0000 Subject: [PATCH 25/90] Rustup to rustc 1.95.0-nightly (474276961 2026-01-26) --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 726c690ed0698..1faaa47c88433 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2026-01-26" +channel = "nightly-2026-01-27" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From 20e7d31d4ddf638d6129b571d1a0fbffddaaab2f Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Tue, 27 Jan 2026 10:28:58 +0000 Subject: [PATCH 26/90] Re-enable a couple of rustc tests --- scripts/test_rustc_tests.sh | 4 ---- 1 file changed, 4 deletions(-) diff --git a/scripts/test_rustc_tests.sh b/scripts/test_rustc_tests.sh index 8d7da8a6a008a..4b6a1d238b4e1 100755 --- a/scripts/test_rustc_tests.sh +++ b/scripts/test_rustc_tests.sh @@ -165,11 +165,7 @@ rm -r tests/run-make/panic-abort-eh_frame # .eh_frame emitted with panic=abort # bugs in the test suite # ====================== rm tests/ui/process/nofile-limit.rs # TODO some AArch64 linking issue -rm tests/ui/backtrace/synchronized-panic-handler.rs # missing needs-unwind annotation -rm tests/ui/lint/non-snake-case/lint-non-snake-case-crate.rs # same -rm tests/ui/async-await/async-drop/async-drop-initial.rs # same (rust-lang/rust#140493) rm -r tests/ui/codegen/equal-pointers-unequal # make incorrect assumptions about the location of stack variables -rm -r tests/run-make-cargo/rustdoc-scrape-examples-paths # FIXME(rust-lang/rust#145580) incr comp bug rm -r tests/incremental/extern_static/issue-49153.rs # assumes reference to undefined static gets optimized away rm tests/ui/intrinsics/panic-uninitialized-zeroed.rs # really slow with unoptimized libstd From cf9e94ca494e64383898782697bde256695ea68a Mon Sep 17 00:00:00 
2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Tue, 27 Jan 2026 10:44:55 +0000 Subject: [PATCH 27/90] Move some rustc test disables around --- scripts/test_rustc_tests.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/test_rustc_tests.sh b/scripts/test_rustc_tests.sh index 4b6a1d238b4e1..a50eec40cb649 100755 --- a/scripts/test_rustc_tests.sh +++ b/scripts/test_rustc_tests.sh @@ -10,11 +10,6 @@ pushd rust command -v rg >/dev/null 2>&1 || cargo install ripgrep -rm -r tests/ui/{lto/,linkage*} || true -for test in $(rg --files-with-matches "lto" tests/{codegen-units,ui,incremental}); do - rm $test -done - # should-fail tests don't work when compiletest is compiled with panic=abort for test in $(rg --files-with-matches "//@ should-fail" tests/{codegen-units,ui,incremental}); do rm $test @@ -38,6 +33,7 @@ rm tests/ui/simd/intrinsic/generic-arithmetic-pass.rs # unimplemented simd_funne rm -r tests/ui/scalable-vectors # scalable vectors are unsupported # exotic linkages +rm -r tests/ui/linkage* rm tests/incremental/hashes/function_interfaces.rs rm tests/incremental/hashes/statics.rs rm -r tests/run-make/naked-symbol-visibility @@ -80,6 +76,10 @@ rm -r tests/ui/eii # EII not yet implemented rm -r tests/run-make/forced-unwind-terminate-pof # forced unwinding doesn't take precedence # requires LTO +rm -r tests/ui/lto +for test in $(rg --files-with-matches "lto" tests/{codegen-units,ui,incremental}); do + rm $test +done rm -r tests/run-make/cdylib rm -r tests/run-make/codegen-options-parsing rm -r tests/run-make/lto-* From 0ad8260c7506cf80b65d0595f17ad4f9d4199737 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Tue, 27 Jan 2026 11:31:02 +0000 Subject: [PATCH 28/90] Update a couple of dependencies --- Cargo.lock | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3d13b5540e196..766c99c04c167 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -28,9 +28,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" dependencies = [ "allocator-api2", ] @@ -257,9 +257,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.4" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "foldhash", ] @@ -282,9 +282,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.178" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] name = "libloading" @@ -298,9 +298,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "log" @@ -337,27 +337,27 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = "1.0.42" +version = "1.0.44" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] [[package]] name = "regalloc2" -version = "0.13.3" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e249c660440317032a71ddac302f25f1d5dff387667bcc3978d1f77aa31ac34" +checksum = "08effbc1fa53aaebff69521a5c05640523fab037b34a4a2c109506bc938246fa" dependencies = [ "allocator-api2", "bumpalo", @@ -446,9 +446,9 @@ checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "syn" -version = "2.0.111" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -457,9 +457,9 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df7f62577c25e07834649fc3b39fafdc597c0a3527dc1c60129201ccfcbaa50c" +checksum = "b1dd07eb858a2067e2f3c7155d54e929265c264e6f37efe3ee7a8d1b5a1dd0ba" [[package]] name = "unicode-ident" From a257fb960fa09afb1e733adebbd898aa6d80e851 Mon Sep 17 00:00:00 2001 From: zedddie Date: Sun, 25 Jan 2026 20:34:56 +0100 Subject: [PATCH 29/90] Fix ICE in transmutability error reporting when type aliases are normalized --- .../traits/fulfillment_errors.rs | 56 ++++++++++++++++++- .../type-alias-normalization.rs | 31 ++++++++++ .../type-alias-normalization.stderr | 29 ++++++++++ 3 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 tests/ui/transmutability/type-alias-normalization.rs create mode 100644 tests/ui/transmutability/type-alias-normalization.stderr diff --git 
a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs index 6872d038fb7f0..bccb04b4ff302 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs @@ -254,9 +254,16 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { LangItem::TransmuteTrait, ) { // Recompute the safe transmute reason and use that for the error reporting + let (report_obligation, report_pred) = + self.select_transmute_obligation_for_reporting( + &obligation, + main_trait_predicate, + root_obligation, + ); + match self.get_safe_transmute_error_and_reason( - obligation.clone(), - main_trait_predicate, + report_obligation, + report_pred, span, ) { GetSafeTransmuteErrorAndReason::Silent => { @@ -2793,6 +2800,51 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { }) } + fn select_transmute_obligation_for_reporting( + &self, + obligation: &PredicateObligation<'tcx>, + trait_predicate: ty::PolyTraitPredicate<'tcx>, + root_obligation: &PredicateObligation<'tcx>, + ) -> (PredicateObligation<'tcx>, ty::PolyTraitPredicate<'tcx>) { + let ocx = ObligationCtxt::new(self); + let normalized_predicate = self.tcx.erase_and_anonymize_regions( + self.tcx.instantiate_bound_regions_with_erased(trait_predicate), + ); + let trait_ref = normalized_predicate.trait_ref; + + let Ok(assume) = ocx.structurally_normalize_const( + &obligation.cause, + obligation.param_env, + trait_ref.args.const_at(2), + ) else { + return (obligation.clone(), trait_predicate); + }; + + let Some(assume) = rustc_transmute::Assume::from_const(self.tcx, assume) else { + return (obligation.clone(), trait_predicate); + }; + + let is_normalized_yes = matches!( + rustc_transmute::TransmuteTypeEnv::new(self.tcx).is_transmutable( + trait_ref.args.type_at(1), + trait_ref.args.type_at(0), + assume, + ), + rustc_transmute::Answer::Yes, + ); + + // If 
the normalized check unexpectedly passes, fall back to root obligation for reporting. + if is_normalized_yes + && let ty::PredicateKind::Clause(ty::ClauseKind::Trait(root_pred)) = + root_obligation.predicate.kind().skip_binder() + && root_pred.def_id() == trait_predicate.def_id() + { + return (root_obligation.clone(), root_obligation.predicate.kind().rebind(root_pred)); + } + + (obligation.clone(), trait_predicate) + } + fn get_safe_transmute_error_and_reason( &self, obligation: PredicateObligation<'tcx>, diff --git a/tests/ui/transmutability/type-alias-normalization.rs b/tests/ui/transmutability/type-alias-normalization.rs new file mode 100644 index 0000000000000..8c8734c677e8d --- /dev/null +++ b/tests/ui/transmutability/type-alias-normalization.rs @@ -0,0 +1,31 @@ +//! regression test for https://github.com/rust-lang/rust/issues/151462 +//@compile-flags: -Znext-solver=globally +#![feature(lazy_type_alias, transmutability)] +#![allow(incomplete_features)] +mod assert { + use std::mem::{Assume, TransmuteFrom}; + + pub fn is_maybe_transmutable() + where + Src: TransmuteFrom< + Src, + { + Assume { + alignment: true, + lifetimes: true, + safety: true, + validity: true, + } + }, + >, + { + } +} + +fn test() { + type JustUnit = (); + assert::is_maybe_transmutable::(); + //~^ ERROR `JustUnit` cannot be safely transmuted into `JustUnit` +} + +fn main() {} diff --git a/tests/ui/transmutability/type-alias-normalization.stderr b/tests/ui/transmutability/type-alias-normalization.stderr new file mode 100644 index 0000000000000..d224f755c6119 --- /dev/null +++ b/tests/ui/transmutability/type-alias-normalization.stderr @@ -0,0 +1,29 @@ +error[E0277]: `JustUnit` cannot be safely transmuted into `JustUnit` + --> $DIR/type-alias-normalization.rs:27:37 + | +LL | assert::is_maybe_transmutable::(); + | ^^^^^^^^ analyzing the transmutability of `JustUnit` is not yet supported + | +note: required by a bound in `is_maybe_transmutable` + --> $DIR/type-alias-normalization.rs:10:14 + | +LL 
| pub fn is_maybe_transmutable() + | --------------------- required by a bound in this function +LL | where +LL | Src: TransmuteFrom< + | ______________^ +LL | | Src, +LL | | { +LL | | Assume { +... | +LL | | }, +LL | | >, + | |_________^ required by this bound in `is_maybe_transmutable` +help: consider introducing a `where` clause, but there might be an alternative better way to express this requirement + | +LL | fn test() where (): TransmuteFrom<(), Assume { alignment: true, lifetimes: true, safety: true, validity: true }> { + | ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0277`. From f49a46c7aa9feae88c6ae8d3d678ebb9b96efc23 Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Thu, 11 Sep 2025 10:06:04 +0800 Subject: [PATCH 30/90] loongarch: Sync SIMD intrinsics with C --- .../src/loongarch64/lasx/generated.rs | 166 ++++++++++- .../core_arch/src/loongarch64/lasx/tests.rs | 281 ++++++++++++++++++ .../src/loongarch64/lsx/generated.rs | 4 +- .../crates/stdarch-gen-loongarch/lasx.spec | 92 +++++- .../crates/stdarch-gen-loongarch/lasxintrin.h | 164 +++++++++- .../crates/stdarch-gen-loongarch/lsx.spec | 2 +- .../crates/stdarch-gen-loongarch/lsxintrin.h | 8 +- .../crates/stdarch-gen-loongarch/src/main.rs | 6 +- 8 files changed, 708 insertions(+), 15 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs index cda0ebec67799..1d9d4e8248e63 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs @@ -7,7 +7,7 @@ // ``` use crate::mem::transmute; -use super::types::*; +use super::super::*; #[allow(improper_ctypes)] unsafe extern "unadjusted" { @@ -980,7 +980,7 @@ unsafe extern "unadjusted" { #[link_name = 
"llvm.loongarch.lasx.xvssrln.w.d"] fn __lasx_xvssrln_w_d(a: __v4i64, b: __v4i64) -> __v8i32; #[link_name = "llvm.loongarch.lasx.xvorn.v"] - fn __lasx_xvorn_v(a: __v32i8, b: __v32i8) -> __v32i8; + fn __lasx_xvorn_v(a: __v32u8, b: __v32u8) -> __v32u8; #[link_name = "llvm.loongarch.lasx.xvldi"] fn __lasx_xvldi(a: i32) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvldx"] @@ -1491,6 +1491,42 @@ unsafe extern "unadjusted" { fn __lasx_xvrepli_h(a: i32) -> __v16i16; #[link_name = "llvm.loongarch.lasx.xvrepli.w"] fn __lasx_xvrepli_w(a: i32) -> __v8i32; + #[link_name = "llvm.loongarch.lasx.cast.128.s"] + fn __lasx_cast_128_s(a: __v4f32) -> __v8f32; + #[link_name = "llvm.loongarch.lasx.cast.128.d"] + fn __lasx_cast_128_d(a: __v2f64) -> __v4f64; + #[link_name = "llvm.loongarch.lasx.cast.128"] + fn __lasx_cast_128(a: __v2i64) -> __v4i64; + #[link_name = "llvm.loongarch.lasx.concat.128.s"] + fn __lasx_concat_128_s(a: __v4f32, b: __v4f32) -> __v8f32; + #[link_name = "llvm.loongarch.lasx.concat.128.d"] + fn __lasx_concat_128_d(a: __v2f64, b: __v2f64) -> __v4f64; + #[link_name = "llvm.loongarch.lasx.concat.128"] + fn __lasx_concat_128(a: __v2i64, b: __v2i64) -> __v4i64; + #[link_name = "llvm.loongarch.lasx.extract.128.lo.s"] + fn __lasx_extract_128_lo_s(a: __v8f32) -> __v4f32; + #[link_name = "llvm.loongarch.lasx.extract.128.hi.s"] + fn __lasx_extract_128_hi_s(a: __v8f32) -> __v4f32; + #[link_name = "llvm.loongarch.lasx.extract.128.lo.d"] + fn __lasx_extract_128_lo_d(a: __v4f64) -> __v2f64; + #[link_name = "llvm.loongarch.lasx.extract.128.hi.d"] + fn __lasx_extract_128_hi_d(a: __v4f64) -> __v2f64; + #[link_name = "llvm.loongarch.lasx.extract.128.lo"] + fn __lasx_extract_128_lo(a: __v4i64) -> __v2i64; + #[link_name = "llvm.loongarch.lasx.extract.128.hi"] + fn __lasx_extract_128_hi(a: __v4i64) -> __v2i64; + #[link_name = "llvm.loongarch.lasx.insert.128.lo.s"] + fn __lasx_insert_128_lo_s(a: __v8f32, b: __v4f32) -> __v8f32; + #[link_name = "llvm.loongarch.lasx.insert.128.hi.s"] + fn 
__lasx_insert_128_hi_s(a: __v8f32, b: __v4f32) -> __v8f32; + #[link_name = "llvm.loongarch.lasx.insert.128.lo.d"] + fn __lasx_insert_128_lo_d(a: __v4f64, b: __v2f64) -> __v4f64; + #[link_name = "llvm.loongarch.lasx.insert.128.hi.d"] + fn __lasx_insert_128_hi_d(a: __v4f64, b: __v2f64) -> __v4f64; + #[link_name = "llvm.loongarch.lasx.insert.128.lo"] + fn __lasx_insert_128_lo(a: __v4i64, b: __v2i64) -> __v4i64; + #[link_name = "llvm.loongarch.lasx.insert.128.hi"] + fn __lasx_insert_128_hi(a: __v4i64, b: __v2i64) -> __v4i64; } #[inline] @@ -7062,3 +7098,129 @@ pub fn lasx_xvrepli_w() -> m256i { static_assert_simm_bits!(IMM_S10, 10); unsafe { transmute(__lasx_xvrepli_w(IMM_S10)) } } + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_cast_128_s(a: m128) -> m256 { + unsafe { transmute(__lasx_cast_128_s(transmute(a))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_cast_128_d(a: m128d) -> m256d { + unsafe { transmute(__lasx_cast_128_d(transmute(a))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_cast_128(a: m128i) -> m256i { + unsafe { transmute(__lasx_cast_128(transmute(a))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_concat_128_s(a: m128, b: m128) -> m256 { + unsafe { transmute(__lasx_concat_128_s(transmute(a), transmute(b))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_concat_128_d(a: m128d, b: m128d) -> m256d { + unsafe { transmute(__lasx_concat_128_d(transmute(a), transmute(b))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_concat_128(a: m128i, b: m128i) -> m256i { + unsafe { 
transmute(__lasx_concat_128(transmute(a), transmute(b))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_extract_128_lo_s(a: m256) -> m128 { + unsafe { transmute(__lasx_extract_128_lo_s(transmute(a))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_extract_128_hi_s(a: m256) -> m128 { + unsafe { transmute(__lasx_extract_128_hi_s(transmute(a))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_extract_128_lo_d(a: m256d) -> m128d { + unsafe { transmute(__lasx_extract_128_lo_d(transmute(a))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_extract_128_hi_d(a: m256d) -> m128d { + unsafe { transmute(__lasx_extract_128_hi_d(transmute(a))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_extract_128_lo(a: m256i) -> m128i { + unsafe { transmute(__lasx_extract_128_lo(transmute(a))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_extract_128_hi(a: m256i) -> m128i { + unsafe { transmute(__lasx_extract_128_hi(transmute(a))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_insert_128_lo_s(a: m256, b: m128) -> m256 { + unsafe { transmute(__lasx_insert_128_lo_s(transmute(a), transmute(b))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_insert_128_hi_s(a: m256, b: m128) -> m256 { + unsafe { transmute(__lasx_insert_128_hi_s(transmute(a), transmute(b))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", 
issue = "117427")] +pub fn lasx_insert_128_lo_d(a: m256d, b: m128d) -> m256d { + unsafe { transmute(__lasx_insert_128_lo_d(transmute(a), transmute(b))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_insert_128_hi_d(a: m256d, b: m128d) -> m256d { + unsafe { transmute(__lasx_insert_128_hi_d(transmute(a), transmute(b))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_insert_128_lo(a: m256i, b: m128i) -> m256i { + unsafe { transmute(__lasx_insert_128_lo(transmute(a), transmute(b))) } +} + +#[inline] +#[target_feature(enable = "lasx")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn lasx_insert_128_hi(a: m256i, b: m128i) -> m256i { + unsafe { transmute(__lasx_insert_128_hi(transmute(a), transmute(b))) } +} diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/tests.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/tests.rs index 54771d7b51109..319ce7cf98195 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/tests.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/tests.rs @@ -14756,3 +14756,284 @@ unsafe fn test_lasx_xvrepli_w() { assert_eq!(r, transmute(lasx_xvrepli_w::<-388>())); } + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_cast_128_s() { + let a = u32x4::new(1031165056, 1051966120, 1060984374, 1062536919); + let r = i64x4::new(4518160082931176576, 4563561318958585398, 1966080, 1966080); + + assert_eq!( + r.as_array()[0..2], + transmute::<_, i64x4>(lasx_cast_128_s(transmute(a))).as_array()[0..2] + ); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_cast_128_d() { + let a = u64x2::new(4604694967937271251, 4600904075476555984); + let r = i64x4::new( + 4604694967937271251, + 4600904075476555984, + 2910860781861170785, + 8314045306847701346, + ); + + assert_eq!( + r.as_array()[0..2], + transmute::<_, 
i64x4>(lasx_cast_128_d(transmute(a))).as_array()[0..2] + ); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_cast_128() { + let a = i64x2::new(-5333716211868108402, 2442107533729495827); + let r = i64x4::new( + -5333716211868108402, + 2442107533729495827, + -1115824375586394527, + 8314045306157170687, + ); + + assert_eq!( + r.as_array()[0..2], + transmute::<_, i64x4>(lasx_cast_128(transmute(a))).as_array()[0..2] + ); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_concat_128_s() { + let a = u32x4::new(1032255272, 1059413818, 1058434362, 1041454056); + let b = u32x4::new(1047296252, 1059191602, 1051282752, 1026847376); + let r = i64x4::new( + 4550147702272751400, + 4473011111864986938, + 4549193291835144444, + 4410275898954698048, + ); + + assert_eq!(r, transmute(lasx_concat_128_s(transmute(a), transmute(b)))); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_concat_128_d() { + let a = u64x2::new(4602341404117999960, 4599751584045405722); + let b = u64x2::new(4595947342927040984, 4600308396523102002); + let r = i64x4::new( + 4602341404117999960, + 4599751584045405722, + 4595947342927040984, + 4600308396523102002, + ); + + assert_eq!(r, transmute(lasx_concat_128_d(transmute(a), transmute(b)))); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_concat_128() { + let a = i64x2::new(3302609705743394573, 8438855426868306143); + let b = i64x2::new(8632034656150002181, 7751541408133090748); + let r = i64x4::new( + 3302609705743394573, + 8438855426868306143, + 8632034656150002181, + 7751541408133090748, + ); + + assert_eq!(r, transmute(lasx_concat_128(transmute(a), transmute(b)))); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_extract_128_lo_s() { + let a = u32x8::new( + 1038279272, 1053426270, 1062315532, 1055361088, 1061380448, 1052007748, 1063816577, + 1061671114, + ); + let r = i64x2::new(4524431379435545192, 4532741359493293580); + + assert_eq!(r, transmute(lasx_extract_128_lo_s(transmute(a)))); +} + +#[simd_test(enable = 
"lasx")] +unsafe fn test_lasx_extract_128_hi_s() { + let a = u32x8::new( + 1059517342, 1052723820, 1053176244, 1060336354, 1058221022, 1064684502, 1061072013, + 1059238420, + ); + let r = i64x2::new(4572785117706267614, 4549394373627784333); + + assert_eq!(r, transmute(lasx_extract_128_hi_s(transmute(a)))); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_extract_128_lo_d() { + let a = u64x4::new( + 4606487981487128637, + 4592443779247846248, + 4605637448543526041, + 4604126872543611047, + ); + let r = i64x2::new(4606487981487128637, 4592443779247846248); + + assert_eq!(r, transmute(lasx_extract_128_lo_d(transmute(a)))); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_extract_128_hi_d() { + let a = u64x4::new( + 4595075050683709816, + 4603388454656549851, + 4603881047625519227, + 4604218419306666352, + ); + let r = i64x2::new(4603881047625519227, 4604218419306666352); + + assert_eq!(r, transmute(lasx_extract_128_hi_d(transmute(a)))); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_extract_128_lo() { + let a = i64x4::new( + 1690990426210778543, + -1056924033489771427, + 1791197928200737608, + 2648792885519901423, + ); + let r = i64x2::new(1690990426210778543, -1056924033489771427); + + assert_eq!(r, transmute(lasx_extract_128_lo(transmute(a)))); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_extract_128_hi() { + let a = i64x4::new( + 1400282616691463341, + 6677577875527300174, + -1903780563362068813, + -7449796170151383489, + ); + let r = i64x2::new(-1903780563362068813, -7449796170151383489); + + assert_eq!(r, transmute(lasx_extract_128_hi(transmute(a)))); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_insert_128_lo_s() { + let a = u32x8::new( + 1063338913, 1017815328, 1065051130, 1040694156, 1059596680, 1048796526, 1058020845, + 1057822131, + ); + let b = u32x4::new(1052930766, 1021556992, 1050709482, 1059704809); + let r = i64x4::new( + 4387553872693064398, + 4551397499119635946, + 4504546780388010376, + 
4543311458688048621, + ); + + assert_eq!( + r, + transmute(lasx_insert_128_lo_s(transmute(a), transmute(b))) + ); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_insert_128_hi_s() { + let a = u32x8::new( + 1018863744, 1064221149, 1048659080, 1057450774, 1049935896, 1034170664, 1059759433, + 1057849762, + ); + let b = u32x4::new(1060332648, 1063149600, 1051087106, 1060582348); + let r = i64x4::new( + 4570795031685406848, + 4541716492508546184, + 4566192763815814248, + 4555166500425978114, + ); + + assert_eq!( + r, + transmute(lasx_insert_128_hi_s(transmute(a), transmute(b))) + ); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_insert_128_lo_d() { + let a = u64x4::new( + 4601319519422109044, + 4601506273633970188, + 4605118087882201940, + 4605125059076454256, + ); + let b = u64x2::new(4587489919640425888, 4591909120489567808); + let r = i64x4::new( + 4587489919640425888, + 4591909120489567808, + 4605118087882201940, + 4605125059076454256, + ); + + assert_eq!( + r, + transmute(lasx_insert_128_lo_d(transmute(a), transmute(b))) + ); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_insert_128_hi_d() { + let a = u64x4::new( + 4604690660177752777, + 4593824994203592700, + 4599958775071728504, + 4604125324674373728, + ); + let b = u64x2::new(4601718173474385938, 4591758028383494760); + let r = i64x4::new( + 4604690660177752777, + 4593824994203592700, + 4601718173474385938, + 4591758028383494760, + ); + + assert_eq!( + r, + transmute(lasx_insert_128_hi_d(transmute(a), transmute(b))) + ); +} + +#[simd_test(enable = "lasx")] +unsafe fn test_lasx_insert_128_lo() { + let a = i64x4::new( + 8159968186698006293, + 5648210958959948409, + 603295919044368378, + -4396186135186039276, + ); + let b = i64x2::new(-6258666140812668387, 5822982556977506382); + let r = i64x4::new( + -6258666140812668387, + 5822982556977506382, + 603295919044368378, + -4396186135186039276, + ); + + assert_eq!(r, transmute(lasx_insert_128_lo(transmute(a), transmute(b)))); +} + 
+#[simd_test(enable = "lasx")] +unsafe fn test_lasx_insert_128_hi() { + let a = i64x4::new( + 2981835982487038158, + 5258378092714202875, + 5115371338527125146, + -6993491475145500537, + ); + let b = i64x2::new(1176776599938765863, -7502655081590988207); + let r = i64x4::new( + 2981835982487038158, + 5258378092714202875, + 1176776599938765863, + -7502655081590988207, + ); + + assert_eq!(r, transmute(lasx_insert_128_hi(transmute(a), transmute(b)))); +} diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs index 764e69ca05444..25efaadb42880 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs @@ -7,7 +7,7 @@ // ``` use crate::mem::transmute; -use super::types::*; +use super::super::*; #[allow(improper_ctypes)] unsafe extern "unadjusted" { @@ -1324,7 +1324,7 @@ unsafe extern "unadjusted" { #[link_name = "llvm.loongarch.lsx.vssrln.w.d"] fn __lsx_vssrln_w_d(a: __v2i64, b: __v2i64) -> __v4i32; #[link_name = "llvm.loongarch.lsx.vorn.v"] - fn __lsx_vorn_v(a: __v16i8, b: __v16i8) -> __v16i8; + fn __lsx_vorn_v(a: __v16u8, b: __v16u8) -> __v16u8; #[link_name = "llvm.loongarch.lsx.vldi"] fn __lsx_vldi(a: i32) -> __v2i64; #[link_name = "llvm.loongarch.lsx.vshuf.b"] diff --git a/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec b/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec index e3bdfcb5e9faa..ac4203a03f207 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec +++ b/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec @@ -2426,7 +2426,7 @@ data-types = V8SI, V4DI, V4DI /// lasx_xvorn_v name = lasx_xvorn_v asm-fmts = xd, xj, xk -data-types = V32QI, V32QI, V32QI +data-types = UV32QI, UV32QI, UV32QI /// lasx_xvldi name = lasx_xvldi @@ -3703,3 +3703,93 @@ name = lasx_xvrepli_w asm-fmts = xd, si10 data-types = V8SI, HI +/// lasx_cast_128_s +name = lasx_cast_128_s 
+asm-fmts = xd, vj +data-types = V8SF, V4SF + +/// lasx_cast_128_d +name = lasx_cast_128_d +asm-fmts = xd, vj +data-types = V4DF, V2DF + +/// lasx_cast_128 +name = lasx_cast_128 +asm-fmts = xd, vj +data-types = V4DI, V2DI + +/// lasx_concat_128_s +name = lasx_concat_128_s +asm-fmts = xd, vj, vk +data-types = V8SF, V4SF, V4SF + +/// lasx_concat_128_d +name = lasx_concat_128_d +asm-fmts = xd, vj, vk +data-types = V4DF, V2DF, V2DF + +/// lasx_concat_128 +name = lasx_concat_128 +asm-fmts = xd, vj, vk +data-types = V4DI, V2DI, V2DI + +/// lasx_extract_128_lo_s +name = lasx_extract_128_lo_s +asm-fmts = vd, xj +data-types = V4SF, V8SF + +/// lasx_extract_128_hi_s +name = lasx_extract_128_hi_s +asm-fmts = vd, xj +data-types = V4SF, V8SF + +/// lasx_extract_128_lo_d +name = lasx_extract_128_lo_d +asm-fmts = vd, xj +data-types = V2DF, V4DF + +/// lasx_extract_128_hi_d +name = lasx_extract_128_hi_d +asm-fmts = vd, xj +data-types = V2DF, V4DF + +/// lasx_extract_128_lo +name = lasx_extract_128_lo +asm-fmts = vd, xj +data-types = V2DI, V4DI + +/// lasx_extract_128_hi +name = lasx_extract_128_hi +asm-fmts = vd, xj +data-types = V2DI, V4DI + +/// lasx_insert_128_lo_s +name = lasx_insert_128_lo_s +asm-fmts = xd, xj, vk +data-types = V8SF, V8SF, V4SF + +/// lasx_insert_128_hi_s +name = lasx_insert_128_hi_s +asm-fmts = xd, xj, vk +data-types = V8SF, V8SF, V4SF + +/// lasx_insert_128_lo_d +name = lasx_insert_128_lo_d +asm-fmts = xd, xj, vk +data-types = V4DF, V4DF, V2DF + +/// lasx_insert_128_hi_d +name = lasx_insert_128_hi_d +asm-fmts = xd, xj, vk +data-types = V4DF, V4DF, V2DF + +/// lasx_insert_128_lo +name = lasx_insert_128_lo +asm-fmts = xd, xj, vk +data-types = V4DI, V4DI, V2DI + +/// lasx_insert_128_hi +name = lasx_insert_128_hi +asm-fmts = xd, xj, vk +data-types = V4DI, V4DI, V2DI + diff --git a/library/stdarch/crates/stdarch-gen-loongarch/lasxintrin.h b/library/stdarch/crates/stdarch-gen-loongarch/lasxintrin.h index c525b6106b897..02bb97918d95d 100644 --- 
a/library/stdarch/crates/stdarch-gen-loongarch/lasxintrin.h +++ b/library/stdarch/crates/stdarch-gen-loongarch/lasxintrin.h @@ -1,10 +1,10 @@ /* - * https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=gcc/config/loongarch/lasxintrin.h;hb=61f1001f2f4ab9128e5eb6e9a4adbbb0f9f0bc75 + * https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=gcc/config/loongarch/lasxintrin.h;hb=c2013267642fea4a6e89b826940c8aa80a76089d */ /* LARCH Loongson ASX intrinsics include file. - Copyright (C) 2018-2024 Free Software Foundation, Inc. + Copyright (C) 2018-2025 Free Software Foundation, Inc. This file is part of GCC. @@ -27,6 +27,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see . */ +#include + #ifndef _GCC_LOONGSON_ASXINTRIN_H #define _GCC_LOONGSON_ASXINTRIN_H 1 @@ -3568,11 +3570,11 @@ __m256i __lasx_xvssrln_w_d (__m256i _1, __m256i _2) } /* Assembly instruction format: xd, xj, xk. */ -/* Data types in instruction templates: V32QI, V32QI, V32QI. */ +/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i __lasx_xvorn_v (__m256i _1, __m256i _2) { - return (__m256i)__builtin_lasx_xvorn_v ((v32i8)_1, (v32i8)_2); + return (__m256i)__builtin_lasx_xvorn_v ((v32u8)_1, (v32u8)_2); } /* Assembly instruction format: xd, i13. */ @@ -5372,5 +5374,159 @@ __m256i __lasx_xvfcmp_sun_s (__m256 _1, __m256 _2) #define __lasx_xvrepli_w(/*si10*/ _1) \ ((__m256i)__builtin_lasx_xvrepli_w ((_1))) +#if defined (__loongarch_asx_sx_conv) +/* Add builtin interfaces for 128 and 256 vector conversions. + For the assembly instruction format of some functions of the following vector + conversion, it is not described exactly in accordance with the format of the + generated assembly instruction. + In the front end of the Rust language, different built-in functions are called + by analyzing the format of assembly instructions. 
The data types of instructions + are all defined based on the interfaces of the defined functions, in the + following order: output, input... . */ +/* Assembly instruction format: xd, vj. */ +/* Data types in instruction templates: V8SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_cast_128_s (__m128 _1) +{ + return (__m256)__builtin_lasx_cast_128_s ((v4f32)_1); +} + +/* Assembly instruction format: xd, vj. */ +/* Data types in instruction templates: V4DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_cast_128_d (__m128d _1) +{ + return (__m256d)__builtin_lasx_cast_128_d ((v2f64)_1); +} + +/* Assembly instruction format: xd, vj. */ +/* Data types in instruction templates: V4DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_cast_128 (__m128i _1) +{ + return (__m256i)__builtin_lasx_cast_128 ((v2i64)_1); +} + +/* Assembly instruction format: xd, vj, vk. */ +/* Data types in instruction templates: V8SF, V4SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_concat_128_s (__m128 _1, __m128 _2) +{ + return (__m256)__builtin_lasx_concat_128_s ((v4f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: xd, vj, vk. */ +/* Data types in instruction templates: V4DF, V2DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_concat_128_d (__m128d _1, __m128d _2) +{ + return (__m256d)__builtin_lasx_concat_128_d ((v2f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: xd, vj, vk. */ +/* Data types in instruction templates: V4DI, V2DI, V2DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_concat_128 (__m128i _1, __m128i _2) +{ + return (__m256i)__builtin_lasx_concat_128 ((v2i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: vd, xj. */ +/* Data types in instruction templates: V4SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lasx_extract_128_lo_s (__m256 _1) +{ + return (__m128)__builtin_lasx_extract_128_lo_s ((v8f32)_1); +} + +/* Assembly instruction format: vd, xj. */ +/* Data types in instruction templates: V4SF, V8SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128 __lasx_extract_128_hi_s (__m256 _1) +{ + return (__m128)__builtin_lasx_extract_128_hi_s ((v8f32)_1); +} + +/* Assembly instruction format: vd, xj. */ +/* Data types in instruction templates: V2DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lasx_extract_128_lo_d (__m256d _1) +{ + return (__m128d)__builtin_lasx_extract_128_lo_d ((v4f64)_1); +} + +/* Assembly instruction format: vd, xj. */ +/* Data types in instruction templates: V2DF, V4DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128d __lasx_extract_128_hi_d (__m256d _1) +{ + return (__m128d)__builtin_lasx_extract_128_hi_d ((v4f64)_1); +} + +/* Assembly instruction format: vd, xj. */ +/* Data types in instruction templates: V2DI, V4DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lasx_extract_128_lo (__m256i _1) +{ + return (__m128i)__builtin_lasx_extract_128_lo ((v4i64)_1); +} + +/* Assembly instruction format: vd, xj. */ +/* Data types in instruction templates: V2DI, V4DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m128i __lasx_extract_128_hi (__m256i _1) +{ + return (__m128i)__builtin_lasx_extract_128_hi ((v4i64)_1); +} + +/* Assembly instruction format: xd, xj, vk. */ +/* Data types in instruction templates: V8SF, V8SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_insert_128_lo_s (__m256 _1, __m128 _2) +{ + return (__m256)__builtin_lasx_insert_128_lo_s ((v8f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: xd, xj, vk. */ +/* Data types in instruction templates: V8SF, V8SF, V4SF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256 __lasx_insert_128_hi_s (__m256 _1, __m128 _2) +{ + return (__m256)__builtin_lasx_insert_128_hi_s ((v8f32)_1, (v4f32)_2); +} + +/* Assembly instruction format: xd, xj, vk. */ +/* Data types in instruction templates: V4DF, V4DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_insert_128_lo_d (__m256d _1, __m128d _2) +{ + return (__m256d)__builtin_lasx_insert_128_lo_d ((v4f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: xd, xj, vk. */ +/* Data types in instruction templates: V4DF, V4DF, V2DF. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256d __lasx_insert_128_hi_d (__m256d _1, __m128d _2) +{ + return (__m256d)__builtin_lasx_insert_128_hi_d ((v4f64)_1, (v2f64)_2); +} + +/* Assembly instruction format: xd, xj, vk. */ +/* Data types in instruction templates: V4DI, V4DI, V2DI. */ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_insert_128_lo (__m256i _1, __m128i _2) +{ + return (__m256i)__builtin_lasx_insert_128_lo ((v4i64)_1, (v2i64)_2); +} + +/* Assembly instruction format: xd, xj, vk. */ +/* Data types in instruction templates: V4DI, V4DI, V2DI. 
*/ +extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__m256i __lasx_insert_128_hi (__m256i _1, __m128i _2) +{ + return (__m256i)__builtin_lasx_insert_128_hi ((v4i64)_1, (v2i64)_2); +} + +#endif /* defined(__loongarch_asx_sx_conv). */ #endif /* defined(__loongarch_asx). */ #endif /* _GCC_LOONGSON_ASXINTRIN_H. */ diff --git a/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec b/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec index dc835770d566e..b5497b6e6207e 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec +++ b/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec @@ -3286,7 +3286,7 @@ data-types = V4SI, V2DI, V2DI /// lsx_vorn_v name = lsx_vorn_v asm-fmts = vd, vj, vk -data-types = V16QI, V16QI, V16QI +data-types = UV16QI, UV16QI, UV16QI /// lsx_vldi name = lsx_vldi diff --git a/library/stdarch/crates/stdarch-gen-loongarch/lsxintrin.h b/library/stdarch/crates/stdarch-gen-loongarch/lsxintrin.h index 943f2df913e4d..66b7c7e2187ac 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/lsxintrin.h +++ b/library/stdarch/crates/stdarch-gen-loongarch/lsxintrin.h @@ -1,10 +1,10 @@ /* - * https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=gcc/config/loongarch/lsxintrin.h;hb=61f1001f2f4ab9128e5eb6e9a4adbbb0f9f0bc75 + * https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=gcc/config/loongarch/lsxintrin.h;hb=6441eb6dc020faae0672ea724dfdb38c6a9bf6a1 */ /* LARCH Loongson SX intrinsics include file. - Copyright (C) 2018-2024 Free Software Foundation, Inc. + Copyright (C) 2018-2025 Free Software Foundation, Inc. This file is part of GCC. @@ -4749,11 +4749,11 @@ __m128i __lsx_vssrln_w_d (__m128i _1, __m128i _2) } /* Assembly instruction format: vd, vj, vk. */ -/* Data types in instruction templates: V16QI, V16QI, V16QI. */ +/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. 
*/ extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i __lsx_vorn_v (__m128i _1, __m128i _2) { - return (__m128i)__builtin_lsx_vorn_v ((v16i8)_1, (v16i8)_2); + return (__m128i)__builtin_lsx_vorn_v ((v16u8)_1, (v16u8)_2); } /* Assembly instruction format: vd, i13. */ diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs index 5076064ffcdd3..10b87c70e9ede 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs @@ -157,7 +157,7 @@ fn gen_bind(in_file: String, ext_name: &str) -> io::Result<()> { // ``` use crate::mem::transmute; -use super::types::*; +use super::super::*; "# )); @@ -1551,6 +1551,10 @@ fn gen_test_body( format!( " printf(\"\\n {current_name}{as_params};\\n assert_eq!(r, transmute(o));\\n\"{as_args});" ) + } else if current_name.starts_with("lasx_cast_128") { + format!( + " printf(\"\\n assert_eq!(r.as_array()[0..2], transmute::<_, i64x4>({current_name}{as_params}).as_array()[0..2]);\\n\"{as_args});" + ) } else { format!( " printf(\"\\n assert_eq!(r, transmute({current_name}{as_params}));\\n\"{as_args});" From b871f6c7677987d8442437677598b9cda10671a9 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Tue, 3 Feb 2026 14:48:23 +0000 Subject: [PATCH 31/90] Fix running rustc tests in CI --- scripts/setup_rust_fork.sh | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/scripts/setup_rust_fork.sh b/scripts/setup_rust_fork.sh index 9d618554cb4ff..3187bda1304a7 100644 --- a/scripts/setup_rust_fork.sh +++ b/scripts/setup_rust_fork.sh @@ -81,6 +81,27 @@ index a656927b1f6..44fc5546fac 100644 // If download-ci-llvm=true we also want to check that CI llvm is available b && llvm::is_ci_llvm_available_for_target(&dwn_ctx.host_target, asserts) } +diff --git a/src/build_helper/src/git.rs b/src/build_helper/src/git.rs +index 
330fb465de..a4593ed96f 100644 +--- a/src/build_helper/src/git.rs ++++ b/src/build_helper/src/git.rs +@@ -218,15 +218,7 @@ pub fn get_closest_upstream_commit( + config: &GitConfig<'_>, + env: CiEnv, + ) -> Result, String> { +- let base = match env { +- CiEnv::None => "HEAD", +- CiEnv::GitHubActions => { +- // On CI, we should always have a non-upstream merge commit at the tip, +- // and our first parent should be the most recently merged upstream commit. +- // We thus simply return our first parent. +- return resolve_commit_sha(git_dir, "HEAD^1").map(Some); +- } +- }; ++ let base = "HEAD"; + + let mut git = Command::new("git"); + EOF popd From 3318d22754911e32ec759347a8bcda0f741994d0 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Wed, 4 Feb 2026 12:35:28 +0000 Subject: [PATCH 32/90] Convert to inline diagnostics in all codegen backends --- src/lib.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 7361a6af41784..a49dc9be34583 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -125,11 +125,6 @@ pub struct CraneliftCodegenBackend { } impl CodegenBackend for CraneliftCodegenBackend { - fn locale_resource(&self) -> &'static str { - // FIXME(rust-lang/rust#100717) - cranelift codegen backend is not yet translated - "" - } - fn name(&self) -> &'static str { "cranelift" } From 47b2f234bcebf29bdcc788377f7a2142ad13aa33 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Tue, 3 Feb 2026 10:08:05 +0000 Subject: [PATCH 33/90] Update to Cranelift 0.128 --- Cargo.lock | 72 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 24 +++++++++--------- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 766c99c04c167..afc1d0d0ab959 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -43,42 +43,42 @@ checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cranelift-assembler-x64" -version = 
"0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bd963a645179fa33834ba61fa63353998543b07f877e208da9eb47d4a70d1e7" +checksum = "0377b13bf002a0774fcccac4f1102a10f04893d24060cf4b7350c87e4cbb647c" dependencies = [ "cranelift-assembler-x64-meta", ] [[package]] name = "cranelift-assembler-x64-meta" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6d5739c9dc6b5553ca758d78d87d127dd19f397f776efecf817b8ba8d0bb01" +checksum = "cfa027979140d023b25bf7509fb7ede3a54c3d3871fb5ead4673c4b633f671a2" dependencies = [ "cranelift-srcgen", ] [[package]] name = "cranelift-bforest" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff402c11bb1c9652b67a3e885e84b1b8d00c13472c8fd85211e06a41a63c3e03" +checksum = "618e4da87d9179a70b3c2f664451ca8898987aa6eb9f487d16988588b5d8cc40" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-bitset" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "769a0d88c2f5539e9c5536a93a7bf164b0dc68d91e3d00723e5b4ffc1440afdc" +checksum = "db53764b5dad233b37b8f5dc54d3caa9900c54579195e00f17ea21f03f71aaa7" [[package]] name = "cranelift-codegen" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4351f721fb3b26add1c180f0a75c7474bab2f903c8b777c6ca65238ded59a78" +checksum = "4ae927f1d8c0abddaa863acd201471d56e7fc6c3925104f4861ed4dc3e28b421" dependencies = [ "bumpalo", "cranelift-assembler-x64", @@ -102,9 +102,9 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f86c0ba5b96713643f4dd0de0df12844de9c7bb137d6829b174b706939aa74" +checksum = 
"d3fcf1e3e6757834bd2584f4cbff023fcc198e9279dcb5d684b4bb27a9b19f54" dependencies = [ "cranelift-assembler-x64-meta", "cranelift-codegen-shared", @@ -114,33 +114,33 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f08605eee8d51fd976a970bd5b16c9529b51b624f8af68f80649ffb172eb85a4" +checksum = "205dcb9e6ccf9d368b7466be675ff6ee54a63e36da6fe20e72d45169cf6fd254" [[package]] name = "cranelift-control" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "623aab0a09e40f0cf0b5d35eb7832bae4c4f13e3768228e051a6c1a60e88ef5f" +checksum = "108eca9fcfe86026054f931eceaf57b722c1b97464bf8265323a9b5877238817" dependencies = [ "arbitrary", ] [[package]] name = "cranelift-entity" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0f066e07e3bcbe38884cc5c94c32c7a90267d69df80f187d9dfe421adaa7c4" +checksum = "a0d96496910065d3165f84ff8e1e393916f4c086f88ac8e1b407678bc78735aa" dependencies = [ "cranelift-bitset", ] [[package]] name = "cranelift-frontend" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40865b02a0e52ca8e580ad64feef530cb1d05f6bb4972b4eef05e3eaeae81701" +checksum = "e303983ad7e23c850f24d9c41fc3cb346e1b930f066d3966545e4c98dac5c9fb" dependencies = [ "cranelift-codegen", "log", @@ -150,15 +150,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "104b3c117ae513e9af1d90679842101193a5ccb96ac9f997966d85ea25be2852" +checksum = "24b0cf8d867d891245836cac7abafb0a5b0ea040a019d720702b3b8bcba40bfa" [[package]] name = "cranelift-jit" -version = "0.127.0" +version = "0.128.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3aa5f855cfb8e4253ed2d0dfc1a0b6ebe4912e67aa8b7ee14026ff55ca17f1fe" +checksum = "dcf1e35da6eca2448395f483eb172ce71dd7842f7dc96f44bb8923beafe43c6d" dependencies = [ "anyhow", "cranelift-codegen", @@ -176,9 +176,9 @@ dependencies = [ [[package]] name = "cranelift-module" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d01806b191b59f4fc4680293dd5f554caf2de5b62f95eff5beef7acb46c29c" +checksum = "792ba2a54100e34f8a36e3e329a5207cafd1f0918a031d34695db73c163fdcc7" dependencies = [ "anyhow", "cranelift-codegen", @@ -187,9 +187,9 @@ dependencies = [ [[package]] name = "cranelift-native" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c54e0a358bc05b48f2032e1c320e7f468da068604f2869b77052eab68eb0fe" +checksum = "e24b641e315443e27807b69c440fe766737d7e718c68beb665a2d69259c77bf3" dependencies = [ "cranelift-codegen", "libc", @@ -198,9 +198,9 @@ dependencies = [ [[package]] name = "cranelift-object" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d17e0216be5daabab616647c1918e06dae0708474ba5f7b7762ac24ea5eb126" +checksum = "ecba1f219a201cf946150538e631defd620c5051b62c52ecb89a0004bab263d4" dependencies = [ "anyhow", "cranelift-codegen", @@ -213,9 +213,9 @@ dependencies = [ [[package]] name = "cranelift-srcgen" -version = "0.127.0" +version = "0.128.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6f4b039f453b66c75e9f7886e5a2af96276e151f44dc19b24b58f9a0c98009" +checksum = "a4e378a54e7168a689486d67ee1f818b7e5356e54ae51a1d7a53f4f13f7f8b7a" [[package]] name = "crc32fast" @@ -469,9 +469,9 @@ checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "wasmtime-internal-jit-icache-coherence" -version = "40.0.0" +version = "41.0.3" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0858b470463f3e7c73acd6049046049e64be17b98901c2db5047450cf83df1fe" +checksum = "bada5ca1cc47df7d14100e2254e187c2486b426df813cea2dd2553a7469f7674" dependencies = [ "anyhow", "cfg-if", @@ -481,9 +481,9 @@ dependencies = [ [[package]] name = "wasmtime-internal-math" -version = "40.0.0" +version = "41.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222e1a590ece4e898f20af1e541b61d2cb803f2557e7eaff23e6c1db5434454a" +checksum = "cf6f615d528eda9adc6eefb062135f831b5215c348f4c3ec3e143690c730605b" dependencies = [ "libm", ] diff --git a/Cargo.toml b/Cargo.toml index ee4bde477c477..a7b4664282eda 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,12 +8,12 @@ crate-type = ["dylib"] [dependencies] # These have to be in sync with each other -cranelift-codegen = { version = "0.127.0", default-features = false, features = ["std", "timing", "unwind", "all-native-arch"] } -cranelift-frontend = { version = "0.127.0" } -cranelift-module = { version = "0.127.0" } -cranelift-native = { version = "0.127.0" } -cranelift-jit = { version = "0.127.0", optional = true } -cranelift-object = { version = "0.127.0" } +cranelift-codegen = { version = "0.128.3", default-features = false, features = ["std", "timing", "unwind", "all-native-arch"] } +cranelift-frontend = { version = "0.128.3" } +cranelift-module = { version = "0.128.3" } +cranelift-native = { version = "0.128.3" } +cranelift-jit = { version = "0.128.3", optional = true } +cranelift-object = { version = "0.128.3" } target-lexicon = "0.13" gimli = { version = "0.32", default-features = false, features = ["write"] } object = { version = "0.37.3", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] } @@ -24,12 +24,12 @@ smallvec = "1.8.1" [patch.crates-io] # Uncomment to use an unreleased version of cranelift -#cranelift-codegen = { git = 
"https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } -#cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } -#cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } -#cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } -#cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } -#cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" } +#cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-41.0.0" } +#cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-41.0.0" } +#cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-41.0.0" } +#cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-41.0.0" } +#cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-41.0.0" } +#cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-41.0.0" } # Uncomment to use local checkout of cranelift #cranelift-codegen = { path = "../wasmtime/cranelift/codegen" } From 99147c18082280eb7238f17dffb8813e67f55837 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Wed, 11 Feb 2026 12:36:19 +0000 Subject: [PATCH 34/90] Use cg_ssa's produce_final_output_artifacts in cg_clif --- src/driver/aot.rs | 214 ++++------------------------------------------ src/lib.rs | 1 - 2 files changed, 15 insertions(+), 200 deletions(-) diff --git a/src/driver/aot.rs b/src/driver/aot.rs index 760e23f2171bc..fc5c634d95709 100644 --- a/src/driver/aot.rs +++ b/src/driver/aot.rs @@ -2,29 +2,26 @@ //! standalone executable. 
use std::env; -use std::fs::{self, File}; +use std::fs::File; use std::io::BufWriter; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::sync::Arc; use std::thread::JoinHandle; use cranelift_object::{ObjectBuilder, ObjectModule}; use rustc_codegen_ssa::assert_module_sources::CguReuse; -use rustc_codegen_ssa::back::link::ensure_removed; +use rustc_codegen_ssa::back::write::{CompiledModules, produce_final_output_artifacts}; use rustc_codegen_ssa::base::determine_cgu_reuse; -use rustc_codegen_ssa::{ - CodegenResults, CompiledModule, CrateInfo, ModuleKind, errors as ssa_errors, -}; +use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::sync::{IntoDynSyncSend, par_map}; use rustc_hir::attrs::Linkage as RLinkage; -use rustc_metadata::fs::copy_to_stdout; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::mono::{CodegenUnit, MonoItem, MonoItemData, Visibility}; use rustc_session::Session; -use rustc_session::config::{OutFileName, OutputFilenames, OutputType}; +use rustc_session::config::{OutputFilenames, OutputType}; use crate::base::CodegenedFunction; use crate::concurrency_limiter::{ConcurrencyLimiter, ConcurrencyLimiterToken}; @@ -125,201 +122,20 @@ impl OngoingCodegen { sess.dcx().abort_if_errors(); - let codegen_results = CodegenResults { - modules, - allocator_module: self.allocator_module, - crate_info: self.crate_info, - }; + let compiled_modules = CompiledModules { modules, allocator_module: self.allocator_module }; - produce_final_output_artifacts(sess, &codegen_results, outputs); + produce_final_output_artifacts(sess, &compiled_modules, outputs); - (codegen_results, work_products) - } -} - -// Adapted from 
https://github.com/rust-lang/rust/blob/73476d49904751f8d90ce904e16dfbc278083d2c/compiler/rustc_codegen_ssa/src/back/write.rs#L547C1-L706C2 -fn produce_final_output_artifacts( - sess: &Session, - codegen_results: &CodegenResults, - crate_output: &OutputFilenames, -) { - let user_wants_bitcode = false; - let mut user_wants_objects = false; - - // Produce final compile outputs. - let copy_gracefully = |from: &Path, to: &OutFileName| match to { - OutFileName::Stdout => { - if let Err(e) = copy_to_stdout(from) { - sess.dcx().emit_err(ssa_errors::CopyPath::new(from, to.as_path(), e)); - } - } - OutFileName::Real(path) => { - if let Err(e) = fs::copy(from, path) { - sess.dcx().emit_err(ssa_errors::CopyPath::new(from, path, e)); - } - } - }; - - let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| { - if codegen_results.modules.len() == 1 { - // 1) Only one codegen unit. In this case it's no difficulty - // to copy `foo.0.x` to `foo.x`. - let path = crate_output.temp_path_for_cgu( - output_type, - &codegen_results.modules[0].name, - sess.invocation_temp.as_deref(), - ); - let output = crate_output.path(output_type); - if !output_type.is_text_output() && output.is_tty() { - sess.dcx() - .emit_err(ssa_errors::BinaryOutputToTty { shorthand: output_type.shorthand() }); - } else { - copy_gracefully(&path, &output); - } - if !sess.opts.cg.save_temps && !keep_numbered { - // The user just wants `foo.x`, not `foo.#module-name#.x`. - ensure_removed(sess.dcx(), &path); - } - } else { - if crate_output.outputs.contains_explicit_name(&output_type) { - // 2) Multiple codegen units, with `--emit foo=some_name`. We have - // no good solution for this case, so warn the user. - sess.dcx() - .emit_warn(ssa_errors::IgnoringEmitPath { extension: output_type.extension() }); - } else if crate_output.single_output_file.is_some() { - // 3) Multiple codegen units, with `-o some_name`. We have - // no good solution for this case, so warn the user. 
- sess.dcx() - .emit_warn(ssa_errors::IgnoringOutput { extension: output_type.extension() }); - } else { - // 4) Multiple codegen units, but no explicit name. We - // just leave the `foo.0.x` files in place. - // (We don't have to do any work in this case.) - } - } - }; + ( + CodegenResults { + crate_info: self.crate_info, - // Flag to indicate whether the user explicitly requested bitcode. - // Otherwise, we produced it only as a temporary output, and will need - // to get rid of it. - for output_type in crate_output.outputs.keys() { - match *output_type { - OutputType::Bitcode | OutputType::ThinLinkBitcode => { - // Cranelift doesn't have bitcode - // user_wants_bitcode = true; - // // Copy to .bc, but always keep the .0.bc. There is a later - // // check to figure out if we should delete .0.bc files, or keep - // // them for making an rlib. - // copy_if_one_unit(OutputType::Bitcode, true); - } - OutputType::LlvmAssembly => { - // Cranelift IR text already emitted during codegen - // copy_if_one_unit(OutputType::LlvmAssembly, false); - } - OutputType::Assembly => { - // Currently no support for emitting raw assembly files - // copy_if_one_unit(OutputType::Assembly, false); - } - OutputType::Object => { - user_wants_objects = true; - copy_if_one_unit(OutputType::Object, true); - } - OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {} - } + modules: compiled_modules.modules, + allocator_module: compiled_modules.allocator_module, + }, + work_products, + ) } - - // Clean up unwanted temporary files. - - // We create the following files by default: - // - #crate#.#module-name#.bc - // - #crate#.#module-name#.o - // - #crate#.crate.metadata.bc - // - #crate#.crate.metadata.o - // - #crate#.o (linked from crate.##.o) - // - #crate#.bc (copied from crate.##.bc) - // We may create additional files if requested by the user (through - // `-C save-temps` or `--emit=` flags). 
- - if !sess.opts.cg.save_temps { - // Remove the temporary .#module-name#.o objects. If the user didn't - // explicitly request bitcode (with --emit=bc), and the bitcode is not - // needed for building an rlib, then we must remove .#module-name#.bc as - // well. - - // Specific rules for keeping .#module-name#.bc: - // - If the user requested bitcode (`user_wants_bitcode`), and - // codegen_units > 1, then keep it. - // - If the user requested bitcode but codegen_units == 1, then we - // can toss .#module-name#.bc because we copied it to .bc earlier. - // - If we're not building an rlib and the user didn't request - // bitcode, then delete .#module-name#.bc. - // If you change how this works, also update back::link::link_rlib, - // where .#module-name#.bc files are (maybe) deleted after making an - // rlib. - let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); - - let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units().as_usize() > 1; - - let keep_numbered_objects = - needs_crate_object || (user_wants_objects && sess.codegen_units().as_usize() > 1); - - for module in codegen_results.modules.iter() { - if let Some(ref path) = module.object { - if !keep_numbered_objects { - ensure_removed(sess.dcx(), path); - } - } - - if let Some(ref path) = module.dwarf_object { - if !keep_numbered_objects { - ensure_removed(sess.dcx(), path); - } - } - - if let Some(ref path) = module.bytecode { - if !keep_numbered_bitcode { - ensure_removed(sess.dcx(), path); - } - } - } - - if !user_wants_bitcode { - if let Some(ref allocator_module) = codegen_results.allocator_module { - if let Some(ref path) = allocator_module.bytecode { - ensure_removed(sess.dcx(), path); - } - } - } - } - - if sess.opts.json_artifact_notifications { - if codegen_results.modules.len() == 1 { - codegen_results.modules[0].for_each_output(|_path, ty| { - if sess.opts.output_types.contains_key(&ty) { - let descr = ty.shorthand(); - // for single cgu file is renamed to 
drop cgu specific suffix - // so we regenerate it the same way - let path = crate_output.path(ty); - sess.dcx().emit_artifact_notification(path.as_path(), descr); - } - }); - } else { - for module in &codegen_results.modules { - module.for_each_output(|path, ty| { - if sess.opts.output_types.contains_key(&ty) { - let descr = ty.shorthand(); - sess.dcx().emit_artifact_notification(path, descr); - } - }); - } - } - } - - // We leave the following files around by default: - // - #crate#.o - // - #crate#.crate.metadata.o - // - #crate#.bc - // These are used in linking steps and will be cleaned up afterward. } fn make_module(sess: &Session, name: String) -> UnwindModule { diff --git a/src/lib.rs b/src/lib.rs index a49dc9be34583..e7daf57de0f03 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -23,7 +23,6 @@ extern crate rustc_hir; extern crate rustc_incremental; extern crate rustc_index; extern crate rustc_log; -extern crate rustc_metadata; extern crate rustc_session; extern crate rustc_span; extern crate rustc_symbol_mangling; From 0a662d0f72d28686789463d9328dd974c70c1693 Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Thu, 22 Jan 2026 09:30:20 -0600 Subject: [PATCH 35/90] arch: Add Hexagon HVX instructions --- library/stdarch/.github/workflows/main.yml | 13 +- library/stdarch/Cargo.lock | 448 + .../hexagon-unknown-linux-musl/Dockerfile | 46 + library/stdarch/ci/run.sh | 3 + .../crates/core_arch/src/core_arch_docs.md | 2 + .../crates/core_arch/src/hexagon/hvx.rs | 8488 +++++++++++++++++ .../crates/core_arch/src/hexagon/mod.rs | 12 + library/stdarch/crates/core_arch/src/lib.rs | 1 + library/stdarch/crates/core_arch/src/mod.rs | 17 + .../crates/stdarch-gen-hexagon/Cargo.toml | 10 + .../crates/stdarch-gen-hexagon/src/main.rs | 1697 ++++ library/stdarch/examples/Cargo.toml | 4 + library/stdarch/examples/gaussian.rs | 358 + 13 files changed, 11098 insertions(+), 1 deletion(-) create mode 100644 library/stdarch/ci/docker/hexagon-unknown-linux-musl/Dockerfile create mode 100644 
library/stdarch/crates/core_arch/src/hexagon/hvx.rs create mode 100644 library/stdarch/crates/core_arch/src/hexagon/mod.rs create mode 100644 library/stdarch/crates/stdarch-gen-hexagon/Cargo.toml create mode 100644 library/stdarch/crates/stdarch-gen-hexagon/src/main.rs create mode 100644 library/stdarch/examples/gaussian.rs diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml index 6cf0e9f02fe54..0ec355aa3ca4f 100644 --- a/library/stdarch/.github/workflows/main.yml +++ b/library/stdarch/.github/workflows/main.yml @@ -96,6 +96,8 @@ jobs: os: ubuntu-latest - tuple: loongarch64-unknown-linux-gnu os: ubuntu-latest + - tuple: hexagon-unknown-linux-musl + os: ubuntu-latest - tuple: wasm32-wasip1 os: ubuntu-latest @@ -207,6 +209,11 @@ jobs: tuple: amdgcn-amd-amdhsa os: ubuntu-latest norun: true + - target: + tuple: hexagon-unknown-linux-musl + os: ubuntu-latest + norun: true + build_std: true steps: - uses: actions/checkout@v4 @@ -300,7 +307,7 @@ jobs: # Check that the generated files agree with the checked-in versions. check-stdarch-gen: needs: [style] - name: Check stdarch-gen-{arm, loongarch} output + name: Check stdarch-gen-{arm, loongarch, hexagon} output runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -318,6 +325,10 @@ jobs: run: | cargo run --bin=stdarch-gen-loongarch --release -- crates/stdarch-gen-loongarch/lasx.spec git diff --exit-code + - name: Check hexagon + run: | + cargo run -p stdarch-gen-hexagon --release + git diff --exit-code conclusion: needs: diff --git a/library/stdarch/Cargo.lock b/library/stdarch/Cargo.lock index 70f09adf2c857..66dd59a379aa3 100644 --- a/library/stdarch/Cargo.lock +++ b/library/stdarch/Cargo.lock @@ -2,6 +2,12 @@ # It is not intended for manual editing. 
version = 4 +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + [[package]] name = "aho-corasick" version = "1.1.3" @@ -82,6 +88,12 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "bitflags" version = "2.9.4" @@ -158,6 +170,15 @@ dependencies = [ "syscalls", ] +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + [[package]] name = "crossbeam-deque" version = "0.8.6" @@ -224,6 +245,17 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "either" version = "1.15.0" @@ -265,12 +297,31 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d" +[[package]] +name = "flate2" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + [[package]] name = "fnv" version = "1.0.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + [[package]] name = "getrandom" version = "0.2.16" @@ -312,12 +363,114 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +dependencies = [ + "icu_collections", + 
"icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + [[package]] name = "ident_case" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -399,6 +552,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + [[package]] name = "log" version = "0.4.28" @@ -411,12 +570,43 @@ version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + [[package]] name = "once_cell_polyfill" version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + [[package]] name = "ppv-lite86" version = "0.2.21" @@ -564,12 +754,61 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + [[package]] name = "rustc-demangle" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +[[package]] +name = "rustls" +version = "0.23.36" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + [[package]] name = "ryu" version = "1.0.20" @@ -676,6 +915,12 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "simd-adler32" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + [[package]] name = "simd-test-macro" version = "0.1.0" @@ -685,6 +930,18 @@ dependencies = [ "syn", ] +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + [[package]] name = "stdarch-gen-arm" version = "0.1.0" @@ -699,6 +956,14 @@ dependencies = [ "walkdir", ] +[[package]] +name = "stdarch-gen-hexagon" +version = "0.1.0" +dependencies = [ + "regex", + "ureq", +] + [[package]] name = "stdarch-gen-loongarch" version = "0.1.0" @@ -745,6 
+1010,12 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "syn" version = "2.0.106" @@ -756,6 +1027,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "syscalls" version = "0.6.18" @@ -791,12 +1073,62 @@ dependencies = [ "syn", ] +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "unicode-ident" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "ureq" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" +dependencies = [ + "base64", + "flate2", + "log", + "once_cell", + "rustls", + "rustls-pki-types", + "url", + "webpki-roots 0.26.11", +] + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" 
+dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -841,6 +1173,24 @@ dependencies = [ "wasmparser", ] +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.5", +] + +[[package]] +name = "webpki-roots" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi-util" version = "0.1.10" @@ -856,6 +1206,15 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.59.0" @@ -1003,6 +1362,12 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + [[package]] name = "xml-rs" version = "0.8.27" @@ -1018,6 +1383,29 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yoke" +version = "0.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.8.27" @@ -1037,3 +1425,63 @@ dependencies = [ "quote", "syn", ] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/library/stdarch/ci/docker/hexagon-unknown-linux-musl/Dockerfile b/library/stdarch/ci/docker/hexagon-unknown-linux-musl/Dockerfile new file mode 100644 index 0000000000000..f6c0efd94629a --- /dev/null +++ b/library/stdarch/ci/docker/hexagon-unknown-linux-musl/Dockerfile @@ -0,0 +1,46 @@ +FROM ubuntu:25.10 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc \ + libc6-dev \ + ca-certificates \ + curl \ + zstd \ + file \ + make \ + libc++1 \ + libglib2.0-0t64 \ + libunwind-20 \ + liburing2 \ + llvm + +# The Hexagon toolchain requires libc++ and libunwind at runtime - create symlinks from versioned files +RUN cd /usr/lib/x86_64-linux-gnu && \ + for f in libc++.so.1.0.*; do ln -sf "$f" libc++.so.1; done && \ + for f in libc++abi.so.1.0.*; do ln -sf "$f" libc++abi.so.1; done && \ + for f in libunwind.so.1.0.*; do ln -sf "$f" libunwind.so.1; done + +# Download and install the Hexagon cross toolchain from +# https://github.com/quic/toolchain_for_hexagon/releases/tag/v21.1.8 +# Includes clang cross-compiler, musl sysroot, and qemu-hexagon. +# +# The tarball contains directories with restrictive (0700) permissions. +# In rootless Podman, chmod fails on tar-extracted files within the same +# layer due to overlayfs limitations in user namespaces. Splitting into +# two RUN steps lets chmod work via overlayfs copy-up from the lower layer. 
+RUN curl -L -o /tmp/hexagon-toolchain.tar.zst \ + https://artifacts.codelinaro.org/artifactory/codelinaro-toolchain-for-hexagon/21.1.8/clang+llvm-21.1.8-cross-hexagon-unknown-linux-musl.tar.zst && \ + mkdir -p /opt/hexagon-toolchain && \ + cd /opt/hexagon-toolchain && \ + (unzstd -c /tmp/hexagon-toolchain.tar.zst | tar -xf - --strip-components=2 --no-same-permissions || true) && \ + rm /tmp/hexagon-toolchain.tar.zst +RUN find /opt/hexagon-toolchain -type d -exec chmod a+rx {} + 2>/dev/null; \ + find /opt/hexagon-toolchain -type f -exec chmod a+r {} + 2>/dev/null; \ + find /opt/hexagon-toolchain -type f -perm /111 -exec chmod a+rx {} + 2>/dev/null; \ + /opt/hexagon-toolchain/bin/hexagon-unknown-linux-musl-clang --version + +ENV PATH="/opt/hexagon-toolchain/bin:${PATH}" \ + CARGO_TARGET_HEXAGON_UNKNOWN_LINUX_MUSL_LINKER=hexagon-unknown-linux-musl-clang \ + CARGO_TARGET_HEXAGON_UNKNOWN_LINUX_MUSL_RUNNER="qemu-hexagon -L /opt/hexagon-toolchain/target/hexagon-unknown-linux-musl" \ + CARGO_UNSTABLE_BUILD_STD_FEATURES=llvm-libunwind \ + OBJDUMP=llvm-objdump diff --git a/library/stdarch/ci/run.sh b/library/stdarch/ci/run.sh index 8a0b5fa26f66c..ea012b42f983b 100755 --- a/library/stdarch/ci/run.sh +++ b/library/stdarch/ci/run.sh @@ -50,6 +50,9 @@ case ${TARGET} in riscv*) export RUSTFLAGS="${RUSTFLAGS} -Ctarget-feature=+zk,+zks,+zbb,+zbc" ;; + hexagon*) + export RUSTFLAGS="${RUSTFLAGS} -Ctarget-feature=+hvxv60,+hvx-length128b" + ;; esac echo "RUSTFLAGS=${RUSTFLAGS}" diff --git a/library/stdarch/crates/core_arch/src/core_arch_docs.md b/library/stdarch/crates/core_arch/src/core_arch_docs.md index 7075945754975..9b52fb2af1598 100644 --- a/library/stdarch/crates/core_arch/src/core_arch_docs.md +++ b/library/stdarch/crates/core_arch/src/core_arch_docs.md @@ -186,6 +186,7 @@ others at: * [`arm`] * [`aarch64`] * [`amdgpu`] +* [`hexagon`] * [`riscv32`] * [`riscv64`] * [`mips`] @@ -203,6 +204,7 @@ others at: [`arm`]: ../../core/arch/arm/index.html [`aarch64`]: 
../../core/arch/aarch64/index.html [`amdgpu`]: ../../core/arch/amdgpu/index.html +[`hexagon`]: ../../core/arch/hexagon/index.html [`riscv32`]: ../../core/arch/riscv32/index.html [`riscv64`]: ../../core/arch/riscv64/index.html [`mips`]: ../../core/arch/mips/index.html diff --git a/library/stdarch/crates/core_arch/src/hexagon/hvx.rs b/library/stdarch/crates/core_arch/src/hexagon/hvx.rs new file mode 100644 index 0000000000000..24d42ea1fcd11 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/hexagon/hvx.rs @@ -0,0 +1,8488 @@ +//! Hexagon HVX intrinsics +//! +//! This module provides intrinsics for the Hexagon Vector Extensions (HVX). +//! HVX is a wide vector extension designed for high-performance signal processing. +//! [Hexagon HVX Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-61) +//! +//! ## Vector Types +//! +//! HVX supports different vector lengths depending on the configuration: +//! - 128-byte mode: `HvxVector` is 1024 bits (128 bytes) +//! - 64-byte mode: `HvxVector` is 512 bits (64 bytes) +//! +//! This implementation targets 128-byte mode by default. To change the vector +//! length mode, use the appropriate target feature when compiling: +//! - For 128-byte mode: `-C target-feature=+hvx-length128b` +//! - For 64-byte mode: `-C target-feature=+hvx-length64b` +//! +//! Note that HVX v66 and later default to 128-byte mode, while earlier versions +//! default to 64-byte mode. +//! +//! ## Architecture Versions +//! +//! Different intrinsics require different HVX architecture versions. Use the +//! appropriate target feature to enable the required version: +//! - HVX v60: `-C target-feature=+hvxv60` (basic HVX operations) +//! - HVX v62: `-C target-feature=+hvxv62` +//! - HVX v65: `-C target-feature=+hvxv65` (includes floating-point support) +//! - HVX v66: `-C target-feature=+hvxv66` +//! - HVX v68: `-C target-feature=+hvxv68` +//! - HVX v69: `-C target-feature=+hvxv69` +//! - HVX v73: `-C target-feature=+hvxv73` +//! 
- HVX v79: `-C target-feature=+hvxv79` +//! - HVX v81: `-C target-feature=+hvxv81` +//! +//! Each version includes all features from previous versions. + +#![allow(non_camel_case_types)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use crate::intrinsics::simd::{simd_add, simd_and, simd_or, simd_sub, simd_xor}; + +// HVX type definitions for 128-byte vector mode (default for v66+) +// Use -C target-feature=+hvx-length128b to enable +#[cfg(target_feature = "hvx-length128b")] +types! { + #![unstable(feature = "stdarch_hexagon", issue = "151523")] + + /// HVX vector type (1024 bits / 128 bytes) + /// + /// This type represents a single HVX vector register containing 32 x 32-bit values. + pub struct HvxVector(32 x i32); + + /// HVX vector pair type (2048 bits / 256 bytes) + /// + /// This type represents a pair of HVX vector registers, often used for + /// operations that produce double-width results. + pub struct HvxVectorPair(64 x i32); + + /// HVX vector predicate type (1024 bits / 128 bytes) + /// + /// This type represents a predicate vector used for conditional operations. + /// Each bit corresponds to a lane in the vector. + pub struct HvxVectorPred(32 x i32); +} + +// HVX type definitions for 64-byte vector mode (default for v60-v65) +// Use -C target-feature=+hvx-length64b to enable, or omit hvx-length128b +#[cfg(not(target_feature = "hvx-length128b"))] +types! { + #![unstable(feature = "stdarch_hexagon", issue = "151523")] + + /// HVX vector type (512 bits / 64 bytes) + /// + /// This type represents a single HVX vector register containing 16 x 32-bit values. + pub struct HvxVector(16 x i32); + + /// HVX vector pair type (1024 bits / 128 bytes) + /// + /// This type represents a pair of HVX vector registers, often used for + /// operations that produce double-width results. + pub struct HvxVectorPair(32 x i32); + + /// HVX vector predicate type (512 bits / 64 bytes) + /// + /// This type represents a predicate vector used for conditional operations. 
+ /// Each bit corresponds to a lane in the vector. + pub struct HvxVectorPred(16 x i32); +} + +// LLVM intrinsic declarations for 128-byte vector mode +#[cfg(target_feature = "hvx-length128b")] +#[allow(improper_ctypes)] +unsafe extern "unadjusted" { + #[link_name = "llvm.hexagon.V6.extractw.128B"] + fn extractw(_: HvxVector, _: i32) -> i32; + #[link_name = "llvm.hexagon.V6.get.qfext.128B"] + fn get_qfext(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.hi.128B"] + fn hi(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lo.128B"] + fn lo(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplatb.128B"] + fn lvsplatb(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplath.128B"] + fn lvsplath(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplatw.128B"] + fn lvsplatw(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.and.128B"] + fn pred_and(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.and.n.128B"] + fn pred_and_n(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.not.128B"] + fn pred_not(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.or.128B"] + fn pred_or(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.or.n.128B"] + fn pred_or_n(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.scalar2.128B"] + fn pred_scalar2(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.scalar2v2.128B"] + fn pred_scalar2v2(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.xor.128B"] + fn pred_xor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.set.qfext.128B"] + fn set_qfext(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.shuffeqh.128B"] + fn shuffeqh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.shuffeqw.128B"] + fn shuffeqw(_: HvxVector, _: HvxVector) -> HvxVector; + 
#[link_name = "llvm.hexagon.V6.v6mpyhubs10.128B"] + fn v6mpyhubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyhubs10.vxx.128B"] + fn v6mpyhubs10_vxx( + _: HvxVectorPair, + _: HvxVectorPair, + _: HvxVectorPair, + _: i32, + ) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyvubs10.128B"] + fn v6mpyvubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyvubs10.vxx.128B"] + fn v6mpyvubs10_vxx( + _: HvxVectorPair, + _: HvxVectorPair, + _: HvxVectorPair, + _: i32, + ) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vS32b.nqpred.ai.128B"] + fn vS32b_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B"] + fn vS32b_nt_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.nt.qpred.ai.128B"] + fn vS32b_nt_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.qpred.ai.128B"] + fn vS32b_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vabs.f8.128B"] + fn vabs_f8(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabs.hf.128B"] + fn vabs_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabs.sf.128B"] + fn vabs_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsb.128B"] + fn vabsb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsb.sat.128B"] + fn vabsb_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffh.128B"] + fn vabsdiffh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffub.128B"] + fn vabsdiffub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffuh.128B"] + fn vabsdiffuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffw.128B"] + fn vabsdiffw(_: HvxVector, _: HvxVector) -> 
HvxVector; + #[link_name = "llvm.hexagon.V6.vabsh.128B"] + fn vabsh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsh.sat.128B"] + fn vabsh_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsw.128B"] + fn vabsw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsw.sat.128B"] + fn vabsw_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.hf.128B"] + fn vadd_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.hf.hf.128B"] + fn vadd_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf16.128B"] + fn vadd_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf16.mix.128B"] + fn vadd_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf32.128B"] + fn vadd_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf32.mix.128B"] + fn vadd_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.sf.128B"] + fn vadd_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.sf.hf.128B"] + fn vadd_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadd.sf.sf.128B"] + fn vadd_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddb.128B"] + fn vaddb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddb.dv.128B"] + fn vaddb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddbnq.128B"] + fn vaddbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbq.128B"] + fn vaddbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbsat.128B"] + fn vaddbsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbsat.dv.128B"] + fn vaddbsat_dv(_: HvxVectorPair, _: 
HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddcarrysat.128B"] + fn vaddcarrysat(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddclbh.128B"] + fn vaddclbh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddclbw.128B"] + fn vaddclbw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddh.128B"] + fn vaddh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddh.dv.128B"] + fn vaddh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhnq.128B"] + fn vaddhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhq.128B"] + fn vaddhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhsat.128B"] + fn vaddhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhsat.dv.128B"] + fn vaddhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhw.128B"] + fn vaddhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhw.acc.128B"] + fn vaddhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubh.128B"] + fn vaddubh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubh.acc.128B"] + fn vaddubh_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubsat.128B"] + fn vaddubsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddubsat.dv.128B"] + fn vaddubsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddububb.sat.128B"] + fn vaddububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduhsat.128B"] + fn vadduhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vadduhsat.dv.128B"] + fn vadduhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduhw.128B"] + fn vadduhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduhw.acc.128B"] + fn vadduhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduwsat.128B"] + fn vadduwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduwsat.dv.128B"] + fn vadduwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddw.128B"] + fn vaddw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddw.dv.128B"] + fn vaddw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddwnq.128B"] + fn vaddwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwq.128B"] + fn vaddwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwsat.128B"] + fn vaddwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwsat.dv.128B"] + fn vaddwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.valignb.128B"] + fn valignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.valignbi.128B"] + fn valignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vand.128B"] + fn vand(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandnqrt.128B"] + fn vandnqrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandnqrt.acc.128B"] + fn vandnqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandqrt.128B"] + fn vandqrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandqrt.acc.128B"] + fn vandqrt_acc(_: HvxVector, _: HvxVector, _: i32) 
-> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvnqv.128B"] + fn vandvnqv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvqv.128B"] + fn vandvqv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvrt.128B"] + fn vandvrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvrt.acc.128B"] + fn vandvrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslh.128B"] + fn vaslh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslh.acc.128B"] + fn vaslh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslhv.128B"] + fn vaslhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslw.128B"] + fn vaslw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslw.acc.128B"] + fn vaslw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslwv.128B"] + fn vaslwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasr.into.128B"] + fn vasr_into(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vasrh.128B"] + fn vasrh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrh.acc.128B"] + fn vasrh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhbrndsat.128B"] + fn vasrhbrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhbsat.128B"] + fn vasrhbsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhubrndsat.128B"] + fn vasrhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhubsat.128B"] + fn vasrhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhv.128B"] + fn vasrhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vasruhubrndsat.128B"] + fn vasruhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruhubsat.128B"] + fn vasruhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruwuhrndsat.128B"] + fn vasruwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruwuhsat.128B"] + fn vasruwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvuhubrndsat.128B"] + fn vasrvuhubrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvuhubsat.128B"] + fn vasrvuhubsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvwuhrndsat.128B"] + fn vasrvwuhrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvwuhsat.128B"] + fn vasrvwuhsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrw.128B"] + fn vasrw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrw.acc.128B"] + fn vasrw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwh.128B"] + fn vasrwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwhrndsat.128B"] + fn vasrwhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwhsat.128B"] + fn vasrwhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwuhrndsat.128B"] + fn vasrwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwuhsat.128B"] + fn vasrwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwv.128B"] + fn vasrwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassign.128B"] + fn vassign(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassign.fp.128B"] + fn 
vassign_fp(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassignp.128B"] + fn vassignp(_: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vavgb.128B"] + fn vavgb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgbrnd.128B"] + fn vavgbrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgh.128B"] + fn vavgh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavghrnd.128B"] + fn vavghrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgub.128B"] + fn vavgub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgubrnd.128B"] + fn vavgubrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguh.128B"] + fn vavguh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguhrnd.128B"] + fn vavguhrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguw.128B"] + fn vavguw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguwrnd.128B"] + fn vavguwrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgw.128B"] + fn vavgw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgwrnd.128B"] + fn vavgwrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcl0h.128B"] + fn vcl0h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcl0w.128B"] + fn vcl0w(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcombine.128B"] + fn vcombine(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vconv.h.hf.128B"] + fn vconv_h_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.h.128B"] + fn vconv_hf_h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.qf16.128B"] + fn vconv_hf_qf16(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.qf32.128B"] + 
fn vconv_hf_qf32(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.sf.qf32.128B"] + fn vconv_sf_qf32(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.sf.w.128B"] + fn vconv_sf_w(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.w.sf.128B"] + fn vconv_w_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt2.hf.b.128B"] + fn vcvt2_hf_b(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt2.hf.ub.128B"] + fn vcvt2_hf_ub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.b.hf.128B"] + fn vcvt_b_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.h.hf.128B"] + fn vcvt_h_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.b.128B"] + fn vcvt_hf_b(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.f8.128B"] + fn vcvt_hf_f8(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.h.128B"] + fn vcvt_hf_h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.sf.128B"] + fn vcvt_hf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.ub.128B"] + fn vcvt_hf_ub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.uh.128B"] + fn vcvt_hf_uh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.sf.hf.128B"] + fn vcvt_sf_hf(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.ub.hf.128B"] + fn vcvt_ub_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.uh.hf.128B"] + fn vcvt_uh_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vd0.128B"] + fn vd0() -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdd0.128B"] + fn vdd0() -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdealb.128B"] + fn vdealb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealb4w.128B"] + fn vdealb4w(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vdealh.128B"] + fn vdealh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealvdd.128B"] + fn vdealvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdelta.128B"] + fn vdelta(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.128B"] + fn vdmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.acc.128B"] + fn vdmpy_sf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.128B"] + fn vdmpybus(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.acc.128B"] + fn vdmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.dv.128B"] + fn vdmpybus_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpybus.dv.acc.128B"] + fn vdmpybus_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhb.128B"] + fn vdmpyhb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhb.acc.128B"] + fn vdmpyhb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.128B"] + fn vdmpyhb_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.acc.128B"] + fn vdmpyhb_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhisat.128B"] + fn vdmpyhisat(_: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhisat.acc.128B"] + fn vdmpyhisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsat.128B"] + fn vdmpyhsat(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsat.acc.128B"] + fn vdmpyhsat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.128B"] + fn 
vdmpyhsuisat(_: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.acc.128B"] + fn vdmpyhsuisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsusat.128B"] + fn vdmpyhsusat(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsusat.acc.128B"] + fn vdmpyhsusat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhvsat.128B"] + fn vdmpyhvsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhvsat.acc.128B"] + fn vdmpyhvsat_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdsaduh.128B"] + fn vdsaduh(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdsaduh.acc.128B"] + fn vdsaduh_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.veqb.128B"] + fn veqb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.and.128B"] + fn veqb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.or.128B"] + fn veqb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.xor.128B"] + fn veqb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.128B"] + fn veqh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.and.128B"] + fn veqh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.or.128B"] + fn veqh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.xor.128B"] + fn veqh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.128B"] + fn veqw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.and.128B"] + fn veqw_and(_: HvxVector, _: HvxVector, _: HvxVector) 
-> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.or.128B"] + fn veqw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.xor.128B"] + fn veqw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.f8.128B"] + fn vfmax_f8(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.hf.128B"] + fn vfmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.sf.128B"] + fn vfmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.f8.128B"] + fn vfmin_f8(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.hf.128B"] + fn vfmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.sf.128B"] + fn vfmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.f8.128B"] + fn vfneg_f8(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.hf.128B"] + fn vfneg_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.sf.128B"] + fn vfneg_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgathermh.128B"] + fn vgathermh(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhq.128B"] + fn vgathermhq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhw.128B"] + fn vgathermhw(_: *mut HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhwq.128B"] + fn vgathermhwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); + #[link_name = "llvm.hexagon.V6.vgathermw.128B"] + fn vgathermw(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermwq.128B"] + fn vgathermwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgtb.128B"] + fn vgtb(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.and.128B"] + fn vgtb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.or.128B"] + fn vgtb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.xor.128B"] + fn vgtb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.128B"] + fn vgth(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.and.128B"] + fn vgth_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.or.128B"] + fn vgth_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.xor.128B"] + fn vgth_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.128B"] + fn vgthf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.and.128B"] + fn vgthf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.or.128B"] + fn vgthf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.xor.128B"] + fn vgthf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.128B"] + fn vgtsf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.and.128B"] + fn vgtsf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.or.128B"] + fn vgtsf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.xor.128B"] + fn vgtsf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.128B"] + fn vgtub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.and.128B"] + fn vgtub_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name 
= "llvm.hexagon.V6.vgtub.or.128B"] + fn vgtub_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.xor.128B"] + fn vgtub_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.128B"] + fn vgtuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.and.128B"] + fn vgtuh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.or.128B"] + fn vgtuh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.xor.128B"] + fn vgtuh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.128B"] + fn vgtuw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.and.128B"] + fn vgtuw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.or.128B"] + fn vgtuw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.xor.128B"] + fn vgtuw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.128B"] + fn vgtw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.and.128B"] + fn vgtw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.or.128B"] + fn vgtw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.xor.128B"] + fn vgtw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vinsertwr.128B"] + fn vinsertwr(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlalignb.128B"] + fn vlalignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlalignbi.128B"] + fn vlalignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrb.128B"] + fn vlsrb(_: HvxVector, 
_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrh.128B"] + fn vlsrh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrhv.128B"] + fn vlsrhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrw.128B"] + fn vlsrw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrwv.128B"] + fn vlsrwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.128B"] + fn vlutvvb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.nm.128B"] + fn vlutvvb_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.oracc.128B"] + fn vlutvvb_oracc(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.oracci.128B"] + fn vlutvvb_oracci(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvbi.128B"] + fn vlutvvbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvwh.128B"] + fn vlutvwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.nm.128B"] + fn vlutvwh_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.oracc.128B"] + fn vlutvwh_oracc(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.oracci.128B"] + fn vlutvwh_oracci(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwhi.128B"] + fn vlutvwhi(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmax.hf.128B"] + fn vmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmax.sf.128B"] + fn vmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxb.128B"] + fn vmaxb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vmaxh.128B"] + fn vmaxh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxub.128B"] + fn vmaxub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxuh.128B"] + fn vmaxuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxw.128B"] + fn vmaxw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmin.hf.128B"] + fn vmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmin.sf.128B"] + fn vmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminb.128B"] + fn vminb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminh.128B"] + fn vminh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminub.128B"] + fn vminub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminuh.128B"] + fn vminuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminw.128B"] + fn vminw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpabus.128B"] + fn vmpabus(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabus.acc.128B"] + fn vmpabus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabusv.128B"] + fn vmpabusv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuu.128B"] + fn vmpabuu(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuu.acc.128B"] + fn vmpabuu_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuuv.128B"] + fn vmpabuuv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpahb.128B"] + fn vmpahb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpahb.acc.128B"] + fn vmpahb_acc(_: HvxVectorPair, _: 
HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpauhb.128B"] + fn vmpauhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpauhb.acc.128B"] + fn vmpauhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.128B"] + fn vmpy_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.acc.128B"] + fn vmpy_hf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.128B"] + fn vmpy_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.hf.128B"] + fn vmpy_qf16_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.mix.hf.128B"] + fn vmpy_qf16_mix_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.128B"] + fn vmpy_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.hf.128B"] + fn vmpy_qf32_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.mix.hf.128B"] + fn vmpy_qf32_mix_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.qf16.128B"] + fn vmpy_qf32_qf16(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.sf.128B"] + fn vmpy_qf32_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.128B"] + fn vmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.acc.128B"] + fn vmpy_sf_hf_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.sf.sf.128B"] + fn vmpy_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpybus.128B"] + fn vmpybus(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybus.acc.128B"] + fn vmpybus_acc(_: HvxVectorPair, _: 
HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybusv.128B"] + fn vmpybusv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybusv.acc.128B"] + fn vmpybusv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybv.128B"] + fn vmpybv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybv.acc.128B"] + fn vmpybv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyewuh.128B"] + fn vmpyewuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyewuh.64.128B"] + fn vmpyewuh_64(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyh.128B"] + fn vmpyh(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyh.acc.128B"] + fn vmpyh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhsat.acc.128B"] + fn vmpyhsat_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhsrs.128B"] + fn vmpyhsrs(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyhss.128B"] + fn vmpyhss(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyhus.128B"] + fn vmpyhus(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhus.acc.128B"] + fn vmpyhus_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhv.128B"] + fn vmpyhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhv.acc.128B"] + fn vmpyhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhvsrs.128B"] + fn vmpyhvsrs(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyieoh.128B"] + fn vmpyieoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vmpyiewh.acc.128B"] + fn vmpyiewh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewuh.128B"] + fn vmpyiewuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewuh.acc.128B"] + fn vmpyiewuh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyih.128B"] + fn vmpyih(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyih.acc.128B"] + fn vmpyih_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyihb.128B"] + fn vmpyihb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyihb.acc.128B"] + fn vmpyihb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiowh.128B"] + fn vmpyiowh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwb.128B"] + fn vmpyiwb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwb.acc.128B"] + fn vmpyiwb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwh.128B"] + fn vmpyiwh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwh.acc.128B"] + fn vmpyiwh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwub.128B"] + fn vmpyiwub(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwub.acc.128B"] + fn vmpyiwub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.128B"] + fn vmpyowh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.64.acc.128B"] + fn vmpyowh_64_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.128B"] + fn vmpyowh_rnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B"] + fn vmpyowh_rnd_sacc(_: HvxVector, _: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.sacc.128B"] + fn vmpyowh_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyub.128B"] + fn vmpyub(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyub.acc.128B"] + fn vmpyub_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyubv.128B"] + fn vmpyubv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyubv.acc.128B"] + fn vmpyubv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuh.128B"] + fn vmpyuh(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuh.acc.128B"] + fn vmpyuh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhe.128B"] + fn vmpyuhe(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyuhe.acc.128B"] + fn vmpyuhe_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyuhv.128B"] + fn vmpyuhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhv.acc.128B"] + fn vmpyuhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhvs.128B"] + fn vmpyuhvs(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmux.128B"] + fn vmux(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgb.128B"] + fn vnavgb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgh.128B"] + fn vnavgh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgub.128B"] + fn vnavgub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgw.128B"] + fn vnavgw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnormamth.128B"] + fn vnormamth(_: 
HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnormamtw.128B"] + fn vnormamtw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnot.128B"] + fn vnot(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vor.128B"] + fn vor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackeb.128B"] + fn vpackeb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackeh.128B"] + fn vpackeh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackhb.sat.128B"] + fn vpackhb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackhub.sat.128B"] + fn vpackhub_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackob.128B"] + fn vpackob(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackoh.128B"] + fn vpackoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackwh.sat.128B"] + fn vpackwh_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackwuh.sat.128B"] + fn vpackwuh_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpopcounth.128B"] + fn vpopcounth(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqb.128B"] + fn vprefixqb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqh.128B"] + fn vprefixqh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqw.128B"] + fn vprefixqw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrdelta.128B"] + fn vrdelta(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybus.128B"] + fn vrmpybus(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybus.acc.128B"] + fn vrmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybusi.128B"] + fn vrmpybusi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = 
"llvm.hexagon.V6.vrmpybusi.acc.128B"] + fn vrmpybusi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpybusv.128B"] + fn vrmpybusv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybusv.acc.128B"] + fn vrmpybusv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybv.128B"] + fn vrmpybv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybv.acc.128B"] + fn vrmpybv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyub.128B"] + fn vrmpyub(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyub.acc.128B"] + fn vrmpyub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyubi.128B"] + fn vrmpyubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpyubi.acc.128B"] + fn vrmpyubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpyubv.128B"] + fn vrmpyubv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyubv.acc.128B"] + fn vrmpyubv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vror.128B"] + fn vror(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrotr.128B"] + fn vrotr(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundhb.128B"] + fn vroundhb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundhub.128B"] + fn vroundhub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrounduhub.128B"] + fn vrounduhub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrounduwuh.128B"] + fn vrounduwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundwh.128B"] + fn vroundwh(_: HvxVector, _: 
HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundwuh.128B"] + fn vroundwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrsadubi.128B"] + fn vrsadubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrsadubi.acc.128B"] + fn vrsadubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsatdw.128B"] + fn vsatdw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsathub.128B"] + fn vsathub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsatuwuh.128B"] + fn vsatuwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsatwh.128B"] + fn vsatwh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsb.128B"] + fn vsb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vscattermh.128B"] + fn vscattermh(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermh.add.128B"] + fn vscattermh_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhq.128B"] + fn vscattermhq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhw.128B"] + fn vscattermhw(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhw.add.128B"] + fn vscattermhw_add(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhwq.128B"] + fn vscattermhwq(_: HvxVector, _: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermw.128B"] + fn vscattermw(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermw.add.128B"] + fn vscattermw_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermwq.128B"] + fn vscattermwq(_: HvxVector, _: i32, _: i32, 
_: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vsh.128B"] + fn vsh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufeh.128B"] + fn vshufeh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffb.128B"] + fn vshuffb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffeb.128B"] + fn vshuffeb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffh.128B"] + fn vshuffh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffob.128B"] + fn vshuffob(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffvdd.128B"] + fn vshuffvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoeb.128B"] + fn vshufoeb(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoeh.128B"] + fn vshufoeh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoh.128B"] + fn vshufoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.hf.128B"] + fn vsub_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.hf.hf.128B"] + fn vsub_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf16.128B"] + fn vsub_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf16.mix.128B"] + fn vsub_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf32.128B"] + fn vsub_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf32.mix.128B"] + fn vsub_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.sf.128B"] + fn vsub_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.sf.hf.128B"] + fn vsub_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsub.sf.sf.128B"] + fn vsub_sf_sf(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubb.128B"] + fn vsubb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubb.dv.128B"] + fn vsubb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubbnq.128B"] + fn vsubbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbq.128B"] + fn vsubbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbsat.128B"] + fn vsubbsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbsat.dv.128B"] + fn vsubbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubh.128B"] + fn vsubh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubh.dv.128B"] + fn vsubh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubhnq.128B"] + fn vsubhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhq.128B"] + fn vsubhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhsat.128B"] + fn vsubhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhsat.dv.128B"] + fn vsubhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubhw.128B"] + fn vsubhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsububh.128B"] + fn vsububh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsububsat.128B"] + fn vsububsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsububsat.dv.128B"] + fn vsububsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubububb.sat.128B"] + fn vsubububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuhsat.128B"] 
+ fn vsubuhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuhsat.dv.128B"] + fn vsubuhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubuhw.128B"] + fn vsubuhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubuwsat.128B"] + fn vsubuwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuwsat.dv.128B"] + fn vsubuwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubw.128B"] + fn vsubw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubw.dv.128B"] + fn vsubw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubwnq.128B"] + fn vsubwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwq.128B"] + fn vsubwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwsat.128B"] + fn vsubwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwsat.dv.128B"] + fn vsubwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vswap.128B"] + fn vswap(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyb.128B"] + fn vtmpyb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyb.acc.128B"] + fn vtmpyb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpybus.128B"] + fn vtmpybus(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpybus.acc.128B"] + fn vtmpybus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyhb.128B"] + fn vtmpyhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyhb.acc.128B"] + fn vtmpyhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: 
i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackb.128B"] + fn vunpackb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackh.128B"] + fn vunpackh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackob.128B"] + fn vunpackob(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackoh.128B"] + fn vunpackoh(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackub.128B"] + fn vunpackub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackuh.128B"] + fn vunpackuh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vxor.128B"] + fn vxor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vzb.128B"] + fn vzb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vzh.128B"] + fn vzh(_: HvxVector) -> HvxVectorPair; +} + +// LLVM intrinsic declarations for 64-byte vector mode +#[cfg(not(target_feature = "hvx-length128b"))] +#[allow(improper_ctypes)] +unsafe extern "unadjusted" { + #[link_name = "llvm.hexagon.V6.extractw"] + fn extractw(_: HvxVector, _: i32) -> i32; + #[link_name = "llvm.hexagon.V6.get.qfext"] + fn get_qfext(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.hi"] + fn hi(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lo"] + fn lo(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplatb"] + fn lvsplatb(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplath"] + fn lvsplath(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplatw"] + fn lvsplatw(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.and"] + fn pred_and(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.and.n"] + fn pred_and_n(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.not"] + fn pred_not(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.or"] + fn pred_or(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.or.n"] + fn pred_or_n(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.scalar2"] + fn pred_scalar2(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.scalar2v2"] + fn pred_scalar2v2(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.xor"] + fn pred_xor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.set.qfext"] + fn set_qfext(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.shuffeqh"] + fn shuffeqh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.shuffeqw"] + fn shuffeqw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.v6mpyhubs10"] + fn v6mpyhubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyhubs10.vxx"] + fn v6mpyhubs10_vxx( + _: HvxVectorPair, + _: HvxVectorPair, + _: HvxVectorPair, + _: i32, + ) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyvubs10"] + fn v6mpyvubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyvubs10.vxx"] + fn v6mpyvubs10_vxx( + _: HvxVectorPair, + _: HvxVectorPair, + _: HvxVectorPair, + _: i32, + ) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vS32b.nqpred.ai"] + fn vS32b_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.nt.nqpred.ai"] + fn vS32b_nt_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.nt.qpred.ai"] + fn vS32b_nt_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.qpred.ai"] + fn vS32b_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vabs.f8"] + fn vabs_f8(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabs.hf"] + fn vabs_hf(_: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vabs.sf"] + fn vabs_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsb"] + fn vabsb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsb.sat"] + fn vabsb_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffh"] + fn vabsdiffh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffub"] + fn vabsdiffub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffuh"] + fn vabsdiffuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffw"] + fn vabsdiffw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsh"] + fn vabsh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsh.sat"] + fn vabsh_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsw"] + fn vabsw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsw.sat"] + fn vabsw_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.hf"] + fn vadd_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.hf.hf"] + fn vadd_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf16"] + fn vadd_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf16.mix"] + fn vadd_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf32"] + fn vadd_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf32.mix"] + fn vadd_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.sf"] + fn vadd_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.sf.hf"] + fn vadd_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadd.sf.sf"] + fn vadd_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddb"] + fn vaddb(_: HvxVector, _: HvxVector) 
-> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddb.dv"] + fn vaddb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddbnq"] + fn vaddbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbq"] + fn vaddbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbsat"] + fn vaddbsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbsat.dv"] + fn vaddbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddcarrysat"] + fn vaddcarrysat(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddclbh"] + fn vaddclbh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddclbw"] + fn vaddclbw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddh"] + fn vaddh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddh.dv"] + fn vaddh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhnq"] + fn vaddhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhq"] + fn vaddhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhsat"] + fn vaddhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhsat.dv"] + fn vaddhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhw"] + fn vaddhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhw.acc"] + fn vaddhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubh"] + fn vaddubh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubh.acc"] + fn vaddubh_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + 
#[link_name = "llvm.hexagon.V6.vaddubsat"] + fn vaddubsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddubsat.dv"] + fn vaddubsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddububb.sat"] + fn vaddububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduhsat"] + fn vadduhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduhsat.dv"] + fn vadduhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduhw"] + fn vadduhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduhw.acc"] + fn vadduhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduwsat"] + fn vadduwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduwsat.dv"] + fn vadduwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddw"] + fn vaddw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddw.dv"] + fn vaddw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddwnq"] + fn vaddwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwq"] + fn vaddwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwsat"] + fn vaddwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwsat.dv"] + fn vaddwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.valignb"] + fn valignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.valignbi"] + fn valignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vand"] + fn vand(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandnqrt"] 
+ fn vandnqrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandnqrt.acc"] + fn vandnqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandqrt"] + fn vandqrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandqrt.acc"] + fn vandqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvnqv"] + fn vandvnqv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvqv"] + fn vandvqv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvrt"] + fn vandvrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvrt.acc"] + fn vandvrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslh"] + fn vaslh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslh.acc"] + fn vaslh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslhv"] + fn vaslhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslw"] + fn vaslw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslw.acc"] + fn vaslw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslwv"] + fn vaslwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasr.into"] + fn vasr_into(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vasrh"] + fn vasrh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrh.acc"] + fn vasrh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhbrndsat"] + fn vasrhbrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhbsat"] + fn vasrhbsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhubrndsat"] + fn vasrhubrndsat(_: HvxVector, _: 
HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhubsat"] + fn vasrhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhv"] + fn vasrhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruhubrndsat"] + fn vasruhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruhubsat"] + fn vasruhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruwuhrndsat"] + fn vasruwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruwuhsat"] + fn vasruwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvuhubrndsat"] + fn vasrvuhubrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvuhubsat"] + fn vasrvuhubsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvwuhrndsat"] + fn vasrvwuhrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvwuhsat"] + fn vasrvwuhsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrw"] + fn vasrw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrw.acc"] + fn vasrw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwh"] + fn vasrwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwhrndsat"] + fn vasrwhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwhsat"] + fn vasrwhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwuhrndsat"] + fn vasrwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwuhsat"] + fn vasrwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwv"] + fn vasrwv(_: HvxVector, _: 
HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassign"] + fn vassign(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassign.fp"] + fn vassign_fp(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassignp"] + fn vassignp(_: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vavgb"] + fn vavgb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgbrnd"] + fn vavgbrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgh"] + fn vavgh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavghrnd"] + fn vavghrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgub"] + fn vavgub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgubrnd"] + fn vavgubrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguh"] + fn vavguh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguhrnd"] + fn vavguhrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguw"] + fn vavguw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguwrnd"] + fn vavguwrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgw"] + fn vavgw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgwrnd"] + fn vavgwrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcl0h"] + fn vcl0h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcl0w"] + fn vcl0w(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcombine"] + fn vcombine(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vconv.h.hf"] + fn vconv_h_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.h"] + fn vconv_hf_h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.qf16"] + fn vconv_hf_qf16(_: HvxVector) -> 
HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.qf32"] + fn vconv_hf_qf32(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.sf.qf32"] + fn vconv_sf_qf32(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.sf.w"] + fn vconv_sf_w(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.w.sf"] + fn vconv_w_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt2.hf.b"] + fn vcvt2_hf_b(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt2.hf.ub"] + fn vcvt2_hf_ub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.b.hf"] + fn vcvt_b_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.h.hf"] + fn vcvt_h_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.b"] + fn vcvt_hf_b(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.f8"] + fn vcvt_hf_f8(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.h"] + fn vcvt_hf_h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.sf"] + fn vcvt_hf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.ub"] + fn vcvt_hf_ub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.uh"] + fn vcvt_hf_uh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.sf.hf"] + fn vcvt_sf_hf(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.ub.hf"] + fn vcvt_ub_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.uh.hf"] + fn vcvt_uh_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vd0"] + fn vd0() -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdd0"] + fn vdd0() -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdealb"] + fn vdealb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealb4w"] + fn vdealb4w(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealh"] + fn vdealh(_: 
HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealvdd"] + fn vdealvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdelta"] + fn vdelta(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf"] + fn vdmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.acc"] + fn vdmpy_sf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus"] + fn vdmpybus(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.acc"] + fn vdmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.dv"] + fn vdmpybus_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpybus.dv.acc"] + fn vdmpybus_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhb"] + fn vdmpyhb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhb.acc"] + fn vdmpyhb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhb.dv"] + fn vdmpyhb_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.acc"] + fn vdmpyhb_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhisat"] + fn vdmpyhisat(_: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhisat.acc"] + fn vdmpyhisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsat"] + fn vdmpyhsat(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsat.acc"] + fn vdmpyhsat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsuisat"] + fn vdmpyhsuisat(_: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.acc"] + fn vdmpyhsuisat_acc(_: HvxVector, _: 
HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsusat"] + fn vdmpyhsusat(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsusat.acc"] + fn vdmpyhsusat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhvsat"] + fn vdmpyhvsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhvsat.acc"] + fn vdmpyhvsat_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdsaduh"] + fn vdsaduh(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdsaduh.acc"] + fn vdsaduh_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.veqb"] + fn veqb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.and"] + fn veqb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.or"] + fn veqb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.xor"] + fn veqb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh"] + fn veqh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.and"] + fn veqh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.or"] + fn veqh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.xor"] + fn veqh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw"] + fn veqw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.and"] + fn veqw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.or"] + fn veqw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.xor"] + fn veqw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> 
HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.f8"] + fn vfmax_f8(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.hf"] + fn vfmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.sf"] + fn vfmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.f8"] + fn vfmin_f8(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.hf"] + fn vfmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.sf"] + fn vfmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.f8"] + fn vfneg_f8(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.hf"] + fn vfneg_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.sf"] + fn vfneg_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgathermh"] + fn vgathermh(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhq"] + fn vgathermhq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhw"] + fn vgathermhw(_: *mut HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhwq"] + fn vgathermhwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); + #[link_name = "llvm.hexagon.V6.vgathermw"] + fn vgathermw(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermwq"] + fn vgathermwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgtb"] + fn vgtb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.and"] + fn vgtb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.or"] + fn vgtb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.xor"] + fn 
vgtb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth"] + fn vgth(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.and"] + fn vgth_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.or"] + fn vgth_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.xor"] + fn vgth_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf"] + fn vgthf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.and"] + fn vgthf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.or"] + fn vgthf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.xor"] + fn vgthf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf"] + fn vgtsf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.and"] + fn vgtsf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.or"] + fn vgtsf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.xor"] + fn vgtsf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub"] + fn vgtub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.and"] + fn vgtub_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.or"] + fn vgtub_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.xor"] + fn vgtub_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh"] + fn vgtuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.and"] + fn vgtuh_and(_: HvxVector, _: HvxVector, 
_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.or"] + fn vgtuh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.xor"] + fn vgtuh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw"] + fn vgtuw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.and"] + fn vgtuw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.or"] + fn vgtuw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.xor"] + fn vgtuw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw"] + fn vgtw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.and"] + fn vgtw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.or"] + fn vgtw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.xor"] + fn vgtw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vinsertwr"] + fn vinsertwr(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlalignb"] + fn vlalignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlalignbi"] + fn vlalignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrb"] + fn vlsrb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrh"] + fn vlsrh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrhv"] + fn vlsrhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrw"] + fn vlsrw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrwv"] + fn vlsrwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb"] + fn vlutvvb(_: HvxVector, _: HvxVector, _: i32) -> 
HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.nm"] + fn vlutvvb_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.oracc"] + fn vlutvvb_oracc(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.oracci"] + fn vlutvvb_oracci(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvbi"] + fn vlutvvbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvwh"] + fn vlutvwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.nm"] + fn vlutvwh_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.oracc"] + fn vlutvwh_oracc(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.oracci"] + fn vlutvwh_oracci(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwhi"] + fn vlutvwhi(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmax.hf"] + fn vmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmax.sf"] + fn vmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxb"] + fn vmaxb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxh"] + fn vmaxh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxub"] + fn vmaxub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxuh"] + fn vmaxuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxw"] + fn vmaxw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmin.hf"] + fn vmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmin.sf"] + fn vmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vminb"] + fn vminb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminh"] + fn vminh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminub"] + fn vminub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminuh"] + fn vminuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminw"] + fn vminw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpabus"] + fn vmpabus(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabus.acc"] + fn vmpabus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabusv"] + fn vmpabusv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuu"] + fn vmpabuu(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuu.acc"] + fn vmpabuu_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuuv"] + fn vmpabuuv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpahb"] + fn vmpahb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpahb.acc"] + fn vmpahb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpauhb"] + fn vmpauhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpauhb.acc"] + fn vmpauhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.hf.hf"] + fn vmpy_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.acc"] + fn vmpy_hf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16"] + fn vmpy_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.hf"] + fn vmpy_qf16_hf(_: HvxVector, _: 
HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.mix.hf"] + fn vmpy_qf16_mix_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf32"] + fn vmpy_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.hf"] + fn vmpy_qf32_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.mix.hf"] + fn vmpy_qf32_mix_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.qf16"] + fn vmpy_qf32_qf16(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.sf"] + fn vmpy_qf32_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.sf.hf"] + fn vmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.acc"] + fn vmpy_sf_hf_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.sf.sf"] + fn vmpy_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpybus"] + fn vmpybus(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybus.acc"] + fn vmpybus_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybusv"] + fn vmpybusv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybusv.acc"] + fn vmpybusv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybv"] + fn vmpybv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybv.acc"] + fn vmpybv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyewuh"] + fn vmpyewuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyewuh.64"] + fn vmpyewuh_64(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyh"] + fn vmpyh(_: HvxVector, 
_: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyh.acc"] + fn vmpyh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhsat.acc"] + fn vmpyhsat_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhsrs"] + fn vmpyhsrs(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyhss"] + fn vmpyhss(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyhus"] + fn vmpyhus(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhus.acc"] + fn vmpyhus_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhv"] + fn vmpyhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhv.acc"] + fn vmpyhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhvsrs"] + fn vmpyhvsrs(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyieoh"] + fn vmpyieoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewh.acc"] + fn vmpyiewh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewuh"] + fn vmpyiewuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewuh.acc"] + fn vmpyiewuh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyih"] + fn vmpyih(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyih.acc"] + fn vmpyih_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyihb"] + fn vmpyihb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyihb.acc"] + fn vmpyihb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiowh"] + fn vmpyiowh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vmpyiwb"] + fn vmpyiwb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwb.acc"] + fn vmpyiwb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwh"] + fn vmpyiwh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwh.acc"] + fn vmpyiwh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwub"] + fn vmpyiwub(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwub.acc"] + fn vmpyiwub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh"] + fn vmpyowh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.64.acc"] + fn vmpyowh_64_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyowh.rnd"] + fn vmpyowh_rnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.sacc"] + fn vmpyowh_rnd_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.sacc"] + fn vmpyowh_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyub"] + fn vmpyub(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyub.acc"] + fn vmpyub_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyubv"] + fn vmpyubv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyubv.acc"] + fn vmpyubv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuh"] + fn vmpyuh(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuh.acc"] + fn vmpyuh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhe"] + fn vmpyuhe(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyuhe.acc"] + fn 
vmpyuhe_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyuhv"] + fn vmpyuhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhv.acc"] + fn vmpyuhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhvs"] + fn vmpyuhvs(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmux"] + fn vmux(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgb"] + fn vnavgb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgh"] + fn vnavgh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgub"] + fn vnavgub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgw"] + fn vnavgw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnormamth"] + fn vnormamth(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnormamtw"] + fn vnormamtw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnot"] + fn vnot(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vor"] + fn vor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackeb"] + fn vpackeb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackeh"] + fn vpackeh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackhb.sat"] + fn vpackhb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackhub.sat"] + fn vpackhub_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackob"] + fn vpackob(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackoh"] + fn vpackoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackwh.sat"] + fn vpackwh_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackwuh.sat"] + fn 
vpackwuh_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpopcounth"] + fn vpopcounth(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqb"] + fn vprefixqb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqh"] + fn vprefixqh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqw"] + fn vprefixqw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrdelta"] + fn vrdelta(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybus"] + fn vrmpybus(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybus.acc"] + fn vrmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybusi"] + fn vrmpybusi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpybusi.acc"] + fn vrmpybusi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpybusv"] + fn vrmpybusv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybusv.acc"] + fn vrmpybusv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybv"] + fn vrmpybv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybv.acc"] + fn vrmpybv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyub"] + fn vrmpyub(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyub.acc"] + fn vrmpyub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyubi"] + fn vrmpyubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpyubi.acc"] + fn vrmpyubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpyubv"] + fn vrmpyubv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vrmpyubv.acc"] + fn vrmpyubv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vror"] + fn vror(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrotr"] + fn vrotr(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundhb"] + fn vroundhb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundhub"] + fn vroundhub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrounduhub"] + fn vrounduhub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrounduwuh"] + fn vrounduwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundwh"] + fn vroundwh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundwuh"] + fn vroundwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrsadubi"] + fn vrsadubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrsadubi.acc"] + fn vrsadubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsatdw"] + fn vsatdw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsathub"] + fn vsathub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsatuwuh"] + fn vsatuwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsatwh"] + fn vsatwh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsb"] + fn vsb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vscattermh"] + fn vscattermh(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermh.add"] + fn vscattermh_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhq"] + fn vscattermhq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = 
"llvm.hexagon.V6.vscattermhw"] + fn vscattermhw(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhw.add"] + fn vscattermhw_add(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhwq"] + fn vscattermhwq(_: HvxVector, _: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermw"] + fn vscattermw(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermw.add"] + fn vscattermw_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermwq"] + fn vscattermwq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vsh"] + fn vsh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufeh"] + fn vshufeh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffb"] + fn vshuffb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffeb"] + fn vshuffeb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffh"] + fn vshuffh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffob"] + fn vshuffob(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffvdd"] + fn vshuffvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoeb"] + fn vshufoeb(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoeh"] + fn vshufoeh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoh"] + fn vshufoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.hf"] + fn vsub_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.hf.hf"] + fn vsub_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf16"] + fn vsub_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + 
#[link_name = "llvm.hexagon.V6.vsub.qf16.mix"] + fn vsub_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf32"] + fn vsub_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf32.mix"] + fn vsub_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.sf"] + fn vsub_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.sf.hf"] + fn vsub_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsub.sf.sf"] + fn vsub_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubb"] + fn vsubb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubb.dv"] + fn vsubb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubbnq"] + fn vsubbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbq"] + fn vsubbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbsat"] + fn vsubbsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbsat.dv"] + fn vsubbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubh"] + fn vsubh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubh.dv"] + fn vsubh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubhnq"] + fn vsubhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhq"] + fn vsubhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhsat"] + fn vsubhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhsat.dv"] + fn vsubhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubhw"] + fn vsubhw(_: HvxVector, _: 
HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsububh"] + fn vsububh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsububsat"] + fn vsububsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsububsat.dv"] + fn vsububsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubububb.sat"] + fn vsubububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuhsat"] + fn vsubuhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuhsat.dv"] + fn vsubuhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubuhw"] + fn vsubuhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubuwsat"] + fn vsubuwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuwsat.dv"] + fn vsubuwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubw"] + fn vsubw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubw.dv"] + fn vsubw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubwnq"] + fn vsubwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwq"] + fn vsubwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwsat"] + fn vsubwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwsat.dv"] + fn vsubwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vswap"] + fn vswap(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyb"] + fn vtmpyb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyb.acc"] + fn vtmpyb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + 
#[link_name = "llvm.hexagon.V6.vtmpybus"] + fn vtmpybus(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpybus.acc"] + fn vtmpybus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyhb"] + fn vtmpyhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyhb.acc"] + fn vtmpyhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackb"] + fn vunpackb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackh"] + fn vunpackh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackob"] + fn vunpackob(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackoh"] + fn vunpackoh(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackub"] + fn vunpackub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackuh"] + fn vunpackuh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vxor"] + fn vxor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vzb"] + fn vzb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vzh"] + fn vzh(_: HvxVector) -> HvxVectorPair; +} + +/// `Rd32=vextract(Vu32,Rs32)` +/// +/// Instruction Type: LD +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(extractw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_r_vextract_vr(vu: HvxVector, rs: i32) -> i32 { + extractw(vu, rs) +} + +/// `Vd32=hi(Vss32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(hi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_hi_w(vss: HvxVectorPair) -> 
HvxVector { + hi(vss) +} + +/// `Vd32=lo(Vss32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(lo))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_lo_w(vss: HvxVectorPair) -> HvxVector { + lo(vss) +} + +/// `Vd32=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(lvsplatw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vsplat_r(rt: i32) -> HvxVector { + lvsplatw(rt) +} + +/// `Vd32.uh=vabsdiff(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vabsdiff_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffh(vu, vv) +} + +/// `Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vabsdiff_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffub(vu, vv) +} + +/// `Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vabsdiff_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffuh(vu, vv) +} + +/// 
`Vd32.uw=vabsdiff(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vabsdiff_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffw(vu, vv) +} + +/// `Vd32.h=vabs(Vu32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vabs_vh(vu: HvxVector) -> HvxVector { + vabsh(vu) +} + +/// `Vd32.h=vabs(Vu32.h):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vabs_vh_sat(vu: HvxVector) -> HvxVector { + vabsh_sat(vu) +} + +/// `Vd32.w=vabs(Vu32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vabs_vw(vu: HvxVector) -> HvxVector { + vabsw(vu) +} + +/// `Vd32.w=vabs(Vu32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsw_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vabs_vw_sat(vu: HvxVector) -> HvxVector { + vabsw_sat(vu) +} + +/// `Vd32.b=vadd(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vadd_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddb(vu, vv) +} + +/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vadd_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddb_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddh(vu, vv) +} + +/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddh_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddh_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddhsat(vu, vv) +} + +/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: 
SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vaddhw(vu, vv) +} + +/// `Vdd32.h=vadd(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vaddubh(vu, vv) +} + +/// `Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vadd_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddubsat(vu, vv) +} + +/// `Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wub_vadd_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddubsat_dv(vuu, vvv) +} + +/// 
`Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vadd_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadduhsat(vu, vv) +} + +/// `Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vadd_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vadduhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vadd(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vadduhw(vu, vv) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_add(vu, vv) +} + +/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddw_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_wwww(vuu: HvxVectorPair, vvv: 
HvxVectorPair) -> HvxVectorPair { + vaddw_dv(vuu, vvv) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddwsat(vu, vv) +} + +/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddwsat_dv(vuu, vvv) +} + +/// `Vd32=valign(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(valignb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_valign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + valignb(vu, vv, rt) +} + +/// `Vd32=valign(Vu32,Vv32,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(valignbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_valign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + valignbi(vu, vv, iu3) +} + +/// `Vd32=vand(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vand))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub 
unsafe fn q6_v_vand_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_and(vu, vv) +} + +/// `Vd32.h=vasl(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasl_vhr(vu: HvxVector, rt: i32) -> HvxVector { + vaslh(vu, rt) +} + +/// `Vd32.h=vasl(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasl_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaslhv(vu, vv) +} + +/// `Vd32.w=vasl(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasl_vwr(vu: HvxVector, rt: i32) -> HvxVector { + vaslw(vu, rt) +} + +/// `Vx32.w+=vasl(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vaslacc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vaslw_acc(vx, vu, rt) +} + +/// `Vd32.w=vasl(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasl_vwvw(vu: HvxVector, vv: 
HvxVector) -> HvxVector { + vaslwv(vu, vv) +} + +/// `Vd32.h=vasr(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vhr(vu: HvxVector, rt: i32) -> HvxVector { + vasrh(vu, rt) +} + +/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhbrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhbrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhubrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhubrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhubsat(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhv))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vasrhv(vu, vv) +} + +/// `Vd32.w=vasr(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasr_vwr(vu: HvxVector, rt: i32) -> HvxVector { + vasrw(vu, rt) +} + +/// `Vx32.w+=vasr(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasracc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vasrw_acc(vx, vu, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwh(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwhrndsat(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, 
assert_instr(vasrwhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwhsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwuhsat(vu, vv, rt) +} + +/// `Vd32.w=vasr(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vasrwv(vu, vv) +} + +/// `Vd32=Vu32` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vassign))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_equals_v(vu: HvxVector) -> HvxVector { + vassign(vu) +} + +/// `Vdd32=Vuu32` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vassignp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_equals_w(vuu: HvxVectorPair) -> HvxVectorPair { + vassignp(vuu) +} + +/// `Vd32.h=vavg(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgh))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgh(vu, vv) +} + +/// `Vd32.h=vavg(Vu32.h,Vv32.h):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavghrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vavg_vhvh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavghrnd(vu, vv) +} + +/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgub(vu, vv) +} + +/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgubrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vavg_vubvub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgubrnd(vu, vv) +} + +/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavguh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vavg_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguh(vu, vv) +} + +/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, 
assert_instr(vavguhrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vavg_vuhvuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguhrnd(vu, vv) +} + +/// `Vd32.w=vavg(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgw(vu, vv) +} + +/// `Vd32.w=vavg(Vu32.w,Vv32.w):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgwrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vavg_vwvw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgwrnd(vu, vv) +} + +/// `Vd32.uh=vcl0(Vu32.uh)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcl0h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vcl0_vuh(vu: HvxVector) -> HvxVector { + vcl0h(vu) +} + +/// `Vd32.uw=vcl0(Vu32.uw)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcl0w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vcl0_vuw(vu: HvxVector) -> HvxVector { + vcl0w(vu) +} + +/// `Vdd32=vcombine(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcombine))] +#[unstable(feature = "stdarch_hexagon", issue 
= "151523")] +pub unsafe fn q6_w_vcombine_vv(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vcombine(vu, vv) +} + +/// `Vd32=#0` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vd0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vzero() -> HvxVector { + vd0() +} + +/// `Vd32.b=vdeal(Vu32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vdeal_vb(vu: HvxVector) -> HvxVector { + vdealb(vu) +} + +/// `Vd32.b=vdeale(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealb4w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vdeale_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdealb4w(vu, vv) +} + +/// `Vd32.h=vdeal(Vu32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdeal_vh(vu: HvxVector) -> HvxVector { + vdealh(vu) +} + +/// `Vdd32=vdeal(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealvdd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vdeal_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vdealvdd(vu, vv, rt) +} + +/// 
`Vd32=vdelta(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdelta))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdelta(vu, vv) +} + +/// `Vd32.h=vdmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { + vdmpybus(vu, rt) +} + +/// `Vx32.h+=vdmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdmpyacc_vhvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpybus_acc(vx, vu, rt) +} + +/// `Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vdmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdmpybus_dv(vuu, rt) +} + +/// `Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_dv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vdmpyacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, 
+ rt: i32, +) -> HvxVectorPair { + vdmpybus_dv_acc(vxx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhrb(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhb(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhb_acc(vx, vu, rt) +} + +/// `Vdd32.w=vdmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vdmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdmpyhb_dv(vuu, rt) +} + +/// `Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_dv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vdmpyacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vdmpyhb_dv_acc(vxx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhisat))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_whrh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhisat(vuu, rt) +} + +/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhisat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwwhrh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhisat_acc(vx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhrh_sat(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsat(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhrh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsat_acc(vx, vu, rt) +} + +/// `Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsuisat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_whruh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhsuisat(vuu, rt) +} + +/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsuisat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwwhruh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhsuisat_acc(vx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsusat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhruh_sat(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsusat(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsusat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhruh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsusat_acc(vx, vu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhvsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpyhvsat(vu, vv) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhvsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhvh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + 
vdmpyhvsat_acc(vx, vu, vv) +} + +/// `Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdsaduh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vdsad_wuhruh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdsaduh(vuu, rt) +} + +/// `Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdsaduh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vdsadacc_wuwwuhruh( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vdsaduh_acc(vxx, vuu, rt) +} + +/// `Vx32.w=vinsert(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vinsertwr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vinsert_vwr(vx: HvxVector, rt: i32) -> HvxVector { + vinsertwr(vx, rt) +} + +/// `Vd32=vlalign(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlalignb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vlalign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlalignb(vu, vv, rt) +} + +/// `Vd32=vlalign(Vu32,Vv32,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlalignbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub 
unsafe fn q6_v_vlalign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + vlalignbi(vu, vv, iu3) +} + +/// `Vd32.uh=vlsr(Vu32.uh,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vlsr_vuhr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrh(vu, rt) +} + +/// `Vd32.h=vlsr(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vlsr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vlsrhv(vu, vv) +} + +/// `Vd32.uw=vlsr(Vu32.uw,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vlsr_vuwr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrw(vu, rt) +} + +/// `Vd32.w=vlsr(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vlsr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vlsrwv(vu, vv) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvvb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbr(vu: 
HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlutvvb(vu, vv, rt) +} + +/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvvb_oracc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32or_vbvbvbr( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, + rt: i32, +) -> HvxVector { + vlutvvb_oracc(vx, vu, vv, rt) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16_vbvhr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vlutvwh(vu, vv, rt) +} + +/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvwh_oracc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16or_whvbvhr( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, + rt: i32, +) -> HvxVectorPair { + vlutvwh_oracc(vxx, vu, vv, rt) +} + +/// `Vd32.h=vmax(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmax_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxh(vu, vv) +} + +/// `Vd32.ub=vmax(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vmax_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxub(vu, vv) +} + +/// `Vd32.uh=vmax(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmax_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxuh(vu, vv) +} + +/// `Vd32.w=vmax(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmax_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxw(vu, vv) +} + +/// `Vd32.h=vmin(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmin_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminh(vu, vv) +} + +/// `Vd32.ub=vmin(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vmin_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminub(vu, vv) +} + +/// `Vd32.uh=vmin(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv60"))] +#[cfg_attr(test, assert_instr(vminuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmin_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminuh(vu, vv) +} + +/// `Vd32.w=vmin(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmin_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminw(vu, vv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpabus(vuu, rt) +} + +/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpaacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpabus_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vmpabusv(vuu, vvv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabuuv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubwub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vmpabuuv(vuu, vvv) +} + +/// `Vdd32.w=vmpa(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpahb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpa_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpahb(vuu, rt) +} + +/// `Vxx32.w+=vmpa(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpahb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpaacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpahb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpybus(vu, rt) +} + +/// `Vxx32.h+=vmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvubrb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpybus_acc(vxx, vu, rt) +} + +/// 
`Vdd32.h=vmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpybusv(vu, vv) +} + +/// `Vxx32.h+=vmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybusv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvubvb( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpybusv_acc(vxx, vu, vv) +} + +/// `Vdd32.h=vmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpybv(vu, vv) +} + +/// `Vxx32.h+=vmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvbvb( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpybv_acc(vxx, vu, vv) +} + +/// `Vd32.w=vmpye(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyewuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub 
unsafe fn q6_vw_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyewuh(vu, vv) +} + +/// `Vdd32.w=vmpy(Vu32.h,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhrh(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyh(vu, rt) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhrh_sat( + vxx: HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyhsat_acc(vxx, vu, rt) +} + +/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhsrs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhrh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { + vmpyhsrs(vu, rt) +} + +/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhss))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhrh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { + vmpyhss(vu, rt) +} + +/// `Vdd32.w=vmpy(Vu32.h,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhus))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyhus(vu, vv) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyhus_acc(vxx, vu, vv) +} + +/// `Vdd32.w=vmpy(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyhv(vu, vv) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyhv_acc(vxx, vu, vv) +} + +/// `Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhvsrs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyhvsrs(vu, vv) +} + +/// `Vd32.w=vmpyieo(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyieoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyieoh(vu, vv) +} + +/// `Vx32.w+=vmpyie(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieacc_vwvwvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewh_acc(vx, vu, vv) +} + +/// `Vd32.w=vmpyie(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyie_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewuh(vu, vv) +} + +/// `Vx32.w+=vmpyie(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewuh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieacc_vwvwvuh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewuh_acc(vx, vu, vv) +} + +/// `Vd32.h=vmpyi(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyih))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyi_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyih(vu, vv) +} + +/// `Vx32.h+=vmpyi(Vu32.h,Vv32.h)` +/// +/// 
Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyih_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyiacc_vhvhvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyih_acc(vx, vu, vv) +} + +/// `Vd32.h=vmpyi(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyihb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyi_vhrb(vu: HvxVector, rt: i32) -> HvxVector { + vmpyihb(vu, rt) +} + +/// `Vx32.h+=vmpyi(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyihb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyiacc_vhvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyihb_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyio(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiowh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyio_vwvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiowh(vu, vv) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrb(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwb(vu, rt) +} + +/// 
`Vx32.w+=vmpyi(Vu32.w,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwb_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrh(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwh(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwh_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyo_vwvh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyowh(vu, vv) +} + +/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_rnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyo_vwvh_s1_rnd_sat(vu: 
HvxVector, vv: HvxVector) -> HvxVector { + vmpyowh_rnd(vu, vv) +} + +/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_rnd_sacc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_rnd_sat_shift( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, +) -> HvxVector { + vmpyowh_rnd_sacc(vx, vu, vv) +} + +/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_sacc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_sat_shift( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, +) -> HvxVector { + vmpyowh_sacc(vx, vu, vv) +} + +/// `Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyub(vu, rt) +} + +/// `Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpyacc_wuhvubrub( + vxx: HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyub_acc(vxx, vu, rt) +} + +/// `Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyubv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyubv(vu, vv) +} + +/// `Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyubv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpyacc_wuhvubvub( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyubv_acc(vxx, vu, vv) +} + +/// `Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpy_vuhruh(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyuh(vu, rt) +} + +/// `Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpyacc_wuwvuhruh( + vxx: HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyuh_acc(vxx, vu, rt) +} + +/// `Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpy_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyuhv(vu, vv) +} + +/// 
`Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuhv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpyacc_wuwvuhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyuhv_acc(vxx, vu, vv) +} + +/// `Vd32.h=vnavg(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vnavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgh(vu, vv) +} + +/// `Vd32.b=vnavg(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vnavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgub(vu, vv) +} + +/// `Vd32.w=vnavg(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vnavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgw(vu, vv) +} + +/// `Vd32.h=vnormamt(Vu32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnormamth))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vnormamt_vh(vu: HvxVector) -> HvxVector { + 
vnormamth(vu) +} + +/// `Vd32.w=vnormamt(Vu32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnormamtw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vnormamt_vw(vu: HvxVector) -> HvxVector { + vnormamtw(vu) +} + +/// `Vd32=vnot(Vu32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnot))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vnot_v(vu: HvxVector) -> HvxVector { + vnot(vu) +} + +/// `Vd32=vor(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_or(vu, vv) +} + +/// `Vd32.b=vpacke(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpacke_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackeb(vu, vv) +} + +/// `Vd32.h=vpacke(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpacke_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackeh(vu, vv) +} + +/// `Vd32.b=vpack(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VP +/// 
Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackhb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackhb_sat(vu, vv) +} + +/// `Vd32.ub=vpack(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackhub_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackhub_sat(vu, vv) +} + +/// `Vd32.b=vpacko(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpacko_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackob(vu, vv) +} + +/// `Vd32.h=vpacko(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpacko_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackoh(vu, vv) +} + +/// `Vd32.h=vpack(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackwh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackwh_sat(vu, vv) +} + +/// `Vd32.uh=vpack(Vu32.w,Vv32.w):sat` +/// 
+/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackwuh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackwuh_sat(vu, vv) +} + +/// `Vd32.h=vpopcount(Vu32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpopcounth))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpopcount_vh(vu: HvxVector) -> HvxVector { + vpopcounth(vu) +} + +/// `Vd32=vrdelta(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrdelta))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vrdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrdelta(vu, vv) +} + +/// `Vd32.w=vrmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { + vrmpybus(vu, rt) +} + +/// `Vx32.w+=vrmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vrmpybus_acc(vx, vu, rt) +} + +/// `Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)` 
+/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vrmpy_wubrbi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrmpybusi(vuu, rt, iu1) +} + +/// `Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vrmpyacc_wwwubrbi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + vrmpybusi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.w=vrmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybusv(vu, vv) +} + +/// `Vx32.w+=vrmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvubvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybusv_acc(vx, vu, vv) +} + +/// `Vd32.w=vrmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub 
unsafe fn q6_vw_vrmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybv(vu, vv) +} + +/// `Vx32.w+=vrmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvbvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybv_acc(vx, vu, vv) +} + +/// `Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVector { + vrmpyub(vu, rt) +} + +/// `Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpyacc_vuwvubrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vrmpyub_acc(vx, vu, rt) +} + +/// `Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrmpy_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrmpyubi(vuu, rt, iu1) +} + +/// `Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, 
assert_instr(vrmpyubi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrmpyacc_wuwwubrubi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + vrmpyubi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpyubv(vu, vv) +} + +/// `Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpyacc_vuwvubvub(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpyubv_acc(vx, vu, vv) +} + +/// `Vd32=vror(Vu32,Rt32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vror))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vror_vr(vu: HvxVector, rt: i32) -> HvxVector { + vror(vu, rt) +} + +/// `Vd32.b=vround(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundhb(vu, vv) +} + +/// `Vd32.ub=vround(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundhub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundhub(vu, vv) +} + +/// `Vd32.h=vround(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundwh(vu, vv) +} + +/// `Vd32.uh=vround(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundwuh(vu, vv) +} + +/// `Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrsadubi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrsad_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrsadubi(vuu, rt, iu1) +} + +/// `Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrsadubi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrsadacc_wuwwubrubi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + 
vrsadubi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.ub=vsat(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsathub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsat_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsathub(vu, vv) +} + +/// `Vd32.h=vsat(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsatwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsat_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatwh(vu, vv) +} + +/// `Vdd32.h=vsxt(Vu32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsxt_vb(vu: HvxVector) -> HvxVectorPair { + vsb(vu) +} + +/// `Vdd32.w=vsxt(Vu32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsxt_vh(vu: HvxVector) -> HvxVectorPair { + vsh(vu) +} + +/// `Vd32.h=vshuffe(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuffe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshufeh(vu, vv) +} + +/// `Vd32.b=vshuff(Vu32.b)` +/// +/// 
Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuff_vb(vu: HvxVector) -> HvxVector { + vshuffb(vu) +} + +/// `Vd32.b=vshuffe(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuffe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshuffeb(vu, vv) +} + +/// `Vd32.h=vshuff(Vu32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuff_vh(vu: HvxVector) -> HvxVector { + vshuffh(vu) +} + +/// `Vd32.b=vshuffo(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuffo_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshuffob(vu, vv) +} + +/// `Vdd32=vshuff(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffvdd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vshuff_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vshuffvdd(vu, vv, rt) +} + +/// `Vdd32.b=vshuffoe(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA_DV 
+/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vshuffoe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vshufoeb(vu, vv) +} + +/// `Vdd32.h=vshuffoe(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vshuffoe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vshufoeh(vu, vv) +} + +/// `Vd32.h=vshuffo(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuffo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshufoh(vu, vv) +} + +/// `Vd32.b=vsub(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsub_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubb(vu, vv) +} + +/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vsub_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubb_dv(vuu, vvv) +} + +/// `Vd32.h=vsub(Vu32.h,Vv32.h)` +/// +/// 
Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubh(vu, vv) +} + +/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubh_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubh_dv(vuu, vvv) +} + +/// `Vd32.h=vsub(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsub_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubhsat(vu, vv) +} + +/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vsub(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsubhw(vu, vv) +} + +/// 
`Vdd32.h=vsub(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsububh(vu, vv) +} + +/// `Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsub_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsububsat(vu, vv) +} + +/// `Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wub_vsub_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsububsat_dv(vuu, vvv) +} + +/// `Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vsub_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubuhsat(vu, vv) +} + +/// `Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_wuh_vsub_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubuhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vsub(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsubuhw(vu, vv) +} + +/// `Vd32.w=vsub(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vsub_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_sub(vu, vv) +} + +/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubw_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubw_dv(vuu, vvv) +} + +/// `Vd32.w=vsub(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vsub_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubwsat(vu, vv) +} + +/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubwsat_dv))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubwsat_dv(vuu, vvv) +} + +/// `Vdd32.h=vtmpy(Vuu32.b,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpy_wbrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpyb(vuu, rt) +} + +/// `Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpyacc_whwbrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpyb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpybus(vuu, rt) +} + +/// `Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpyacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpybus_acc(vxx, vuu, rt) +} + +/// `Vdd32.w=vtmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vtmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpyhb(vuu, rt) +} + +/// `Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vtmpyacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpyhb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vunpack(Vu32.b)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vunpack_vb(vu: HvxVector) -> HvxVectorPair { + vunpackb(vu) +} + +/// `Vdd32.w=vunpack(Vu32.h)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vunpack_vh(vu: HvxVector) -> HvxVectorPair { + vunpackh(vu) +} + +/// `Vxx32.h|=vunpacko(Vu32.b)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vunpackoor_whvb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { + vunpackob(vxx, vu) +} + +/// `Vxx32.w|=vunpacko(Vu32.h)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: 
SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vunpackoor_wwvh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { + vunpackoh(vxx, vu) +} + +/// `Vdd32.uh=vunpack(Vu32.ub)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vunpack_vub(vu: HvxVector) -> HvxVectorPair { + vunpackub(vu) +} + +/// `Vdd32.uw=vunpack(Vu32.uh)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vunpack_vuh(vu: HvxVector) -> HvxVectorPair { + vunpackuh(vu) +} + +/// `Vd32=vxor(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vxor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vxor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_xor(vu, vv) +} + +/// `Vdd32.uh=vzxt(Vu32.ub)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vzb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vzxt_vub(vu: HvxVector) -> HvxVectorPair { + vzb(vu) +} + +/// `Vdd32.uw=vzxt(Vu32.uh)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vzh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vzxt_vuh(vu: HvxVector) -> HvxVectorPair { + vzh(vu) +} + +/// `Vd32.b=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(lvsplatb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsplat_r(rt: i32) -> HvxVector { + lvsplatb(rt) +} + +/// `Vd32.h=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(lvsplath))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsplat_r(rt: i32) -> HvxVector { + lvsplath(rt) +} + +/// `Vd32.b=vadd(Vu32.b,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vadd_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddbsat(vu, vv) +} + +/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddbsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vadd_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddbsat_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(vclb(Vu32.h),Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] 
+#[cfg_attr(test, assert_instr(vaddclbh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vclb_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddclbh(vu, vv) +} + +/// `Vd32.w=vadd(vclb(Vu32.w),Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddclbw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vclb_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddclbw(vu, vv) +} + +/// `Vxx32.w+=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddhw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vaddacc_wwvhvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vaddhw_acc(vxx, vu, vv) +} + +/// `Vxx32.h+=vadd(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddubh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vaddacc_whvubvub( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vaddubh_acc(vxx, vu, vv) +} + +/// `Vd32.ub=vadd(Vu32.ub,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddububb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vadd_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddububb_sat(vu, vv) +} + +/// `Vxx32.w+=vadd(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: 
CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduhw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vaddacc_wwvuhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vadduhw_acc(vxx, vu, vv) +} + +/// `Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vadd_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadduwsat(vu, vv) +} + +/// `Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vadd_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vadduwsat_dv(vuu, vvv) +} + +/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasrhbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhbsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasruwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vuh_vasr_vuwvuwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruwuhrndsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasrwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwuhrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vlsr(Vu32.ub,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlsrb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vlsr_vubr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrb(vu, rt) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvb_nm))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlutvvb_nm(vu, vv, rt) +} + +/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvb_oracci))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32or_vbvbvbi( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, + iu3: i32, +) -> HvxVector { + vlutvvb_oracci(vx, vu, vv, iu3) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + vlutvvbi(vu, vv, iu3) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwh_nm))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16_vbvhr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vlutvwh_nm(vu, vv, rt) +} + +/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwh_oracci))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16or_whvbvhi( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, + iu3: i32, +) -> HvxVectorPair { + vlutvwh_oracci(vxx, vu, vv, iu3) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwhi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16_vbvhi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVectorPair { + vlutvwhi(vu, vv, iu3) +} + +/// `Vd32.b=vmax(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmaxb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vmax_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + 
vmaxb(vu, vv) +} + +/// `Vd32.b=vmin(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vminb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vmin_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminb(vu, vv) +} + +/// `Vdd32.w=vmpa(Vuu32.uh,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpauhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpa_wuhrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpauhb(vuu, rt) +} + +/// `Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpauhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpaacc_wwwuhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpauhb_acc(vxx, vuu, rt) +} + +/// `Vdd32=vmpye(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyewuh_64))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyewuh_64(vu, vv) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyiwub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vw_vmpyi_vwrub(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwub(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyiwub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwub_acc(vx, vu, rt) +} + +/// `Vxx32+=vmpyo(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyowh_64_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vmpyoacc_wvwvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyowh_64_acc(vxx, vu, vv) +} + +/// `Vd32.ub=vround(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vrounduhub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vround_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrounduhub(vu, vv) +} + +/// `Vd32.uh=vround(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vrounduwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vround_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrounduwuh(vu, vv) +} + +/// `Vd32.uh=vsat(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] 
+#[cfg_attr(test, assert_instr(vsatuwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vsat_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatuwuh(vu, vv) +} + +/// `Vd32.b=vsub(Vu32.b,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsub_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubbsat(vu, vv) +} + +/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubbsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vsub_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubbsat_dv(vuu, vvv) +} + +/// `Vd32.ub=vsub(Vu32.ub,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubububb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsub_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubububb_sat(vu, vv) +} + +/// `Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubuwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vsub_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubuwsat(vu, vv) +} + +/// `Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubuwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vsub_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubuwsat_dv(vuu, vvv) +} + +/// `Vd32.b=vabs(Vu32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vabsb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vabs_vb(vu: HvxVector) -> HvxVector { + vabsb(vu) +} + +/// `Vd32.b=vabs(Vu32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vabsb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vabs_vb_sat(vu: HvxVector) -> HvxVector { + vabsb_sat(vu) +} + +/// `Vx32.h+=vasl(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vaslh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vaslacc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vaslh_acc(vx, vu, rt) +} + +/// `Vx32.h+=vasr(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasrh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasracc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vasrh_acc(vx, vu, rt) +} + +/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruhubrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vuhvuhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruhubrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vuhvuhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruhubsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vuwvuwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruwuhsat(vu, vv, rt) +} + +/// `Vd32.b=vavg(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavgb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgb(vu, vv) +} + +/// `Vd32.b=vavg(Vu32.b,Vv32.b):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavgbrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vavg_vbvb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgbrnd(vu, vv) +} + +/// 
`Vd32.uw=vavg(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavguw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vavg_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguw(vu, vv) +} + +/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavguwrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vavg_vuwvuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguwrnd(vu, vv) +} + +/// `Vdd32=#0` +/// +/// Instruction Type: MAPPING +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vdd0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vzero() -> HvxVectorPair { + vdd0() +} + +/// `vtmp.h=vgather(Rt32,Mu2,Vv32.h).h` +/// +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armvh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { + vgathermh(rs, rt, mu, vv) +} + +/// `vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h` +/// +/// Instruction Type: CVI_GATHER_DV +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armww(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVectorPair) { + vgathermhw(rs, rt, mu, vvv) +} + 
+/// `vtmp.w=vgather(Rt32,Mu2,Vv32.w).w` +/// +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armvw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { + vgathermw(rs, rt, mu, vv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpabuu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubrub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpabuu(vuu, rt) +} + +/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpabuu_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpaacc_whwubrub( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpabuu_acc(vxx, vuu, rt) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhrh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyh_acc(vxx, vu, rt) +} + +/// `Vd32.uw=vmpye(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyuhe))] +#[unstable(feature = "stdarch_hexagon", issue = 
"151523")] +pub unsafe fn q6_vuw_vmpye_vuhruh(vu: HvxVector, rt: i32) -> HvxVector { + vmpyuhe(vu, rt) +} + +/// `Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyuhe_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vmpyeacc_vuwvuhruh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyuhe_acc(vx, vu, rt) +} + +/// `Vd32.b=vnavg(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vnavgb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vnavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgb(vu, vv) +} + +/// `vscatter(Rt32,Mu2,Vv32.h).h=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermh(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.h).h+=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermh_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermh_add(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vvv32.w).h=Vw32` +/// +/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] 
+#[cfg_attr(test, assert_instr(vscattermhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { + vscattermhw(rt, mu, vvv, vw) +} + +/// `vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32` +/// +/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermhw_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { + vscattermhw_add(rt, mu, vvv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.w).w=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermw(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.w).w+=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermw_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermw_add(rt, mu, vv, vw) +} + +/// `Vxx32.w=vasrinto(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vasr_into))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vasrinto_wwvwvw( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vasr_into(vxx, vu, vv) +} + +/// 
`Vd32.uw=vrotr(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vrotr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrotr_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrotr(vu, vv) +} + +/// `Vd32.w=vsatdw(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vsatdw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vsatdw_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatdw(vu, vv) +} + +/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyhubs10))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpy_wubwbi_h( + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyhubs10(vuu, vvv, iu2) +} + +/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyhubs10_vxx))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_h( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyhubs10_vxx(vxx, vuu, vvv, iu2) +} + +/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyvubs10))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpy_wubwbi_v( + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyvubs10(vuu, vvv, iu2) +} + +/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyvubs10_vxx))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_v( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyvubs10_vxx(vxx, vuu, vvv, iu2) +} + +/// `Vd32.hf=vabs(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vabs_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vabs_vhf(vu: HvxVector) -> HvxVector { + vabs_hf(vu) +} + +/// `Vd32.sf=vabs(Vu32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vabs_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vabs_vsf(vu: HvxVector) -> HvxVector { + vabs_sf(vu) +} + +/// `Vd32.qf16=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_hf(vu, vv) +} + +/// `Vd32.hf=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch 
= "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_hf_hf(vu, vv) +} + +/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf16(vu, vv) +} + +/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf16_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf16_mix(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf32(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf32_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf32_mix(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: 
CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_sf(vu, vv) +} + +/// `Vdd32.sf=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vadd_sf_hf(vu, vv) +} + +/// `Vd32.sf=vadd(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_sf_sf(vu, vv) +} + +/// `Vd32.w=vfmv(Vu32.w)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vassign_fp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vfmv_vw(vu: HvxVector) -> HvxVector { + vassign_fp(vu) +} + +/// `Vd32.hf=Vu32.qf16` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_hf_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_vqf16(vu: HvxVector) -> HvxVector { + vconv_hf_qf16(vu) +} + +/// `Vd32.hf=Vuu32.qf32` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_hf_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_wqf32(vuu: HvxVectorPair) -> HvxVector { + vconv_hf_qf32(vuu) +} + +/// `Vd32.sf=Vu32.qf32` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_sf_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_equals_vqf32(vu: HvxVector) -> HvxVector { + vconv_sf_qf32(vu) +} + +/// `Vd32.b=vcvt(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_b_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_b_hf(vu, vv) +} + +/// `Vd32.h=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_h_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vcvt_vhf(vu: HvxVector) -> HvxVector { + vcvt_h_hf(vu) +} + +/// `Vdd32.hf=vcvt(Vu32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_b))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_vb(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_b(vu) +} + +/// `Vd32.hf=vcvt(Vu32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vh(vu: HvxVector) -> HvxVector { + vcvt_hf_h(vu) +} + +/// `Vd32.hf=vcvt(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_hf_sf(vu, vv) +} + +/// `Vdd32.hf=vcvt(Vu32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_ub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_vub(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_ub(vu) +} + +/// `Vd32.hf=vcvt(Vu32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_uh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vuh(vu: HvxVector) -> HvxVector { + vcvt_hf_uh(vu) +} + +/// `Vdd32.sf=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vcvt_vhf(vu: HvxVector) -> HvxVectorPair { + vcvt_sf_hf(vu) +} + +/// `Vd32.ub=vcvt(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_ub_hf))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_ub_hf(vu, vv) +} + +/// `Vd32.uh=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_uh_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vcvt_vhf(vu: HvxVector) -> HvxVector { + vcvt_uh_hf(vu) +} + +/// `Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vdmpy_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vdmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpy_sf_hf(vu, vv) +} + +/// `Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vdmpy_sf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vdmpyacc_vsfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpy_sf_hf_acc(vx, vu, vv) +} + +/// `Vd32.hf=vfmax(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmax_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_hf(vu, vv) +} + +/// `Vd32.sf=vfmax(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, 
assert_instr(vfmax_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_sf(vu, vv) +} + +/// `Vd32.hf=vfmin(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmin_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_hf(vu, vv) +} + +/// `Vd32.sf=vfmin(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmin_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_sf(vu, vv) +} + +/// `Vd32.hf=vfneg(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfneg_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfneg_vhf(vu: HvxVector) -> HvxVector { + vfneg_hf(vu) +} + +/// `Vd32.sf=vfneg(Vu32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfneg_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfneg_vsf(vu: HvxVector) -> HvxVector { + vfneg_sf(vu) +} + +/// `Vd32.hf=vmax(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmax_hf))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmax_hf(vu, vv) +} + +/// `Vd32.sf=vmax(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmax_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmax_sf(vu, vv) +} + +/// `Vd32.hf=vmin(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmin_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmin_hf(vu, vv) +} + +/// `Vd32.sf=vmin(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmin_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmin_sf(vu, vv) +} + +/// `Vd32.hf=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_hf_hf(vu, vv) +} + +/// `Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, 
assert_instr(vmpy_hf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmpyacc_vhfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_hf_hf_acc(vx, vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16(vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf16_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16_hf(vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf16_mix_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16_mix_hf(vu, vv) +} + +/// `Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vmpy_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf32(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: 
SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_hf(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_mix_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_mix_hf(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_qf16(vu, vv) +} + +/// `Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf32_sf(vu, vv) +} + +/// `Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_sf_hf(vu, 
vv) +} + +/// `Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vmpyacc_wsfvhfvhf( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpy_sf_hf_acc(vxx, vu, vv) +} + +/// `Vd32.sf=vmpy(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_sf_sf(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_hf(vu, vv) +} + +/// `Vd32.hf=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_hf_hf(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe 
fn q6_vqf16_vsub_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf16(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf16_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf16_mix(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vsub_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf32(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf32_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vsub_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf32_mix(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_sf(vu, vv) +} + +/// `Vdd32.sf=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf_hf))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsub_sf_hf(vu, vv) +} + +/// `Vd32.sf=vsub(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_sf_sf(vu, vv) +} + +/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvuhubrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_wuhvub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvuhubrndsat(vuu, vv) +} + +/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvuhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_wuhvub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvuhubsat(vuu, vv) +} + +/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_wwvuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvwuhrndsat(vuu, vv) +} + +/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_wwvuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvwuhsat(vuu, vv) +} + +/// `Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vmpyuhvs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmpy_vuhvuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyuhvs(vu, vv) +} + +/// `Vd32.h=Vu32.hf` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_h_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_equals_vhf(vu: HvxVector) -> HvxVector { + vconv_h_hf(vu) +} + +/// `Vd32.hf=Vu32.h` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_hf_h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_vh(vu: HvxVector) -> HvxVector { + vconv_hf_h(vu) +} + +/// `Vd32.sf=Vu32.w` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_sf_w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_equals_vw(vu: HvxVector) -> HvxVector { + vconv_sf_w(vu) +} + +/// `Vd32.w=Vu32.sf` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] 
+#[cfg_attr(test, assert_instr(vconv_w_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_equals_vsf(vu: HvxVector) -> HvxVector { + vconv_w_sf(vu) +} + +/// `Vd32=vgetqfext(Vu32.x,Rt32)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(get_qfext))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vgetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { + get_qfext(vu, rt) +} + +/// `Vd32.x=vsetqfext(Vu32,Rt32)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(set_qfext))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vsetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { + set_qfext(vu, rt) +} + +/// `Vd32.f8=vabs(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vabs_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vabs_v(vu: HvxVector) -> HvxVector { + vabs_f8(vu) +} + +/// `Vdd32.hf=vcvt2(Vu32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt2_hf_b))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt2_vb(vu: HvxVector) -> HvxVectorPair { + vcvt2_hf_b(vu) +} + +/// `Vdd32.hf=vcvt2(Vu32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt2_hf_ub))] +#[unstable(feature = "stdarch_hexagon", 
issue = "151523")] +pub unsafe fn q6_whf_vcvt2_vub(vu: HvxVector) -> HvxVectorPair { + vcvt2_hf_ub(vu) +} + +/// `Vdd32.hf=vcvt(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt_hf_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_v(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_f8(vu) +} + +/// `Vd32.f8=vfmax(Vu32.f8,Vv32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfmax_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vfmax_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_f8(vu, vv) +} + +/// `Vd32.f8=vfmin(Vu32.f8,Vv32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfmin_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vfmin_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_f8(vu, vv) +} + +/// `Vd32.f8=vfneg(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfneg_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vfneg_v(vu: HvxVector) -> HvxVector { + vfneg_f8(vu) +} + +/// `Qd4=and(Qs4,Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_and_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_and( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=and(Qs4,!Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_and_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_and_n( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=not(Qs4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_not_q(qs: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_not(vandvrt( + core::mem::transmute::(qs), + -1, + )), + -1, + )) +} + +/// `Qd4=or(Qs4,Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_or_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_or( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=or(Qs4,!Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_or_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_or_n( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=vsetq(Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vsetq_r(rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt(pred_scalar2(rt), -1)) +} + +/// `Qd4=xor(Qs4,Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_xor_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_xor( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `if (!Qv4) vmem(Rt32+#s4)=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qnriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_nqpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (!Qv4) vmem(Rt32+#s4):nt=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qnriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_nt_nqpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (Qv4) vmem(Rt32+#s4):nt=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_nt_qpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (Qv4) vmem(Rt32+#s4)=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_qpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (!Qv4) Vx32.b+=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condacc_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddbnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.b+=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condacc_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddbq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.h+=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condacc_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddhnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.h+=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condacc_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddhq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.w+=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condacc_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddwnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.w+=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condacc_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddwq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `Vd32=vand(Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qr(qu: HvxVectorPred, rt: i32) -> HvxVector { + vandvrt(core::mem::transmute::(qu), rt) +} + +/// `Vx32|=vand(Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vandor_vqr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { + vandvrt_acc(vx, core::mem::transmute::(qu), rt) +} + +/// `Qd4=vand(Vu32,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vand_vr(vu: HvxVector, rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vu, rt)) +} + +/// `Qx4|=vand(Vu32,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vandor_qvr(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt_acc( + core::mem::transmute::(qx), + vu, + rt, + )) +} + +/// `Qd4=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eq_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(veqb(vu, vv), -1)) +} + +/// `Qx4&=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqand_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqb_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqor_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqb_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqxacc_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqb_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eq_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(veqh(vu, vv), -1)) +} + +/// `Qx4&=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqand_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqh_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqor_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqh_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqxacc_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqh_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eq_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(veqw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqand_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqor_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqxacc_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtb(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtb_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtb_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtb_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgth(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtub(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtuh(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtuw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Vd32=vmux(Qt4,Vu32,Vv32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vmux_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmux( + vandvrt(core::mem::transmute::(qt), -1), + vu, + vv, + ) +} + +/// `if (!Qv4) Vx32.b-=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condnac_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubbnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.b-=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condnac_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubbq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.h-=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condnac_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubhnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.h-=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condnac_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubhq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.w-=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condnac_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubwnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.w-=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condnac_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubwq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `Vdd32=vswap(Qt4,Vu32,Vv32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vswap_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vswap( + vandvrt(core::mem::transmute::(qt), -1), + vu, + vv, + ) +} + +/// `Qd4=vsetq2(Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vsetq2_r(rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt(pred_scalar2v2(rt), -1)) +} + +/// `Qd4.b=vshuffe(Qs4.h,Qt4.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_qb_vshuffe_qhqh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + shuffeqh( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4.h=vshuffe(Qs4.w,Qt4.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_qh_vshuffe_qwqw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + shuffeqw( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Vd32=vand(!Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VX_LATE
+/// Execution Slots: SLOT23
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_v_vand_qnr(qu: HvxVectorPred, rt: i32) -> HvxVector {
+    // Predicates are materialized as vectors via vandvrt before use; the
+    // transmute only reinterprets the register class, it does not convert data.
+    vandnqrt(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qu), -1),
+        rt,
+    )
+}
+
+/// `Vx32|=vand(!Qu4,Rt32)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VX_LATE
+/// Execution Slots: SLOT23
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_v_vandor_vqnr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector {
+    vandnqrt_acc(
+        vx,
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qu), -1),
+        rt,
+    )
+}
+
+/// `Vd32=vand(!Qv4,Vu32)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_v_vand_qnv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector {
+    vandvnqv(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        vu,
+    )
+}
+
+/// `Vd32=vand(Qv4,Vu32)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_v_vand_qv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector {
+    vandvqv(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        vu,
+    )
+}
+
+/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_GATHER
+/// Execution Slots: SLOT01
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vgather_aqrmvh(
+    rs: *mut HvxVector,
+    qs: HvxVectorPred,
+    rt: i32,
+    mu: i32,
+    vv: HvxVector,
+) {
+    vgathermhq(
+        rs,
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+        rt,
+        mu,
+        vv,
+    )
+}
+
+/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_GATHER_DV
+/// Execution Slots: SLOT01
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vgather_aqrmww(
+    rs: *mut HvxVector,
+    qs: HvxVectorPred,
+    rt: i32,
+    mu: i32,
+    vvv: HvxVectorPair,
+) {
+    vgathermhwq(
+        rs,
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+        rt,
+        mu,
+        vvv,
+    )
+}
+
+/// `if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_GATHER
+/// Execution Slots: SLOT01
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vgather_aqrmvw(
+    rs: *mut HvxVector,
+    qs: HvxVectorPred,
+    rt: i32,
+    mu: i32,
+    vv: HvxVector,
+) {
+    vgathermwq(
+        rs,
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+        rt,
+        mu,
+        vv,
+    )
+}
+
+/// `Vd32.b=prefixsum(Qv4)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VS
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vb_prefixsum_q(qv: HvxVectorPred) -> HvxVector {
+    vprefixqb(vandvrt(
+        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
+        -1,
+    ))
+}
+
+/// `Vd32.h=prefixsum(Qv4)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VS
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vh_prefixsum_q(qv: HvxVectorPred) -> HvxVector {
+    vprefixqh(vandvrt(
+        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
+        -1,
+    ))
+}
+
+/// `Vd32.w=prefixsum(Qv4)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VS
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vw_prefixsum_q(qv: HvxVectorPred) -> HvxVector {
+    vprefixqw(vandvrt(
+        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
+        -1,
+    ))
+}
+
+/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_SCATTER
+/// Execution Slots: SLOT0
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vscatter_qrmvhv(
+    qs: HvxVectorPred,
+    rt: i32,
+    mu: i32,
+    vv: HvxVector,
+    vw: HvxVector,
+) {
+    vscattermhq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+        rt,
+        mu,
+        vv,
+        vw,
+    )
+}
+
+/// `if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_SCATTER_DV
+/// Execution Slots: SLOT0
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vscatter_qrmwwv(
+    qs: HvxVectorPred,
+    rt: i32,
+    mu: i32,
+    vvv: HvxVectorPair,
+    vw: HvxVector,
+) {
+    vscattermhwq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+        rt,
+        mu,
+        vvv,
+        vw,
+    )
+}
+
+/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_SCATTER
+/// Execution Slots: SLOT0
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vscatter_qrmvwv(
+    qs: HvxVectorPred,
+    rt: i32,
+    mu: i32,
+    vv: HvxVector,
+    vw: HvxVector,
+) {
+    vscattermwq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+        rt,
+        mu,
+        vv,
+        vw,
+    )
+}
+
+/// `Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vw_vadd_vwvwq_carry_sat(
+    vu: HvxVector,
+    vv: HvxVector,
+    qs: HvxVectorPred,
+) -> HvxVector {
+    vaddcarrysat(
+        vu,
+        vv,
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+    )
+}
+
+/// `Qd4=vcmp.gt(Vu32.hf,Vv32.hf)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
+    // vandqrt converts the vector-form comparison result back to predicate form.
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgthf(vu, vv), -1))
+}
+
+/// `Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gtand_qvhfvhf(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        vgthf_and(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gtor_qvhfvhf(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        vgthf_or(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gtxacc_qvhfvhf(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        vgthf_xor(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qd4=vcmp.gt(Vu32.sf,Vv32.sf)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtsf(vu, vv), -1))
+}
+
+/// `Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gtand_qvsfvsf(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        vgtsf_and(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gtor_qvsfvsf(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        vgtsf_or(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gtxacc_qvsfvsf(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        vgtsf_xor(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
diff --git a/library/stdarch/crates/core_arch/src/hexagon/mod.rs b/library/stdarch/crates/core_arch/src/hexagon/mod.rs
new file mode 100644
index 0000000000000..a9c53d6efe00e
--- /dev/null
+++ b/library/stdarch/crates/core_arch/src/hexagon/mod.rs
@@ -0,0 +1,12 @@
+//! Hexagon architecture intrinsics
+//!
+//! This module contains intrinsics for the Qualcomm Hexagon DSP architecture,
+//! including the Hexagon Vector Extensions (HVX).
+//!
+//! HVX is a wide SIMD architecture designed for high-performance signal processing,
+//! machine learning, and image processing workloads.
+ +mod hvx; + +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub use self::hvx::*; diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs index 039a4c4411f2e..8a1bead7c4791 100644 --- a/library/stdarch/crates/core_arch/src/lib.rs +++ b/library/stdarch/crates/core_arch/src/lib.rs @@ -23,6 +23,7 @@ mips_target_feature, powerpc_target_feature, loongarch_target_feature, + hexagon_target_feature, wasm_target_feature, abi_unadjusted, rtm_target_feature, diff --git a/library/stdarch/crates/core_arch/src/mod.rs b/library/stdarch/crates/core_arch/src/mod.rs index 3577175ae31c7..f8ea68b35c665 100644 --- a/library/stdarch/crates/core_arch/src/mod.rs +++ b/library/stdarch/crates/core_arch/src/mod.rs @@ -320,6 +320,19 @@ pub mod arch { pub mod s390x { pub use crate::core_arch::s390x::*; } + + /// Platform-specific intrinsics for the `hexagon` platform. + /// + /// This module provides intrinsics for the Qualcomm Hexagon DSP architecture, + /// including the Hexagon Vector Extensions (HVX). + /// + /// See the [module documentation](../index.html) for more details. 
+ #[cfg(any(target_arch = "hexagon", doc))] + #[doc(cfg(target_arch = "hexagon"))] + #[unstable(feature = "stdarch_hexagon", issue = "none")] + pub mod hexagon { + pub use crate::core_arch::hexagon::*; + } } #[cfg(any(target_arch = "x86", target_arch = "x86_64", doc))] @@ -379,3 +392,7 @@ mod loongarch64; #[cfg(any(target_arch = "s390x", doc))] #[doc(cfg(target_arch = "s390x"))] mod s390x; + +#[cfg(any(target_arch = "hexagon", doc))] +#[doc(cfg(target_arch = "hexagon"))] +mod hexagon; diff --git a/library/stdarch/crates/stdarch-gen-hexagon/Cargo.toml b/library/stdarch/crates/stdarch-gen-hexagon/Cargo.toml new file mode 100644 index 0000000000000..f8c446c1d15a0 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-hexagon/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "stdarch-gen-hexagon" +version = "0.1.0" +authors = ["The Rust Project Developers"] +license = "MIT OR Apache-2.0" +edition = "2021" + +[dependencies] +regex = "1.10" +ureq = "2.9" diff --git a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs new file mode 100644 index 0000000000000..2f8dec75b76b6 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs @@ -0,0 +1,1697 @@ +//! Hexagon HVX Code Generator +//! +//! This generator creates hvx.rs from scratch using the LLVM HVX header file +//! as the sole source of truth. It parses the C intrinsic prototypes and +//! generates Rust wrapper functions with appropriate attributes. +//! +//! Usage: +//! cd crates/stdarch-gen-hexagon +//! cargo run +//! # Output is written directly to ../core_arch/src/hexagon/hvx.rs + +use regex::Regex; +use std::collections::{HashMap, HashSet}; +use std::fs::File; +use std::io::Write; +use std::path::Path; + +/// Mappings from HVX intrinsics to architecture-independent SIMD intrinsics. +/// These intrinsics have equivalent semantics and can be lowered to the generic form. 
+fn get_simd_intrinsic_mappings() -> HashMap<&'static str, &'static str> { + let mut map = HashMap::new(); + // Bitwise operations (element-size independent) + map.insert("vxor", "simd_xor"); + map.insert("vand", "simd_and"); + map.insert("vor", "simd_or"); + // Word (32-bit) arithmetic operations + map.insert("vaddw", "simd_add"); + map.insert("vsubw", "simd_sub"); + map +} + +/// The tracking issue number for the stdarch_hexagon feature +const TRACKING_ISSUE: &str = "151523"; + +/// LLVM tag to fetch the header from +const LLVM_TAG: &str = "llvmorg-22.1.0-rc1"; + +/// Maximum HVX architecture version supported by rustc +/// Check with: rustc --target=hexagon-unknown-linux-musl --print target-features +const MAX_SUPPORTED_ARCH: u32 = 79; + +/// URL template for the HVX header file +const HEADER_URL: &str = + "https://raw.githubusercontent.com/llvm/llvm-project/{tag}/clang/lib/Headers/hvx_hexagon_protos.h"; + +/// Intrinsic information parsed from the LLVM header +#[derive(Debug, Clone)] +struct IntrinsicInfo { + /// The Q6_* intrinsic name (e.g., "Q6_V_vadd_VV") + q6_name: String, + /// The LLVM builtin name without prefix (e.g., "V6_vaddb") + builtin_name: String, + /// The short instruction name for assert_instr (e.g., "vaddb") + instr_name: String, + /// The assembly syntax from the comment + asm_syntax: String, + /// Instruction type + instr_type: String, + /// Execution slots + exec_slots: String, + /// Minimum HVX architecture version required + min_arch: u32, + /// Return type + return_type: RustType, + /// Parameters (name, type) + params: Vec<(String, RustType)>, + /// Whether this is a compound intrinsic (multiple builtins) + is_compound: bool, + /// For compound intrinsics: the parsed expression tree + compound_expr: Option, +} + +/// Expression tree for compound intrinsics +#[derive(Debug, Clone)] +enum CompoundExpr { + /// A call to a builtin: (builtin_name without V6_ prefix, arguments) + BuiltinCall(String, Vec), + /// A parameter reference by name 
+ Param(String), + /// An integer literal (like -1) + IntLiteral(i32), +} + +/// Rust type mappings +#[derive(Debug, Clone, PartialEq)] +enum RustType { + HvxVector, + HvxVectorPair, + HvxVectorPred, + I32, + MutPtrHvxVector, + Unit, +} + +impl RustType { + fn from_c_type(c_type: &str) -> Option { + match c_type.trim() { + "HVX_Vector" => Some(RustType::HvxVector), + "HVX_VectorPair" => Some(RustType::HvxVectorPair), + "HVX_VectorPred" => Some(RustType::HvxVectorPred), + "Word32" => Some(RustType::I32), + "HVX_Vector*" => Some(RustType::MutPtrHvxVector), + "void" => Some(RustType::Unit), + _ => None, + } + } + + fn to_rust_str(&self) -> &'static str { + match self { + RustType::HvxVector => "HvxVector", + RustType::HvxVectorPair => "HvxVectorPair", + RustType::HvxVectorPred => "HvxVectorPred", + RustType::I32 => "i32", + RustType::MutPtrHvxVector => "*mut HvxVector", + RustType::Unit => "()", + } + } + + fn to_extern_str(&self) -> &'static str { + match self { + RustType::HvxVector => "HvxVector", + RustType::HvxVectorPair => "HvxVectorPair", + RustType::HvxVectorPred => "HvxVectorPred", + RustType::I32 => "i32", + RustType::MutPtrHvxVector => "*mut HvxVector", + RustType::Unit => "()", + } + } +} + +/// Parse a compound macro expression into an expression tree +fn parse_compound_expr(expr: &str) -> Option { + let expr = expr.trim(); + + // Try to match an integer literal (like -1) + if let Ok(n) = expr.parse::() { + return Some(CompoundExpr::IntLiteral(n)); + } + + // Try to match a simple parameter name (Vu, Vv, Rt, Qs, Qt, Qx, Vx, etc.) 
+ // These are typically short identifiers in the macro + if expr.len() <= 3 + && expr.chars().all(|c| c.is_ascii_alphanumeric() || c == '_') + && !expr.contains("__") + { + return Some(CompoundExpr::Param(expr.to_lowercase())); + } + + // Check if it's wrapped in extra parens first + if expr.starts_with('(') && expr.ends_with(')') { + // Check if these parens wrap the entire expression + let inner = &expr[1..expr.len() - 1]; + // Count depth: if after removing outer parens the expression is balanced, + // the outer parens were enclosing everything + if is_balanced_parens(inner) { + // But we also need to verify these aren't part of a function call + // If the inner expression is balanced and the whole thing starts with ( + // and ends with ), it's a paren wrapper + let result = parse_compound_expr(inner); + if result.is_some() { + return result; + } + } + } + + // Try to match __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_xxx)(args) + // The args portion may contain nested calls, so we need to find the matching paren + if expr.starts_with("__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_") { + // Find the end of the builtin name (after V6_) + let prefix = "__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_"; + let after_prefix = &expr[prefix.len()..]; + if let Some(paren_pos) = after_prefix.find(')') { + let builtin_name = &after_prefix[..paren_pos]; + let rest = &after_prefix[paren_pos + 1..]; // Skip the closing ) of the WRAP + // rest should now be "(args)" + if rest.starts_with('(') && rest.ends_with(')') { + let args_str = &rest[1..rest.len() - 1]; + let args = parse_compound_args(args_str)?; + return Some(CompoundExpr::BuiltinCall(builtin_name.to_string(), args)); + } + } + } + + // Try to match __builtin_HEXAGON_V6_xxx(args) without wrap + if expr.starts_with("__builtin_HEXAGON_V6_") { + let prefix = "__builtin_HEXAGON_V6_"; + let after_prefix = &expr[prefix.len()..]; + if let Some(paren_pos) = after_prefix.find('(') { + let builtin_name = &after_prefix[..paren_pos]; + 
let rest = &after_prefix[paren_pos..]; + if rest.starts_with('(') && rest.ends_with(')') { + let args_str = &rest[1..rest.len() - 1]; + let args = parse_compound_args(args_str)?; + return Some(CompoundExpr::BuiltinCall(builtin_name.to_string(), args)); + } + } + } + + None +} + +/// Check if parentheses are balanced in a string +fn is_balanced_parens(s: &str) -> bool { + let mut depth = 0; + for c in s.chars() { + match c { + '(' => depth += 1, + ')' => { + depth -= 1; + if depth < 0 { + return false; + } + } + _ => {} + } + } + depth == 0 +} + +/// Parse comma-separated arguments, respecting nested parentheses +fn parse_compound_args(args_str: &str) -> Option> { + let mut args = Vec::new(); + let mut current = String::new(); + let mut depth = 0; + + for c in args_str.chars() { + match c { + '(' => { + depth += 1; + current.push(c); + } + ')' => { + depth -= 1; + current.push(c); + } + ',' if depth == 0 => { + let arg = current.trim().to_string(); + if !arg.is_empty() { + args.push(parse_compound_expr(&arg)?); + } + current.clear(); + } + _ => current.push(c), + } + } + + // Don't forget the last argument + let arg = current.trim().to_string(); + if !arg.is_empty() { + args.push(parse_compound_expr(&arg)?); + } + + Some(args) +} + +/// Extract all builtin names used in a compound expression +fn collect_builtins_from_expr(expr: &CompoundExpr, builtins: &mut HashSet) { + match expr { + CompoundExpr::BuiltinCall(name, args) => { + builtins.insert(name.clone()); + for arg in args { + collect_builtins_from_expr(arg, builtins); + } + } + CompoundExpr::Param(_) | CompoundExpr::IntLiteral(_) => {} + } +} + +/// Download the LLVM HVX header file +fn download_header() -> Result { + let url = HEADER_URL.replace("{tag}", LLVM_TAG); + println!("Downloading HVX header from: {}", url); + + let response = ureq::get(&url) + .call() + .map_err(|e| format!("Failed to download header: {}", e))?; + + response + .into_string() + .map_err(|e| format!("Failed to read response: {}", e)) +} 
+ +/// Parse a C function prototype to extract return type and parameters +fn parse_prototype(prototype: &str) -> Option<(RustType, Vec<(String, RustType)>)> { + // Pattern: ReturnType FunctionName(ParamType1 Param1, ParamType2 Param2, ...) + let proto_re = Regex::new(r"(\w+(?:\*)?)\s+Q6_\w+\(([^)]*)\)").unwrap(); + + if let Some(caps) = proto_re.captures(prototype) { + let return_type_str = caps[1].trim(); + let params_str = &caps[2]; + + let return_type = RustType::from_c_type(return_type_str)?; + + let mut params = Vec::new(); + if !params_str.trim().is_empty() { + for param in params_str.split(',') { + let param = param.trim(); + // Pattern: Type Name or Type* Name + let param_re = Regex::new(r"(\w+\*?)\s+(\w+)").unwrap(); + if let Some(pcaps) = param_re.captures(param) { + let ptype_str = pcaps[1].trim(); + let pname = pcaps[2].to_lowercase(); + if let Some(ptype) = RustType::from_c_type(ptype_str) { + params.push((pname, ptype)); + } else { + return None; // Unknown type + } + } + } + } + + Some((return_type, params)) + } else { + None + } +} + +/// Parse the LLVM header file to extract intrinsic information +fn parse_header(content: &str) -> Vec { + let mut intrinsics = Vec::new(); + + let arch_re = Regex::new(r"#if __HVX_ARCH__ >= (\d+)").unwrap(); + + // Regex to extract the simple builtin name from a macro body + // Match: __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_xxx)(args) + let simple_builtin_re = + Regex::new(r"__BUILTIN_VECTOR_WRAP\(__builtin_HEXAGON_(\w+)\)\([^)]*\)\s*$").unwrap(); + + // Also handle builtins without VECTOR_WRAP + let simple_builtin_re2 = Regex::new(r"__builtin_HEXAGON_(\w+)\([^)]*\)\s*$").unwrap(); + + let lines: Vec<&str> = content.lines().collect(); + let mut current_arch: u32 = 60; + let mut i = 0; + + while i < lines.len() { + // Track architecture version + if let Some(caps) = arch_re.captures(lines[i]) { + if let Ok(arch) = caps[1].parse() { + current_arch = arch; + } + } + + // Look for Assembly Syntax comment block + if 
lines[i].contains("Assembly Syntax:") { + let mut asm_syntax = String::new(); + let mut prototype = String::new(); + let mut instr_type = String::new(); + let mut exec_slots = String::new(); + + // Parse the comment block + let mut j = i; + while j < lines.len() && !lines[j].starts_with("#define") { + let line = lines[j]; + if line.contains("Assembly Syntax:") { + if let Some(pos) = line.find("Assembly Syntax:") { + asm_syntax = line[pos + 16..].trim().to_string(); + } + } else if line.contains("C Intrinsic Prototype:") { + if let Some(pos) = line.find("C Intrinsic Prototype:") { + prototype = line[pos + 22..].trim().to_string(); + } + } else if line.contains("Instruction Type:") { + if let Some(pos) = line.find("Instruction Type:") { + instr_type = line[pos + 17..].trim().to_string(); + } + } else if line.contains("Execution Slots:") { + if let Some(pos) = line.find("Execution Slots:") { + exec_slots = line[pos + 16..].trim().to_string(); + } + } + j += 1; + } + + // Now find the #define line + while j < lines.len() && !lines[j].starts_with("#define") { + j += 1; + } + + if j < lines.len() { + let define_line = lines[j]; + + // Extract Q6 name and check if it's simple or compound + let q6_name_re = Regex::new(r"#define\s+(Q6_\w+)").unwrap(); + if let Some(caps) = q6_name_re.captures(define_line) { + let q6_name = caps[1].to_string(); + + // Get the full macro body (handle line continuations) + let mut macro_body = define_line.to_string(); + let mut k = j; + while macro_body.trim_end().ends_with('\\') && k + 1 < lines.len() { + k += 1; + macro_body.push_str(lines[k]); + } + + // Try to extract simple builtin name + let builtin_name = if let Some(bcaps) = simple_builtin_re.captures(¯o_body) + { + Some(bcaps[1].to_string()) + } else if let Some(bcaps) = simple_builtin_re2.captures(¯o_body) { + Some(bcaps[1].to_string()) + } else { + None + }; + + // Check if it's a compound intrinsic (multiple __builtin calls) + let builtin_count = 
macro_body.matches("__builtin_HEXAGON_").count(); + let is_compound = builtin_count > 1; + + // Parse prototype + if let Some((return_type, params)) = parse_prototype(&prototype) { + if is_compound { + // For compound intrinsics, parse the expression + // Extract the macro body after the parameter list + let macro_expr_re = + Regex::new(r"#define\s+Q6_\w+\([^)]*\)\s+(.+)").unwrap(); + if let Some(expr_caps) = macro_expr_re.captures(¯o_body) { + let expr_str = + expr_caps[1].trim().replace('\n', " ").replace('\\', " "); + let expr_str = expr_str.trim(); + + if let Some(compound_expr) = parse_compound_expr(expr_str) { + // For compound intrinsics, we use the outermost builtin + // as the "primary" for the instruction name + let (primary_builtin, instr_name) = match &compound_expr { + CompoundExpr::BuiltinCall(name, _) => { + (name.clone(), name.clone()) + } + _ => continue, + }; + + intrinsics.push(IntrinsicInfo { + q6_name, + builtin_name: format!("V6_{}", primary_builtin), + instr_name, + asm_syntax, + instr_type, + exec_slots, + min_arch: current_arch, + return_type, + params, + is_compound: true, + compound_expr: Some(compound_expr), + }); + } + } + } else if let Some(builtin) = builtin_name { + // Extract short instruction name + let instr_name = if builtin.starts_with("V6_") { + builtin[3..].to_string() + } else { + builtin.clone() + }; + + intrinsics.push(IntrinsicInfo { + q6_name, + builtin_name: builtin, + instr_name, + asm_syntax, + instr_type, + exec_slots, + min_arch: current_arch, + return_type, + params, + is_compound: false, + compound_expr: None, + }); + } + } + } + } + i = j; + } + i += 1; + } + + intrinsics +} + +/// Convert Q6 name to Rust function name (lowercase with underscores) +fn q6_to_rust_name(q6_name: &str) -> String { + // Q6_V_hi_W -> q6_v_hi_w + q6_name.to_lowercase() +} + +/// Generate the module documentation +fn generate_module_doc() -> String { + r#"//! Hexagon HVX intrinsics +//! +//! 
This module provides intrinsics for the Hexagon Vector Extensions (HVX). +//! HVX is a wide vector extension designed for high-performance signal processing. +//! [Hexagon HVX Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-61) +//! +//! ## Vector Types +//! +//! HVX supports different vector lengths depending on the configuration: +//! - 128-byte mode: `HvxVector` is 1024 bits (128 bytes) +//! - 64-byte mode: `HvxVector` is 512 bits (64 bytes) +//! +//! This implementation targets 128-byte mode by default. To change the vector +//! length mode, use the appropriate target feature when compiling: +//! - For 128-byte mode: `-C target-feature=+hvx-length128b` +//! - For 64-byte mode: `-C target-feature=+hvx-length64b` +//! +//! Note that HVX v66 and later default to 128-byte mode, while earlier versions +//! default to 64-byte mode. +//! +//! ## Architecture Versions +//! +//! Different intrinsics require different HVX architecture versions. Use the +//! appropriate target feature to enable the required version: +//! - HVX v60: `-C target-feature=+hvxv60` (basic HVX operations) +//! - HVX v62: `-C target-feature=+hvxv62` +//! - HVX v65: `-C target-feature=+hvxv65` (includes floating-point support) +//! - HVX v66: `-C target-feature=+hvxv66` +//! - HVX v68: `-C target-feature=+hvxv68` +//! - HVX v69: `-C target-feature=+hvxv69` +//! - HVX v73: `-C target-feature=+hvxv73` +//! - HVX v79: `-C target-feature=+hvxv79` +//! - HVX v81: `-C target-feature=+hvxv81` +//! +//! Each version includes all features from previous versions. 
+"# + .to_string() +} + +/// Generate the type definitions +fn generate_types() -> String { + format!( + r#" +#![allow(non_camel_case_types)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use crate::intrinsics::simd::{{simd_add, simd_and, simd_or, simd_sub, simd_xor}}; + +// HVX type definitions for 128-byte vector mode (default for v66+) +// Use -C target-feature=+hvx-length128b to enable +#[cfg(target_feature = "hvx-length128b")] +types! {{ + #![unstable(feature = "stdarch_hexagon", issue = "{TRACKING_ISSUE}")] + + /// HVX vector type (1024 bits / 128 bytes) + /// + /// This type represents a single HVX vector register containing 32 x 32-bit values. + pub struct HvxVector(32 x i32); + + /// HVX vector pair type (2048 bits / 256 bytes) + /// + /// This type represents a pair of HVX vector registers, often used for + /// operations that produce double-width results. + pub struct HvxVectorPair(64 x i32); + + /// HVX vector predicate type (1024 bits / 128 bytes) + /// + /// This type represents a predicate vector used for conditional operations. + /// Each bit corresponds to a lane in the vector. + pub struct HvxVectorPred(32 x i32); +}} + +// HVX type definitions for 64-byte vector mode (default for v60-v65) +// Use -C target-feature=+hvx-length64b to enable, or omit hvx-length128b +#[cfg(not(target_feature = "hvx-length128b"))] +types! {{ + #![unstable(feature = "stdarch_hexagon", issue = "{TRACKING_ISSUE}")] + + /// HVX vector type (512 bits / 64 bytes) + /// + /// This type represents a single HVX vector register containing 16 x 32-bit values. + pub struct HvxVector(16 x i32); + + /// HVX vector pair type (1024 bits / 128 bytes) + /// + /// This type represents a pair of HVX vector registers, often used for + /// operations that produce double-width results. + pub struct HvxVectorPair(32 x i32); + + /// HVX vector predicate type (512 bits / 64 bytes) + /// + /// This type represents a predicate vector used for conditional operations. 
+ /// Each bit corresponds to a lane in the vector. + pub struct HvxVectorPred(16 x i32); +}} +"# + ) +} + +/// Builtin signature information for extern declarations +struct BuiltinSignature { + /// The V6_ prefixed name + full_name: String, + /// The short name (without V6_) + short_name: String, + /// Return type + return_type: RustType, + /// Parameter types + param_types: Vec, +} + +/// Get known signatures for builtins used in compound operations +/// These are the helper builtins that don't have their own Q6_ wrapper +fn get_compound_helper_signatures() -> HashMap { + let mut map = HashMap::new(); + + // vandvrt: HVX_Vector -> i32 -> HVX_Vector + // Converts predicate to vector representation. LLVM uses HVX_Vector for both. + map.insert( + "vandvrt".to_string(), + BuiltinSignature { + full_name: "V6_vandvrt".to_string(), + short_name: "vandvrt".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::I32], + }, + ); + + // vandqrt: HVX_Vector -> i32 -> HVX_Vector + // Converts vector representation back to predicate. LLVM uses HVX_Vector for both. 
+ map.insert( + "vandqrt".to_string(), + BuiltinSignature { + full_name: "V6_vandqrt".to_string(), + short_name: "vandqrt".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::I32], + }, + ); + + // vandvrt_acc: HVX_Vector -> HVX_Vector -> i32 -> HVX_Vector + map.insert( + "vandvrt_acc".to_string(), + BuiltinSignature { + full_name: "V6_vandvrt_acc".to_string(), + short_name: "vandvrt_acc".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector, RustType::I32], + }, + ); + + // vandqrt_acc: HVX_Vector -> HVX_Vector -> i32 -> HVX_Vector + map.insert( + "vandqrt_acc".to_string(), + BuiltinSignature { + full_name: "V6_vandqrt_acc".to_string(), + short_name: "vandqrt_acc".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector, RustType::I32], + }, + ); + + // pred_and: HVX_Vector -> HVX_Vector -> HVX_Vector + map.insert( + "pred_and".to_string(), + BuiltinSignature { + full_name: "V6_pred_and".to_string(), + short_name: "pred_and".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // pred_and_n: HVX_Vector -> HVX_Vector -> HVX_Vector + map.insert( + "pred_and_n".to_string(), + BuiltinSignature { + full_name: "V6_pred_and_n".to_string(), + short_name: "pred_and_n".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // pred_or: HVX_Vector -> HVX_Vector -> HVX_Vector + map.insert( + "pred_or".to_string(), + BuiltinSignature { + full_name: "V6_pred_or".to_string(), + short_name: "pred_or".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // pred_or_n: HVX_Vector -> HVX_Vector -> HVX_Vector + map.insert( + "pred_or_n".to_string(), + BuiltinSignature { + full_name: "V6_pred_or_n".to_string(), + 
short_name: "pred_or_n".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // pred_xor: HVX_Vector -> HVX_Vector -> HVX_Vector + map.insert( + "pred_xor".to_string(), + BuiltinSignature { + full_name: "V6_pred_xor".to_string(), + short_name: "pred_xor".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // pred_not: HVX_Vector -> HVX_Vector + map.insert( + "pred_not".to_string(), + BuiltinSignature { + full_name: "V6_pred_not".to_string(), + short_name: "pred_not".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector], + }, + ); + + // pred_scalar2: i32 -> HVX_Vector + map.insert( + "pred_scalar2".to_string(), + BuiltinSignature { + full_name: "V6_pred_scalar2".to_string(), + short_name: "pred_scalar2".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::I32], + }, + ); + + // Conditional store operations + map.insert( + "vS32b_qpred_ai".to_string(), + BuiltinSignature { + full_name: "V6_vS32b_qpred_ai".to_string(), + short_name: "vS32b_qpred_ai".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::MutPtrHvxVector, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vS32b_nqpred_ai".to_string(), + BuiltinSignature { + full_name: "V6_vS32b_nqpred_ai".to_string(), + short_name: "vS32b_nqpred_ai".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::MutPtrHvxVector, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vS32b_nt_qpred_ai".to_string(), + BuiltinSignature { + full_name: "V6_vS32b_nt_qpred_ai".to_string(), + short_name: "vS32b_nt_qpred_ai".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::MutPtrHvxVector, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vS32b_nt_nqpred_ai".to_string(), + 
BuiltinSignature { + full_name: "V6_vS32b_nt_nqpred_ai".to_string(), + short_name: "vS32b_nt_nqpred_ai".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::MutPtrHvxVector, + RustType::HvxVector, + ], + }, + ); + + // Conditional accumulation operations + for (suffix, _elem) in [("b", "byte"), ("h", "halfword"), ("w", "word")] { + // vaddbq, vaddhq, vaddwq + map.insert( + format!("vadd{}q", suffix), + BuiltinSignature { + full_name: format!("V6_vadd{}q", suffix), + short_name: format!("vadd{}q", suffix), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + // vaddbnq, vaddhnq, vaddwnq + map.insert( + format!("vadd{}nq", suffix), + BuiltinSignature { + full_name: format!("V6_vadd{}nq", suffix), + short_name: format!("vadd{}nq", suffix), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + } + + // Comparison operations with accumulation + // veqb_and, veqb_or, veqb_xor, etc. 
+ for elem in ["b", "h", "w", "ub", "uh", "uw"] { + for op in ["and", "or", "xor"] { + // veq*_and, veq*_or, veq*_xor + map.insert( + format!("veq{}_{}", elem, op), + BuiltinSignature { + full_name: format!("V6_veq{}_{}", elem, op), + short_name: format!("veq{}_{}", elem, op), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + // vgt*_and, vgt*_or, vgt*_xor + map.insert( + format!("vgt{}_{}", elem, op), + BuiltinSignature { + full_name: format!("V6_vgt{}_{}", elem, op), + short_name: format!("vgt{}_{}", elem, op), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + } + } + + // Floating-point comparison operations (hf = half-float, sf = single-float) + for elem in ["hf", "sf"] { + // Basic comparison: vgt* + map.insert( + format!("vgt{}", elem), + BuiltinSignature { + full_name: format!("V6_vgt{}", elem), + short_name: format!("vgt{}", elem), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + for op in ["and", "or", "xor"] { + // vgt*_and, vgt*_or, vgt*_xor + map.insert( + format!("vgt{}_{}", elem, op), + BuiltinSignature { + full_name: format!("V6_vgt{}_{}", elem, op), + short_name: format!("vgt{}_{}", elem, op), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + } + } + + // Prefix operations with predicate + for elem in ["b", "h", "w"] { + map.insert( + format!("vprefixq{}", elem), + BuiltinSignature { + full_name: format!("V6_vprefixq{}", elem), + short_name: format!("vprefixq{}", elem), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector], + }, + ); + } + + // Scatter operations with predicate + map.insert( + "vscattermhq".to_string(), + BuiltinSignature { + full_name: "V6_vscattermhq".to_string(), + 
short_name: "vscattermhq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vscattermhwq".to_string(), + BuiltinSignature { + full_name: "V6_vscattermhwq".to_string(), + short_name: "vscattermhwq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVectorPair, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vscattermwq".to_string(), + BuiltinSignature { + full_name: "V6_vscattermwq".to_string(), + short_name: "vscattermwq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + + // Add with carry saturation + map.insert( + "vaddcarrysat".to_string(), + BuiltinSignature { + full_name: "V6_vaddcarrysat".to_string(), + short_name: "vaddcarrysat".to_string(), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + + // Gather operations with predicate + map.insert( + "vgathermhq".to_string(), + BuiltinSignature { + full_name: "V6_vgathermhq".to_string(), + short_name: "vgathermhq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::MutPtrHvxVector, + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vgathermhwq".to_string(), + BuiltinSignature { + full_name: "V6_vgathermhwq".to_string(), + short_name: "vgathermhwq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::MutPtrHvxVector, + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVectorPair, + ], + }, + ); + + map.insert( + "vgathermwq".to_string(), + BuiltinSignature { + full_name: "V6_vgathermwq".to_string(), + short_name: 
"vgathermwq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::MutPtrHvxVector, + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVector, + ], + }, + ); + + // Basic comparison operations (without accumulation) + for elem in ["b", "h", "w", "ub", "uh", "uw"] { + // vgt* - greater than + map.insert( + format!("vgt{}", elem), + BuiltinSignature { + full_name: format!("V6_vgt{}", elem), + short_name: format!("vgt{}", elem), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + // veq* - equal + map.insert( + format!("veq{}", elem), + BuiltinSignature { + full_name: format!("V6_veq{}", elem), + short_name: format!("veq{}", elem), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + } + + // Conditional subtraction operations (vsub*q, vsub*nq) + for elem in ["b", "h", "w"] { + map.insert( + format!("vsub{}q", elem), + BuiltinSignature { + full_name: format!("V6_vsub{}q", elem), + short_name: format!("vsub{}q", elem), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + map.insert( + format!("vsub{}nq", elem), + BuiltinSignature { + full_name: format!("V6_vsub{}nq", elem), + short_name: format!("vsub{}nq", elem), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + } + + // vmux - vector mux (select based on predicate) + map.insert( + "vmux".to_string(), + BuiltinSignature { + full_name: "V6_vmux".to_string(), + short_name: "vmux".to_string(), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + + // vswap - vector swap based on predicate + map.insert( + "vswap".to_string(), + BuiltinSignature { + full_name: "V6_vswap".to_string(), + short_name: 
"vswap".to_string(), + return_type: RustType::HvxVectorPair, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + + // shuffeq operations - take vectors (internal pred representation) and return vector + for elem in ["h", "w"] { + map.insert( + format!("shuffeq{}", elem), + BuiltinSignature { + full_name: format!("V6_shuffeq{}", elem), + short_name: format!("shuffeq{}", elem), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + } + + // Predicate AND with vector operations + map.insert( + "vandvqv".to_string(), + BuiltinSignature { + full_name: "V6_vandvqv".to_string(), + short_name: "vandvqv".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + map.insert( + "vandvnqv".to_string(), + BuiltinSignature { + full_name: "V6_vandvnqv".to_string(), + short_name: "vandvnqv".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // vandnqrt and vandnqrt_acc + map.insert( + "vandnqrt".to_string(), + BuiltinSignature { + full_name: "V6_vandnqrt".to_string(), + short_name: "vandnqrt".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::I32], + }, + ); + + map.insert( + "vandnqrt_acc".to_string(), + BuiltinSignature { + full_name: "V6_vandnqrt_acc".to_string(), + short_name: "vandnqrt_acc".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector, RustType::I32], + }, + ); + + // pred_scalar2v2 + map.insert( + "pred_scalar2v2".to_string(), + BuiltinSignature { + full_name: "V6_pred_scalar2v2".to_string(), + short_name: "pred_scalar2v2".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::I32], + }, + ); + + map +} + +/// Generate extern declarations for all intrinsics +fn 
generate_extern_block(intrinsics: &[IntrinsicInfo]) -> String { + let mut output = String::new(); + + // Collect unique builtins to avoid duplicates + let mut seen_builtins: HashSet = HashSet::new(); + let mut decls: Vec<(String, String, RustType, Vec)> = Vec::new(); + + // First, add simple intrinsics + for info in intrinsics.iter().filter(|i| !i.is_compound) { + if seen_builtins.contains(&info.builtin_name) { + continue; + } + seen_builtins.insert(info.builtin_name.clone()); + + let param_types: Vec = info.params.iter().map(|(_, t)| t.clone()).collect(); + decls.push(( + info.builtin_name.clone(), + info.instr_name.clone(), + info.return_type.clone(), + param_types, + )); + } + + // Then, collect all builtins used in compound expressions + let helper_sigs = get_compound_helper_signatures(); + let mut compound_builtins: HashSet = HashSet::new(); + + for info in intrinsics.iter().filter(|i| i.is_compound) { + if let Some(ref expr) = info.compound_expr { + collect_builtins_from_expr(expr, &mut compound_builtins); + } + } + + // Add compound helper builtins + let mut missing_builtins = Vec::new(); + for builtin_name in compound_builtins { + let full_name = format!("V6_{}", builtin_name); + if seen_builtins.contains(&full_name) { + continue; + } + seen_builtins.insert(full_name.clone()); + + if let Some(sig) = helper_sigs.get(&builtin_name) { + decls.push(( + sig.full_name.clone(), + sig.short_name.clone(), + sig.return_type.clone(), + sig.param_types.clone(), + )); + } else { + missing_builtins.push(builtin_name); + } + } + + // Report missing builtins (for development purposes) + if !missing_builtins.is_empty() { + eprintln!("Warning: Missing helper signatures for compound builtins:"); + for name in &missing_builtins { + eprintln!(" - {}", name); + } + } + + // Sort by builtin name for consistent output + decls.sort_by(|a, b| a.0.cmp(&b.0)); + + // Generate 128-byte mode intrinsics (default for v66+) + output.push_str("// LLVM intrinsic declarations for 128-byte 
vector mode\n"); + output.push_str("#[cfg(target_feature = \"hvx-length128b\")]\n"); + output.push_str("#[allow(improper_ctypes)]\n"); + output.push_str("unsafe extern \"unadjusted\" {\n"); + + for (builtin_name, instr_name, return_type, param_types) in &decls { + let base_link = builtin_name.replace('_', "."); + let link_name = if builtin_name.starts_with("V6_") { + format!("llvm.hexagon.{}.128B", base_link) + } else { + format!("llvm.hexagon.{}", base_link) + }; + + let params_str = if param_types.is_empty() { + String::new() + } else { + param_types + .iter() + .map(|t| format!("_: {}", t.to_extern_str())) + .collect::>() + .join(", ") + }; + + let return_str = if *return_type == RustType::Unit { + " -> ()".to_string() + } else { + format!(" -> {}", return_type.to_extern_str()) + }; + + output.push_str(&format!( + " #[link_name = \"{}\"]\n fn {}({}){};\n", + link_name, instr_name, params_str, return_str + )); + } + + output.push_str("}\n\n"); + + // Generate 64-byte mode intrinsics (default for v60-v65) + output.push_str("// LLVM intrinsic declarations for 64-byte vector mode\n"); + output.push_str("#[cfg(not(target_feature = \"hvx-length128b\"))]\n"); + output.push_str("#[allow(improper_ctypes)]\n"); + output.push_str("unsafe extern \"unadjusted\" {\n"); + + for (builtin_name, instr_name, return_type, param_types) in &decls { + let base_link = builtin_name.replace('_', "."); + // 64-byte mode uses intrinsics without the .128B suffix + let link_name = format!("llvm.hexagon.{}", base_link); + + let params_str = if param_types.is_empty() { + String::new() + } else { + param_types + .iter() + .map(|t| format!("_: {}", t.to_extern_str())) + .collect::>() + .join(", ") + }; + + let return_str = if *return_type == RustType::Unit { + " -> ()".to_string() + } else { + format!(" -> {}", return_type.to_extern_str()) + }; + + output.push_str(&format!( + " #[link_name = \"{}\"]\n fn {}({}){};\n", + link_name, instr_name, params_str, return_str + )); + } + + 
output.push_str("}\n"); + output +} + +/// Generate Rust code for a compound expression +/// `params` maps parameter names to their types in the function signature +/// Get the type of an expression +fn get_expr_type( + expr: &CompoundExpr, + params: &HashMap, + helper_sigs: &HashMap, +) -> Option { + match expr { + CompoundExpr::BuiltinCall(name, _) => { + helper_sigs.get(name).map(|sig| sig.return_type.clone()) + } + CompoundExpr::Param(name) => params.get(name).cloned(), + CompoundExpr::IntLiteral(_) => Some(RustType::I32), + } +} + +fn generate_compound_expr_code( + expr: &CompoundExpr, + params: &HashMap, + helper_sigs: &HashMap, +) -> String { + match expr { + CompoundExpr::BuiltinCall(name, args) => { + // Get the expected parameter types for this builtin + let expected_types = helper_sigs + .get(name) + .map(|sig| sig.param_types.clone()) + .unwrap_or_default(); + + let args_code: Vec = args + .iter() + .enumerate() + .map(|(i, arg)| { + let arg_code = generate_compound_expr_code(arg, params, helper_sigs); + + // Check if we need to transmute this argument + let expected_type = expected_types.get(i); + let actual_type = get_expr_type(arg, params, helper_sigs); + + // If the builtin expects HvxVector but the arg is HvxVectorPred, transmute + if expected_type == Some(&RustType::HvxVector) + && actual_type == Some(RustType::HvxVectorPred) + { + format!( + "core::mem::transmute::({})", + arg_code + ) + } else { + arg_code + } + }) + .collect(); + format!("{}({})", name, args_code.join(", ")) + } + CompoundExpr::Param(name) => name.clone(), + CompoundExpr::IntLiteral(n) => n.to_string(), + } +} + +/// Get the primary instruction name from a compound expression (innermost significant op) +fn get_compound_primary_instr(expr: &CompoundExpr) -> Option { + match expr { + CompoundExpr::BuiltinCall(name, args) => { + // For vandqrt wrapper, look inside + if name == "vandqrt" && args.len() >= 1 { + if let Some(inner) = get_compound_primary_instr(&args[0]) { + return 
Some(inner); + } + } + // For store operations, use the store name + if name.starts_with("vS32b") { + return Some(name.clone()); + } + // For conditional accumulation, use the add name + if name.starts_with("vadd") && (name.ends_with("q") || name.ends_with("nq")) { + return Some(name.clone()); + } + // For predicate operations + if name.starts_with("pred_") { + return Some(name.clone()); + } + // For comparison operations with accumulation + if (name.starts_with("veq") || name.starts_with("vgt")) + && (name.ends_with("_and") || name.ends_with("_or") || name.ends_with("_xor")) + { + return Some(name.clone()); + } + Some(name.clone()) + } + _ => None, + } +} + +/// Get override implementations for specific compound intrinsics. +/// Some C macros rely on implicit type conversions that don't work with +/// our stricter Rust types, so we provide corrected implementations. +fn get_compound_overrides() -> HashMap<&'static str, &'static str> { + let mut map = HashMap::new(); + + // Q6_V_vand_QR: takes pred, returns vec + // Use transmute to convert pred to vec for LLVM, call vandvrt + map.insert( + "Q6_V_vand_QR", + "vandvrt(core::mem::transmute::(qu), rt)", + ); + + // Q6_V_vandor_VQR: takes vec and pred, returns vec + map.insert( + "Q6_V_vandor_VQR", + "vandvrt_acc(vx, core::mem::transmute::(qu), rt)", + ); + + // Q6_Q_vand_VR: takes vec, returns pred + map.insert( + "Q6_Q_vand_VR", + "core::mem::transmute::(vandqrt(vu, rt))", + ); + + // Q6_Q_vandor_QVR: takes pred and vec, returns pred + map.insert( + "Q6_Q_vandor_QVR", + "core::mem::transmute::(vandqrt_acc(core::mem::transmute::(qx), vu, rt))", + ); + + map +} + +/// Generate wrapper functions for all intrinsics +fn generate_functions(intrinsics: &[IntrinsicInfo]) -> String { + let mut output = String::new(); + let simd_mappings = get_simd_intrinsic_mappings(); + + // Generate simple intrinsics + for info in intrinsics.iter().filter(|i| !i.is_compound) { + let rust_name = q6_to_rust_name(&info.q6_name); + + // 
Generate doc comment + output.push_str(&format!("/// `{}`\n", info.asm_syntax)); + output.push_str("///\n"); + output.push_str(&format!("/// Instruction Type: {}\n", info.instr_type)); + output.push_str(&format!("/// Execution Slots: {}\n", info.exec_slots)); + + // Generate attributes + output.push_str("#[inline(always)]\n"); + output.push_str(&format!( + "#[cfg_attr(target_arch = \"hexagon\", target_feature(enable = \"hvxv{}\"))]\n", + info.min_arch + )); + + // Check if we should use simd intrinsic instead + let use_simd = simd_mappings.get(info.instr_name.as_str()); + + // assert_instr uses the original instruction name + output.push_str(&format!( + "#[cfg_attr(test, assert_instr({}))]\n", + info.instr_name + )); + + output.push_str(&format!( + "#[unstable(feature = \"stdarch_hexagon\", issue = \"{}\")]\n", + TRACKING_ISSUE + )); + + // Generate function signature + let params_str = info + .params + .iter() + .map(|(name, ty)| format!("{}: {}", name, ty.to_rust_str())) + .collect::>() + .join(", "); + + let return_str = if info.return_type == RustType::Unit { + String::new() + } else { + format!(" -> {}", info.return_type.to_rust_str()) + }; + + output.push_str(&format!( + "pub unsafe fn {}({}){} {{\n", + rust_name, params_str, return_str + )); + + // Generate function body + let args_str = info + .params + .iter() + .map(|(name, _)| name.as_str()) + .collect::>() + .join(", "); + + if let Some(simd_fn) = use_simd { + // Use architecture-independent simd intrinsic + output.push_str(&format!(" {}({})\n", simd_fn, args_str)); + } else { + // Use the LLVM intrinsic + output.push_str(&format!(" {}({})\n", info.instr_name, args_str)); + } + + output.push_str("}\n\n"); + } + + // Generate compound intrinsics + let helper_sigs = get_compound_helper_signatures(); + let overrides = get_compound_overrides(); + for info in intrinsics.iter().filter(|i| i.is_compound) { + if let Some(ref compound_expr) = info.compound_expr { + let rust_name = q6_to_rust_name(&info.q6_name); 
+ + // Get the primary instruction for assert_instr + let _primary_instr = get_compound_primary_instr(compound_expr) + .unwrap_or_else(|| info.instr_name.clone()); + + // Generate doc comment + output.push_str(&format!("/// `{}`\n", info.asm_syntax)); + output.push_str("///\n"); + output.push_str( + "/// This is a compound operation composed of multiple HVX instructions.\n", + ); + if !info.instr_type.is_empty() { + output.push_str(&format!("/// Instruction Type: {}\n", info.instr_type)); + } + if !info.exec_slots.is_empty() { + output.push_str(&format!("/// Execution Slots: {}\n", info.exec_slots)); + } + + // Generate attributes + output.push_str("#[inline(always)]\n"); + output.push_str(&format!( + "#[cfg_attr(target_arch = \"hexagon\", target_feature(enable = \"hvxv{}\"))]\n", + info.min_arch + )); + + // For compound ops, we skip assert_instr since they emit multiple instructions + // output.push_str(&format!( + // "#[cfg_attr(test, assert_instr({}))]\n", + // primary_instr + // )); + + output.push_str(&format!( + "#[unstable(feature = \"stdarch_hexagon\", issue = \"{}\")]\n", + TRACKING_ISSUE + )); + + // Generate function signature + let params_str = info + .params + .iter() + .map(|(name, ty)| format!("{}: {}", name, ty.to_rust_str())) + .collect::>() + .join(", "); + + let return_str = if info.return_type == RustType::Unit { + String::new() + } else { + format!(" -> {}", info.return_type.to_rust_str()) + }; + + output.push_str(&format!( + "pub unsafe fn {}({}){} {{\n", + rust_name, params_str, return_str + )); + + // Check if we have an override for this intrinsic + let body = if let Some(override_body) = overrides.get(info.q6_name.as_str()) { + override_body.to_string() + } else { + // Build param type map for expression code generation + let param_types: HashMap = info.params.iter().cloned().collect(); + // Generate function body from compound expression + let expr_body = + generate_compound_expr_code(compound_expr, ¶m_types, &helper_sigs); + + // Check 
if we need to transmute the result + let expr_return_type = get_expr_type(compound_expr, ¶m_types, &helper_sigs); + if info.return_type == RustType::HvxVectorPred + && expr_return_type == Some(RustType::HvxVector) + { + format!( + "core::mem::transmute::({})", + expr_body + ) + } else { + expr_body + } + }; + output.push_str(&format!(" {}\n", body)); + + output.push_str("}\n\n"); + } + } + + output +} + +/// Generate the complete hvx.rs file +fn generate_hvx_rs(intrinsics: &[IntrinsicInfo], output_path: &Path) -> Result<(), String> { + let mut output = + File::create(output_path).map_err(|e| format!("Failed to create output: {}", e))?; + + writeln!(output, "{}", generate_module_doc()).map_err(|e| e.to_string())?; + writeln!(output, "{}", generate_types()).map_err(|e| e.to_string())?; + writeln!(output, "{}", generate_extern_block(intrinsics)).map_err(|e| e.to_string())?; + writeln!(output, "{}", generate_functions(intrinsics)).map_err(|e| e.to_string())?; + + // Ensure file is flushed before running rustfmt + drop(output); + + // Run rustfmt on the generated file + let status = std::process::Command::new("rustfmt") + .arg(output_path) + .status() + .map_err(|e| format!("Failed to run rustfmt: {}", e))?; + + if !status.success() { + return Err("rustfmt failed".to_string()); + } + + Ok(()) +} + +fn main() -> Result<(), String> { + println!("=== Hexagon HVX Code Generator ===\n"); + + // Download and parse the LLVM header + println!("Step 1: Downloading LLVM HVX header..."); + let header_content = download_header()?; + println!(" Downloaded {} bytes", header_content.len()); + + println!("\nStep 2: Parsing intrinsic definitions..."); + let all_intrinsics = parse_header(&header_content); + println!(" Found {} intrinsic definitions", all_intrinsics.len()); + + // Filter out intrinsics requiring architecture versions not yet supported by rustc + let intrinsics: Vec<_> = all_intrinsics + .into_iter() + .filter(|i| i.min_arch <= MAX_SUPPORTED_ARCH) + .collect(); + let 
filtered_count = intrinsics.len(); + println!( + " Filtered to {} intrinsics (max supported: hvxv{})", + filtered_count, MAX_SUPPORTED_ARCH + ); + + // Count simple vs compound + let simple_count = intrinsics.iter().filter(|i| !i.is_compound).count(); + let compound_count = intrinsics.iter().filter(|i| i.is_compound).count(); + println!(" Simple intrinsics: {}", simple_count); + println!(" Compound intrinsics: {}", compound_count); + + // Print some sample intrinsics for verification + println!("\n Sample simple intrinsics:"); + for info in intrinsics.iter().filter(|i| !i.is_compound).take(5) { + println!( + " {} -> {} ({})", + info.q6_name, info.builtin_name, info.asm_syntax + ); + } + + println!("\n Sample compound intrinsics:"); + for info in intrinsics.iter().filter(|i| i.is_compound).take(5) { + println!(" {} ({})", info.q6_name, info.asm_syntax); + } + + // Count architecture versions + let mut arch_counts: HashMap = HashMap::new(); + for info in &intrinsics { + *arch_counts.entry(info.min_arch).or_insert(0) += 1; + } + println!("\n By architecture version:"); + let mut archs: Vec<_> = arch_counts.iter().collect(); + archs.sort_by_key(|(k, _)| *k); + for (arch, count) in archs { + println!(" HVX v{}: {} intrinsics", arch, count); + } + + // Generate output + let crate_dir = std::env::var("CARGO_MANIFEST_DIR") + .map(std::path::PathBuf::from) + .unwrap_or_else(|_| std::env::current_dir().unwrap()); + + let output_path = crate_dir.join("../core_arch/src/hexagon/hvx.rs"); + + println!("\nStep 3: Generating hvx.rs..."); + generate_hvx_rs(&intrinsics, &output_path)?; + + println!("\n=== Results ==="); + println!(" Generated {} simple wrapper functions", simple_count); + println!(" Generated {} compound wrapper functions", compound_count); + println!(" Total: {} functions", simple_count + compound_count); + println!(" Output: {}", output_path.display()); + + Ok(()) +} diff --git a/library/stdarch/examples/Cargo.toml b/library/stdarch/examples/Cargo.toml index 
61451edee841c..1e893dc15f971 100644 --- a/library/stdarch/examples/Cargo.toml +++ b/library/stdarch/examples/Cargo.toml @@ -23,6 +23,10 @@ path = "hex.rs" name = "connect5" path = "connect5.rs" +[[bin]] +name = "gaussian" +path = "gaussian.rs" + [[example]] name = "wasm" crate-type = ["cdylib"] diff --git a/library/stdarch/examples/gaussian.rs b/library/stdarch/examples/gaussian.rs new file mode 100644 index 0000000000000..1891f194ed7ff --- /dev/null +++ b/library/stdarch/examples/gaussian.rs @@ -0,0 +1,358 @@ +//! Hexagon HVX Gaussian 3x3 blur example +//! +//! This example demonstrates the use of Hexagon HVX intrinsics to implement +//! a 3x3 Gaussian blur filter on unsigned 8-bit images. +//! +//! The 3x3 Gaussian kernel is: +//! 1 2 1 +//! 2 4 2 / 16 +//! 1 2 1 +//! +//! This is a separable filter: `[1 2 1]^T * [1 2 1] / 16`. +//! Each 1D pass of `[1 2 1] / 4` is computed using byte averaging: +//! avg(avg(a, c), b) ≈ (a + 2b + c) / 4 +//! +//! This approach uses only `HvxVector` (single-vector) operations, avoiding +//! `HvxVectorPair` which currently has ABI limitations in the Rust/LLVM +//! Hexagon backend. +//! +//! To build: +//! +//! RUSTFLAGS="-C target-feature=+hvxv60,+hvx-length128b \ +//! -C linker=hexagon-unknown-linux-musl-clang" \ +//! cargo +nightly build --bin gaussian -p stdarch_examples \ +//! --target hexagon-unknown-linux-musl \ +//! -Zbuild-std -Zbuild-std-features=llvm-libunwind +//! +//! To run under QEMU: +//! +//! qemu-hexagon -L /target/hexagon-unknown-linux-musl \ +//! 
target/hexagon-unknown-linux-musl/debug/gaussian + +#![cfg_attr(target_arch = "hexagon", feature(stdarch_hexagon))] +#![cfg_attr(target_arch = "hexagon", feature(hexagon_target_feature))] +#![allow( + unsafe_op_in_unsafe_fn, + clippy::unwrap_used, + clippy::print_stdout, + clippy::missing_docs_in_private_items, + clippy::cast_possible_wrap, + clippy::cast_ptr_alignment, + dead_code +)] + +#[cfg(target_arch = "hexagon")] +use core_arch::arch::hexagon::*; + +/// Vector length in bytes for HVX 128-byte mode +#[cfg(all(target_arch = "hexagon", target_feature = "hvx-length128b"))] +const VLEN: usize = 128; + +/// Vector length in bytes for HVX 64-byte mode +#[cfg(all(target_arch = "hexagon", not(target_feature = "hvx-length128b")))] +const VLEN: usize = 64; + +/// Vertical 1-2-1 filter pass using byte averaging +/// +/// Computes: dst[x] = avg(avg(row_above[x], row_below[x]), center[x]) +/// ≈ (row_above[x] + 2*center[x] + row_below[x]) / 4 +/// +/// # Safety +/// +/// - `src` must point to the center row with valid data at -stride and +stride +/// - `dst` must point to a valid output buffer for `width` bytes +/// - `width` must be a multiple of VLEN +/// - All pointers must be HVX-aligned (128-byte for 128B mode) +#[cfg(target_arch = "hexagon")] +#[target_feature(enable = "hvxv60")] +unsafe fn vertical_121_pass(src: *const u8, stride: isize, width: usize, dst: *mut u8) { + let inp0 = src.offset(-stride) as *const HvxVector; + let inp1 = src as *const HvxVector; + let inp2 = src.offset(stride) as *const HvxVector; + let outp = dst as *mut HvxVector; + + let n_chunks = width / VLEN; + for i in 0..n_chunks { + let above = *inp0.add(i); + let center = *inp1.add(i); + let below = *inp2.add(i); + + // avg(above, below) ≈ (above + below) / 2 + let avg_ab = q6_vub_vavg_vubvub_rnd(above, below); + // avg(avg_ab, center) ≈ ((above + below)/2 + center) / 2 + // ≈ (above + 2*center + below) / 4 + let result = q6_vub_vavg_vubvub_rnd(avg_ab, center); + + *outp.add(i) = result; + } 
+} + +/// Horizontal 1-2-1 filter pass using byte averaging with vector alignment +/// +/// Computes: dst[x] = avg(avg(src[x-1], src[x+1]), src[x]) +/// ≈ (src[x-1] + 2*src[x] + src[x+1]) / 4 +/// +/// Uses `valign` and `vlalign` to shift vectors by 1 byte for neighbor access. +/// +/// # Safety +/// +/// - `src` and `dst` must point to valid buffers of `width` bytes +/// - `width` must be a multiple of VLEN +/// - All pointers must be HVX-aligned +#[cfg(target_arch = "hexagon")] +#[target_feature(enable = "hvxv60")] +unsafe fn horizontal_121_pass(src: *const u8, width: usize, dst: *mut u8) { + let inp = src as *const HvxVector; + let outp = dst as *mut HvxVector; + + let n_chunks = width / VLEN; + let mut prev = q6_v_vzero(); + + for i in 0..n_chunks { + let curr = *inp.add(i); + let next = if i + 1 < n_chunks { + *inp.add(i + 1) + } else { + q6_v_vzero() + }; + + // Left neighbor (x-1): shift curr right by 1 byte, filling from prev + // vlalign(curr, prev, 1) = { prev[VLEN-1], curr[0], curr[1], ..., curr[VLEN-2] } + let left = q6_v_vlalign_vvr(curr, prev, 1); + + // Right neighbor (x+1): shift curr left by 1 byte, filling from next + // valign(next, curr, 1) = { curr[1], curr[2], ..., curr[VLEN-1], next[0] } + let right = q6_v_valign_vvr(next, curr, 1); + + // avg(left, right) ≈ (src[x-1] + src[x+1]) / 2 + let avg_lr = q6_vub_vavg_vubvub_rnd(left, right); + // avg(avg_lr, curr) ≈ ((src[x-1] + src[x+1])/2 + src[x]) / 2 + // ≈ (src[x-1] + 2*src[x] + src[x+1]) / 4 + let result = q6_vub_vavg_vubvub_rnd(avg_lr, curr); + + *outp.add(i) = result; + + prev = curr; + } +} + +/// Apply Gaussian 3x3 blur to an entire image using separable filtering +/// +/// Two-pass approach: +/// 1. Vertical pass: apply 1-2-1 filter across rows +/// 2. 
Horizontal pass: apply 1-2-1 filter across columns +/// +/// Combined effect: 3x3 Gaussian kernel [1 2 1; 2 4 2; 1 2 1] / 16 +/// +/// # Safety +/// +/// - `src` and `dst` must point to valid image buffers of `stride * height` bytes +/// - `tmp` must point to a valid temporary buffer of `width` bytes, HVX-aligned +/// - `width` must be a multiple of VLEN and >= VLEN +/// - `stride` must be >= `width` +/// - All buffers must be HVX-aligned (128-byte for 128B mode) +#[cfg(target_arch = "hexagon")] +#[target_feature(enable = "hvxv60")] +pub unsafe fn gaussian3x3u8( + src: *const u8, + stride: usize, + width: usize, + height: usize, + dst: *mut u8, + tmp: *mut u8, +) { + let stride_i = stride as isize; + + // Process interior rows (skip first and last which lack vertical neighbors) + for y in 1..height - 1 { + let row_src = src.offset(y as isize * stride_i); + let row_dst = dst.offset(y as isize * stride_i); + + // Pass 1: vertical 1-2-1 into tmp + vertical_121_pass(row_src, stride_i, width, tmp); + + // Pass 2: horizontal 1-2-1 from tmp into dst + horizontal_121_pass(tmp, width, row_dst); + } +} + +/// Scalar reference implementation of Gaussian 3x3 blur for verification +/// +/// Applies the exact 3x3 Gaussian kernel: +/// out[y][x] = (1*p[-1][-1] + 2*p[-1][0] + 1*p[-1][1] + +/// 2*p[ 0][-1] + 4*p[ 0][0] + 2*p[ 0][1] + +/// 1*p[ 1][-1] + 2*p[ 1][0] + 1*p[ 1][1] + 8) / 16 +fn gaussian3x3u8_scalar(src: &[u8], stride: usize, width: usize, height: usize, dst: &mut [u8]) { + for y in 1..height - 1 { + for x in 1..width - 1 { + let sum = src[(y - 1) * stride + (x - 1)] as u32 + + src[(y - 1) * stride + x] as u32 * 2 + + src[(y - 1) * stride + (x + 1)] as u32 + + src[y * stride + (x - 1)] as u32 * 2 + + src[y * stride + x] as u32 * 4 + + src[y * stride + (x + 1)] as u32 * 2 + + src[(y + 1) * stride + (x - 1)] as u32 + + src[(y + 1) * stride + x] as u32 * 2 + + src[(y + 1) * stride + (x + 1)] as u32; + // Divide by 16 with rounding, saturate to u8 + dst[y * stride + x] = 
((sum + 8) >> 4).min(255) as u8; + } + } +} + +/// Scalar approximation matching the HVX byte-averaging approach +/// +/// This matches the HVX implementation's behavior: +/// - Vertical: avg_rnd(avg_rnd(above, below), center) +/// - Horizontal: avg_rnd(avg_rnd(left, right), center) +/// where avg_rnd(a, b) = (a + b + 1) / 2 +fn gaussian3x3u8_scalar_approx( + src: &[u8], + stride: usize, + width: usize, + height: usize, + dst: &mut [u8], +) { + // Temporary buffer for vertical pass output + let mut tmp = vec![0u8; width * height]; + + // Vertical pass: 1-2-1 using rounding average + for y in 1..height - 1 { + for x in 0..width { + let above = src[(y - 1) * stride + x] as u16; + let center = src[y * stride + x] as u16; + let below = src[(y + 1) * stride + x] as u16; + let avg_ab = ((above + below + 1) / 2) as u8; + tmp[y * width + x] = ((avg_ab as u16 + center + 1) / 2) as u8; + } + } + + // Horizontal pass: 1-2-1 using rounding average + for y in 1..height - 1 { + for x in 1..width - 1 { + let left = tmp[y * width + (x - 1)] as u16; + let center = tmp[y * width + x] as u16; + let right = tmp[y * width + (x + 1)] as u16; + let avg_lr = ((left + right + 1) / 2) as u8; + dst[y * stride + x] = ((avg_lr as u16 + center + 1) / 2) as u8; + } + } +} + +fn main() { + println!("HVX Gaussian 3x3 blur example"); + println!("Separable filter using byte averaging (HvxVector only)"); + println!(); + + #[cfg(not(target_arch = "hexagon"))] + { + const WIDTH: usize = 128; + const HEIGHT: usize = 16; + + let mut src = vec![0u8; WIDTH * HEIGHT]; + let mut dst_exact = vec![0u8; WIDTH * HEIGHT]; + let mut dst_approx = vec![0u8; WIDTH * HEIGHT]; + + // Create test pattern + for y in 0..HEIGHT { + for x in 0..WIDTH { + src[y * WIDTH + x] = ((x + y * 7) % 256) as u8; + } + } + + // Run exact Gaussian + gaussian3x3u8_scalar(&src, WIDTH, WIDTH, HEIGHT, &mut dst_exact); + + // Run approximate version (matches HVX behavior) + gaussian3x3u8_scalar_approx(&src, WIDTH, WIDTH, HEIGHT, &mut 
dst_approx);
+
+        // Compare exact vs approximate
+        let mut max_diff = 0u8;
+        for y in 1..HEIGHT - 1 {
+            for x in 1..WIDTH - 1 {
+                let idx = y * WIDTH + x;
+                let diff = (dst_exact[idx] as i16 - dst_approx[idx] as i16).unsigned_abs() as u8;
+                if diff > max_diff {
+                    max_diff = diff;
+                }
+            }
+        }
+
+        println!("Scalar implementations completed.");
+        println!(
+            "Input sample (row 2, cols 1..9): {:?}",
+            &src[2 * WIDTH + 1..2 * WIDTH + 9]
+        );
+        println!(
+            "Exact output (row 2, cols 1..9): {:?}",
+            &dst_exact[2 * WIDTH + 1..2 * WIDTH + 9]
+        );
+        println!(
+            "Approx output (row 2, cols 1..9): {:?}",
+            &dst_approx[2 * WIDTH + 1..2 * WIDTH + 9]
+        );
+        println!("Max diff between exact and approx: {}", max_diff);
+    }
+
+    #[cfg(target_arch = "hexagon")]
+    {
+        const WIDTH: usize = 256; // Must be multiple of VLEN (128)
+        const HEIGHT: usize = 16;
+
+        // Aligned buffers for HVX
+        #[repr(align(128))]
+        struct AlignedBuf<const N: usize>([u8; N]);
+
+        let mut src = AlignedBuf::<{ WIDTH * HEIGHT }>([0u8; WIDTH * HEIGHT]);
+        let mut dst_hvx = AlignedBuf::<{ WIDTH * HEIGHT }>([0u8; WIDTH * HEIGHT]);
+        let mut tmp = AlignedBuf::<{ WIDTH }>([0u8; WIDTH]);
+        let mut dst_ref = vec![0u8; WIDTH * HEIGHT];
+
+        // Create test pattern
+        for y in 0..HEIGHT {
+            for x in 0..WIDTH {
+                src.0[y * WIDTH + x] = ((x + y * 7) % 256) as u8;
+            }
+        }
+
+        // Run HVX version
+        unsafe {
+            gaussian3x3u8(
+                src.0.as_ptr(),
+                WIDTH,
+                WIDTH,
+                HEIGHT,
+                dst_hvx.0.as_mut_ptr(),
+                tmp.0.as_mut_ptr(),
+            );
+        }
+
+        // Run scalar approximate reference (should match HVX closely)
+        gaussian3x3u8_scalar_approx(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_ref);
+
+        // Compare results (skip edges)
+        let mut max_diff = 0u8;
+        let mut diff_count = 0usize;
+        for y in 1..HEIGHT - 1 {
+            for x in 1..WIDTH - 1 {
+                let idx = y * WIDTH + x;
+                let diff = (dst_hvx.0[idx] as i16 - dst_ref[idx] as i16).unsigned_abs() as u8;
+                if diff > max_diff {
+                    max_diff = diff;
+                }
+                if diff > 0 {
+                    diff_count += 1;
+                }
+            }
+        }
+
+        println!("HVX implementation 
completed."); + println!("Max difference from scalar reference: {}", max_diff); + println!("Pixels with any difference: {}", diff_count); + if max_diff <= 1 { + println!("Results match within rounding tolerance!"); + } else { + println!("WARNING: Results differ more than expected."); + } + } +} From 2b710f1a450f22aab7695e88f556eedf456a7a56 Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Fri, 30 Jan 2026 19:19:41 -0600 Subject: [PATCH 36/90] Switched to 64b and 128b crate definitions --- .../crates/core_arch/src/hexagon/mod.rs | 21 +- .../core_arch/src/hexagon/{hvx.rs => v128.rs} | 1019 +-- .../crates/core_arch/src/hexagon/v64.rs | 7489 +++++++++++++++++ .../crates/stdarch-gen-hexagon/src/main.rs | 250 +- library/stdarch/examples/gaussian.rs | 6 +- 5 files changed, 7661 insertions(+), 1124 deletions(-) rename library/stdarch/crates/core_arch/src/hexagon/{hvx.rs => v128.rs} (82%) create mode 100644 library/stdarch/crates/core_arch/src/hexagon/v64.rs diff --git a/library/stdarch/crates/core_arch/src/hexagon/mod.rs b/library/stdarch/crates/core_arch/src/hexagon/mod.rs index a9c53d6efe00e..c370f3da15dfb 100644 --- a/library/stdarch/crates/core_arch/src/hexagon/mod.rs +++ b/library/stdarch/crates/core_arch/src/hexagon/mod.rs @@ -5,8 +5,25 @@ //! //! HVX is a wide SIMD architecture designed for high-performance signal processing, //! machine learning, and image processing workloads. +//! +//! ## Vector Length Modes +//! +//! HVX supports two vector length modes: +//! - 64-byte mode (512-bit vectors): Use the [`v64`] module +//! - 128-byte mode (1024-bit vectors): Use the [`v128`] module +//! +//! Both modules are available unconditionally, but require the appropriate +//! target features to actually use the intrinsics: +//! - For 64-byte mode: `-C target-feature=+hvx-length64b` +//! - For 128-byte mode: `-C target-feature=+hvx-length128b` +//! +//! Note that HVX v66 and later default to 128-byte mode, while earlier versions +//! (v60-v65) default to 64-byte mode. 
-mod hvx; +/// HVX intrinsics for 64-byte vector mode (512-bit vectors) +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub mod v64; +/// HVX intrinsics for 128-byte vector mode (1024-bit vectors) #[unstable(feature = "stdarch_hexagon", issue = "151523")] -pub use self::hvx::*; +pub mod v128; diff --git a/library/stdarch/crates/core_arch/src/hexagon/hvx.rs b/library/stdarch/crates/core_arch/src/hexagon/v128.rs similarity index 82% rename from library/stdarch/crates/core_arch/src/hexagon/hvx.rs rename to library/stdarch/crates/core_arch/src/hexagon/v128.rs index 24d42ea1fcd11..ef7ff4205c71d 100644 --- a/library/stdarch/crates/core_arch/src/hexagon/hvx.rs +++ b/library/stdarch/crates/core_arch/src/hexagon/v128.rs @@ -1,22 +1,19 @@ -//! Hexagon HVX intrinsics +//! Hexagon HVX 128-byte vector mode intrinsics +//! +//! This module provides intrinsics for the Hexagon Vector Extensions (HVX) +//! in 128-byte vector mode (1024-bit vectors). //! -//! This module provides intrinsics for the Hexagon Vector Extensions (HVX). //! HVX is a wide vector extension designed for high-performance signal processing. //! [Hexagon HVX Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-61) //! //! ## Vector Types //! -//! HVX supports different vector lengths depending on the configuration: -//! - 128-byte mode: `HvxVector` is 1024 bits (128 bytes) -//! - 64-byte mode: `HvxVector` is 512 bits (64 bytes) -//! -//! This implementation targets 128-byte mode by default. To change the vector -//! length mode, use the appropriate target feature when compiling: -//! - For 128-byte mode: `-C target-feature=+hvx-length128b` -//! - For 64-byte mode: `-C target-feature=+hvx-length64b` +//! In 128-byte mode: +//! - `HvxVector` is 1024 bits (128 bytes) containing 32 x 32-bit values +//! - `HvxVectorPair` is 2048 bits (256 bytes) +//! - `HvxVectorPred` is 1024 bits (128 bytes) for predicate operations //! -//! 
Note that HVX v66 and later default to 128-byte mode, while earlier versions -//! default to 64-byte mode. +//! To use this module, compile with `-C target-feature=+hvx-length128b`. //! //! ## Architecture Versions //! @@ -30,7 +27,6 @@ //! - HVX v69: `-C target-feature=+hvxv69` //! - HVX v73: `-C target-feature=+hvxv73` //! - HVX v79: `-C target-feature=+hvxv79` -//! - HVX v81: `-C target-feature=+hvxv81` //! //! Each version includes all features from previous versions. @@ -41,9 +37,7 @@ use stdarch_test::assert_instr; use crate::intrinsics::simd::{simd_add, simd_and, simd_or, simd_sub, simd_xor}; -// HVX type definitions for 128-byte vector mode (default for v66+) -// Use -C target-feature=+hvx-length128b to enable -#[cfg(target_feature = "hvx-length128b")] +// HVX type definitions for 128-byte vector mode types! { #![unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -65,32 +59,7 @@ types! { pub struct HvxVectorPred(32 x i32); } -// HVX type definitions for 64-byte vector mode (default for v60-v65) -// Use -C target-feature=+hvx-length64b to enable, or omit hvx-length128b -#[cfg(not(target_feature = "hvx-length128b"))] -types! { - #![unstable(feature = "stdarch_hexagon", issue = "151523")] - - /// HVX vector type (512 bits / 64 bytes) - /// - /// This type represents a single HVX vector register containing 16 x 32-bit values. - pub struct HvxVector(16 x i32); - - /// HVX vector pair type (1024 bits / 128 bytes) - /// - /// This type represents a pair of HVX vector registers, often used for - /// operations that produce double-width results. - pub struct HvxVectorPair(32 x i32); - - /// HVX vector predicate type (512 bits / 64 bytes) - /// - /// This type represents a predicate vector used for conditional operations. - /// Each bit corresponds to a lane in the vector. 
- pub struct HvxVectorPred(16 x i32); -} - // LLVM intrinsic declarations for 128-byte vector mode -#[cfg(target_feature = "hvx-length128b")] #[allow(improper_ctypes)] unsafe extern "unadjusted" { #[link_name = "llvm.hexagon.V6.extractw.128B"] @@ -1057,974 +1026,6 @@ unsafe extern "unadjusted" { fn vzh(_: HvxVector) -> HvxVectorPair; } -// LLVM intrinsic declarations for 64-byte vector mode -#[cfg(not(target_feature = "hvx-length128b"))] -#[allow(improper_ctypes)] -unsafe extern "unadjusted" { - #[link_name = "llvm.hexagon.V6.extractw"] - fn extractw(_: HvxVector, _: i32) -> i32; - #[link_name = "llvm.hexagon.V6.get.qfext"] - fn get_qfext(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.hi"] - fn hi(_: HvxVectorPair) -> HvxVector; - #[link_name = "llvm.hexagon.V6.lo"] - fn lo(_: HvxVectorPair) -> HvxVector; - #[link_name = "llvm.hexagon.V6.lvsplatb"] - fn lvsplatb(_: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.lvsplath"] - fn lvsplath(_: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.lvsplatw"] - fn lvsplatw(_: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.pred.and"] - fn pred_and(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.pred.and.n"] - fn pred_and_n(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.pred.not"] - fn pred_not(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.pred.or"] - fn pred_or(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.pred.or.n"] - fn pred_or_n(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.pred.scalar2"] - fn pred_scalar2(_: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.pred.scalar2v2"] - fn pred_scalar2v2(_: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.pred.xor"] - fn pred_xor(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.set.qfext"] - fn set_qfext(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.shuffeqh"] - fn 
shuffeqh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.shuffeqw"] - fn shuffeqw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.v6mpyhubs10"] - fn v6mpyhubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.v6mpyhubs10.vxx"] - fn v6mpyhubs10_vxx( - _: HvxVectorPair, - _: HvxVectorPair, - _: HvxVectorPair, - _: i32, - ) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.v6mpyvubs10"] - fn v6mpyvubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.v6mpyvubs10.vxx"] - fn v6mpyvubs10_vxx( - _: HvxVectorPair, - _: HvxVectorPair, - _: HvxVectorPair, - _: i32, - ) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vS32b.nqpred.ai"] - fn vS32b_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vS32b.nt.nqpred.ai"] - fn vS32b_nt_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vS32b.nt.qpred.ai"] - fn vS32b_nt_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vS32b.qpred.ai"] - fn vS32b_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vabs.f8"] - fn vabs_f8(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabs.hf"] - fn vabs_hf(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabs.sf"] - fn vabs_sf(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabsb"] - fn vabsb(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabsb.sat"] - fn vabsb_sat(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabsdiffh"] - fn vabsdiffh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabsdiffub"] - fn vabsdiffub(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabsdiffuh"] - fn vabsdiffuh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = 
"llvm.hexagon.V6.vabsdiffw"] - fn vabsdiffw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabsh"] - fn vabsh(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabsh.sat"] - fn vabsh_sat(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabsw"] - fn vabsw(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vabsw.sat"] - fn vabsw_sat(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadd.hf"] - fn vadd_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadd.hf.hf"] - fn vadd_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadd.qf16"] - fn vadd_qf16(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadd.qf16.mix"] - fn vadd_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadd.qf32"] - fn vadd_qf32(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadd.qf32.mix"] - fn vadd_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadd.sf"] - fn vadd_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadd.sf.hf"] - fn vadd_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vadd.sf.sf"] - fn vadd_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddb"] - fn vaddb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddb.dv"] - fn vaddb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddbnq"] - fn vaddbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddbq"] - fn vaddbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddbsat"] - fn vaddbsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddbsat.dv"] - fn vaddbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> 
HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddcarrysat"] - fn vaddcarrysat(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddclbh"] - fn vaddclbh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddclbw"] - fn vaddclbw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddh"] - fn vaddh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddh.dv"] - fn vaddh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddhnq"] - fn vaddhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddhq"] - fn vaddhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddhsat"] - fn vaddhsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddhsat.dv"] - fn vaddhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddhw"] - fn vaddhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddhw.acc"] - fn vaddhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddubh"] - fn vaddubh(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddubh.acc"] - fn vaddubh_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddubsat"] - fn vaddubsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddubsat.dv"] - fn vaddubsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddububb.sat"] - fn vaddububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadduhsat"] - fn vadduhsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadduhsat.dv"] - fn vadduhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - 
#[link_name = "llvm.hexagon.V6.vadduhw"] - fn vadduhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vadduhw.acc"] - fn vadduhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vadduwsat"] - fn vadduwsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vadduwsat.dv"] - fn vadduwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddw"] - fn vaddw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddw.dv"] - fn vaddw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vaddwnq"] - fn vaddwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddwq"] - fn vaddwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddwsat"] - fn vaddwsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaddwsat.dv"] - fn vaddwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.valignb"] - fn valignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.valignbi"] - fn valignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vand"] - fn vand(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vandnqrt"] - fn vandnqrt(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vandnqrt.acc"] - fn vandnqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vandqrt"] - fn vandqrt(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vandqrt.acc"] - fn vandqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vandvnqv"] - fn vandvnqv(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vandvqv"] - fn vandvqv(_: HvxVector, _: 
HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vandvrt"] - fn vandvrt(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vandvrt.acc"] - fn vandvrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaslh"] - fn vaslh(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaslh.acc"] - fn vaslh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaslhv"] - fn vaslhv(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaslw"] - fn vaslw(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaslw.acc"] - fn vaslw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vaslwv"] - fn vaslwv(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasr.into"] - fn vasr_into(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vasrh"] - fn vasrh(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrh.acc"] - fn vasrh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrhbrndsat"] - fn vasrhbrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrhbsat"] - fn vasrhbsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrhubrndsat"] - fn vasrhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrhubsat"] - fn vasrhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrhv"] - fn vasrhv(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasruhubrndsat"] - fn vasruhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasruhubsat"] - fn vasruhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasruwuhrndsat"] - fn vasruwuhrndsat(_: 
HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasruwuhsat"] - fn vasruwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrvuhubrndsat"] - fn vasrvuhubrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrvuhubsat"] - fn vasrvuhubsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrvwuhrndsat"] - fn vasrvwuhrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrvwuhsat"] - fn vasrvwuhsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrw"] - fn vasrw(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrw.acc"] - fn vasrw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrwh"] - fn vasrwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrwhrndsat"] - fn vasrwhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrwhsat"] - fn vasrwhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrwuhrndsat"] - fn vasrwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrwuhsat"] - fn vasrwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vasrwv"] - fn vasrwv(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vassign"] - fn vassign(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vassign.fp"] - fn vassign_fp(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vassignp"] - fn vassignp(_: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vavgb"] - fn vavgb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavgbrnd"] - fn vavgbrnd(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavgh"] - fn vavgh(_: HvxVector, 
_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavghrnd"] - fn vavghrnd(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavgub"] - fn vavgub(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavgubrnd"] - fn vavgubrnd(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavguh"] - fn vavguh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavguhrnd"] - fn vavguhrnd(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavguw"] - fn vavguw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavguwrnd"] - fn vavguwrnd(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavgw"] - fn vavgw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vavgwrnd"] - fn vavgwrnd(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vcl0h"] - fn vcl0h(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vcl0w"] - fn vcl0w(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vcombine"] - fn vcombine(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vconv.h.hf"] - fn vconv_h_hf(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vconv.hf.h"] - fn vconv_hf_h(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vconv.hf.qf16"] - fn vconv_hf_qf16(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vconv.hf.qf32"] - fn vconv_hf_qf32(_: HvxVectorPair) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vconv.sf.qf32"] - fn vconv_sf_qf32(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vconv.sf.w"] - fn vconv_sf_w(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vconv.w.sf"] - fn vconv_w_sf(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vcvt2.hf.b"] - fn vcvt2_hf_b(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vcvt2.hf.ub"] - fn vcvt2_hf_ub(_: 
HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vcvt.b.hf"] - fn vcvt_b_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vcvt.h.hf"] - fn vcvt_h_hf(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vcvt.hf.b"] - fn vcvt_hf_b(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vcvt.hf.f8"] - fn vcvt_hf_f8(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vcvt.hf.h"] - fn vcvt_hf_h(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vcvt.hf.sf"] - fn vcvt_hf_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vcvt.hf.ub"] - fn vcvt_hf_ub(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vcvt.hf.uh"] - fn vcvt_hf_uh(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vcvt.sf.hf"] - fn vcvt_sf_hf(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vcvt.ub.hf"] - fn vcvt_ub_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vcvt.uh.hf"] - fn vcvt_uh_hf(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vd0"] - fn vd0() -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdd0"] - fn vdd0() -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vdealb"] - fn vdealb(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdealb4w"] - fn vdealb4w(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdealh"] - fn vdealh(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdealvdd"] - fn vdealvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vdelta"] - fn vdelta(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf"] - fn vdmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.acc"] - fn vdmpy_sf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpybus"] - fn vdmpybus(_: HvxVector, _: i32) -> 
HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpybus.acc"] - fn vdmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpybus.dv"] - fn vdmpybus_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vdmpybus.dv.acc"] - fn vdmpybus_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vdmpyhb"] - fn vdmpyhb(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhb.acc"] - fn vdmpyhb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhb.dv"] - fn vdmpyhb_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.acc"] - fn vdmpyhb_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vdmpyhisat"] - fn vdmpyhisat(_: HvxVectorPair, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhisat.acc"] - fn vdmpyhisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhsat"] - fn vdmpyhsat(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhsat.acc"] - fn vdmpyhsat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhsuisat"] - fn vdmpyhsuisat(_: HvxVectorPair, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.acc"] - fn vdmpyhsuisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhsusat"] - fn vdmpyhsusat(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhsusat.acc"] - fn vdmpyhsusat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhvsat"] - fn vdmpyhvsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdmpyhvsat.acc"] - fn vdmpyhvsat_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vdsaduh"] - fn vdsaduh(_: 
HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vdsaduh.acc"] - fn vdsaduh_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.veqb"] - fn veqb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqb.and"] - fn veqb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqb.or"] - fn veqb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqb.xor"] - fn veqb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqh"] - fn veqh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqh.and"] - fn veqh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqh.or"] - fn veqh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqh.xor"] - fn veqh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqw"] - fn veqw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqw.and"] - fn veqw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqw.or"] - fn veqw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.veqw.xor"] - fn veqw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vfmax.f8"] - fn vfmax_f8(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vfmax.hf"] - fn vfmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vfmax.sf"] - fn vfmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vfmin.f8"] - fn vfmin_f8(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vfmin.hf"] - fn vfmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = 
"llvm.hexagon.V6.vfmin.sf"] - fn vfmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vfneg.f8"] - fn vfneg_f8(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vfneg.hf"] - fn vfneg_hf(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vfneg.sf"] - fn vfneg_sf(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgathermh"] - fn vgathermh(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vgathermhq"] - fn vgathermhq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vgathermhw"] - fn vgathermhw(_: *mut HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); - #[link_name = "llvm.hexagon.V6.vgathermhwq"] - fn vgathermhwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); - #[link_name = "llvm.hexagon.V6.vgathermw"] - fn vgathermw(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vgathermwq"] - fn vgathermwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vgtb"] - fn vgtb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtb.and"] - fn vgtb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtb.or"] - fn vgtb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtb.xor"] - fn vgtb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgth"] - fn vgth(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgth.and"] - fn vgth_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgth.or"] - fn vgth_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgth.xor"] - fn vgth_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = 
"llvm.hexagon.V6.vgthf"] - fn vgthf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgthf.and"] - fn vgthf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgthf.or"] - fn vgthf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgthf.xor"] - fn vgthf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtsf"] - fn vgtsf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtsf.and"] - fn vgtsf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtsf.or"] - fn vgtsf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtsf.xor"] - fn vgtsf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtub"] - fn vgtub(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtub.and"] - fn vgtub_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtub.or"] - fn vgtub_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtub.xor"] - fn vgtub_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtuh"] - fn vgtuh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtuh.and"] - fn vgtuh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtuh.or"] - fn vgtuh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtuh.xor"] - fn vgtuh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtuw"] - fn vgtuw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtuw.and"] - fn vgtuw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtuw.or"] - 
fn vgtuw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtuw.xor"] - fn vgtuw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtw"] - fn vgtw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtw.and"] - fn vgtw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtw.or"] - fn vgtw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vgtw.xor"] - fn vgtw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vinsertwr"] - fn vinsertwr(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlalignb"] - fn vlalignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlalignbi"] - fn vlalignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlsrb"] - fn vlsrb(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlsrh"] - fn vlsrh(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlsrhv"] - fn vlsrhv(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlsrw"] - fn vlsrw(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlsrwv"] - fn vlsrwv(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlutvvb"] - fn vlutvvb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlutvvb.nm"] - fn vlutvvb_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlutvvb.oracc"] - fn vlutvvb_oracc(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlutvvb.oracci"] - fn vlutvvb_oracci(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vlutvvbi"] - fn vlutvvbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = 
"llvm.hexagon.V6.vlutvwh"] - fn vlutvwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vlutvwh.nm"] - fn vlutvwh_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vlutvwh.oracc"] - fn vlutvwh_oracc(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vlutvwh.oracci"] - fn vlutvwh_oracci(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vlutvwhi"] - fn vlutvwhi(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmax.hf"] - fn vmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmax.sf"] - fn vmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmaxb"] - fn vmaxb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmaxh"] - fn vmaxh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmaxub"] - fn vmaxub(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmaxuh"] - fn vmaxuh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmaxw"] - fn vmaxw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmin.hf"] - fn vmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmin.sf"] - fn vmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vminb"] - fn vminb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vminh"] - fn vminh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vminub"] - fn vminub(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vminuh"] - fn vminuh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vminw"] - fn vminw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpabus"] - fn 
vmpabus(_: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpabus.acc"] - fn vmpabus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpabusv"] - fn vmpabusv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpabuu"] - fn vmpabuu(_: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpabuu.acc"] - fn vmpabuu_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpabuuv"] - fn vmpabuuv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpahb"] - fn vmpahb(_: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpahb.acc"] - fn vmpahb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpauhb"] - fn vmpauhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpauhb.acc"] - fn vmpauhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpy.hf.hf"] - fn vmpy_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.acc"] - fn vmpy_hf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpy.qf16"] - fn vmpy_qf16(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpy.qf16.hf"] - fn vmpy_qf16_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpy.qf16.mix.hf"] - fn vmpy_qf16_mix_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpy.qf32"] - fn vmpy_qf32(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpy.qf32.hf"] - fn vmpy_qf32_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpy.qf32.mix.hf"] - fn vmpy_qf32_mix_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = 
"llvm.hexagon.V6.vmpy.qf32.qf16"] - fn vmpy_qf32_qf16(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpy.qf32.sf"] - fn vmpy_qf32_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpy.sf.hf"] - fn vmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.acc"] - fn vmpy_sf_hf_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpy.sf.sf"] - fn vmpy_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpybus"] - fn vmpybus(_: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpybus.acc"] - fn vmpybus_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpybusv"] - fn vmpybusv(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpybusv.acc"] - fn vmpybusv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpybv"] - fn vmpybv(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpybv.acc"] - fn vmpybv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyewuh"] - fn vmpyewuh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyewuh.64"] - fn vmpyewuh_64(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyh"] - fn vmpyh(_: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyh.acc"] - fn vmpyh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyhsat.acc"] - fn vmpyhsat_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyhsrs"] - fn vmpyhsrs(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyhss"] - fn vmpyhss(_: HvxVector, _: i32) -> HvxVector; - #[link_name = 
"llvm.hexagon.V6.vmpyhus"] - fn vmpyhus(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyhus.acc"] - fn vmpyhus_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyhv"] - fn vmpyhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyhv.acc"] - fn vmpyhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyhvsrs"] - fn vmpyhvsrs(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyieoh"] - fn vmpyieoh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyiewh.acc"] - fn vmpyiewh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyiewuh"] - fn vmpyiewuh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyiewuh.acc"] - fn vmpyiewuh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyih"] - fn vmpyih(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyih.acc"] - fn vmpyih_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyihb"] - fn vmpyihb(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyihb.acc"] - fn vmpyihb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyiowh"] - fn vmpyiowh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyiwb"] - fn vmpyiwb(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyiwb.acc"] - fn vmpyiwb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyiwh"] - fn vmpyiwh(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyiwh.acc"] - fn vmpyiwh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyiwub"] - fn vmpyiwub(_: HvxVector, _: 
i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyiwub.acc"] - fn vmpyiwub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyowh"] - fn vmpyowh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyowh.64.acc"] - fn vmpyowh_64_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyowh.rnd"] - fn vmpyowh_rnd(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.sacc"] - fn vmpyowh_rnd_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyowh.sacc"] - fn vmpyowh_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyub"] - fn vmpyub(_: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyub.acc"] - fn vmpyub_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyubv"] - fn vmpyubv(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyubv.acc"] - fn vmpyubv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyuh"] - fn vmpyuh(_: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyuh.acc"] - fn vmpyuh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyuhe"] - fn vmpyuhe(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyuhe.acc"] - fn vmpyuhe_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmpyuhv"] - fn vmpyuhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyuhv.acc"] - fn vmpyuhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vmpyuhvs"] - fn vmpyuhvs(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vmux"] - fn vmux(_: HvxVector, _: HvxVector, _: 
HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vnavgb"] - fn vnavgb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vnavgh"] - fn vnavgh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vnavgub"] - fn vnavgub(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vnavgw"] - fn vnavgw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vnormamth"] - fn vnormamth(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vnormamtw"] - fn vnormamtw(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vnot"] - fn vnot(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vor"] - fn vor(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vpackeb"] - fn vpackeb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vpackeh"] - fn vpackeh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vpackhb.sat"] - fn vpackhb_sat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vpackhub.sat"] - fn vpackhub_sat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vpackob"] - fn vpackob(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vpackoh"] - fn vpackoh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vpackwh.sat"] - fn vpackwh_sat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vpackwuh.sat"] - fn vpackwuh_sat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vpopcounth"] - fn vpopcounth(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vprefixqb"] - fn vprefixqb(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vprefixqh"] - fn vprefixqh(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vprefixqw"] - fn vprefixqw(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrdelta"] - fn vrdelta(_: HvxVector, 
_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrmpybus"] - fn vrmpybus(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrmpybus.acc"] - fn vrmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrmpybusi"] - fn vrmpybusi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vrmpybusi.acc"] - fn vrmpybusi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vrmpybusv"] - fn vrmpybusv(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrmpybusv.acc"] - fn vrmpybusv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrmpybv"] - fn vrmpybv(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrmpybv.acc"] - fn vrmpybv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrmpyub"] - fn vrmpyub(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrmpyub.acc"] - fn vrmpyub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrmpyubi"] - fn vrmpyubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vrmpyubi.acc"] - fn vrmpyubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vrmpyubv"] - fn vrmpyubv(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrmpyubv.acc"] - fn vrmpyubv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vror"] - fn vror(_: HvxVector, _: i32) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrotr"] - fn vrotr(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vroundhb"] - fn vroundhb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vroundhub"] - fn vroundhub(_: HvxVector, _: HvxVector) -> HvxVector; - 
#[link_name = "llvm.hexagon.V6.vrounduhub"] - fn vrounduhub(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrounduwuh"] - fn vrounduwuh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vroundwh"] - fn vroundwh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vroundwuh"] - fn vroundwuh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vrsadubi"] - fn vrsadubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vrsadubi.acc"] - fn vrsadubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsatdw"] - fn vsatdw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsathub"] - fn vsathub(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsatuwuh"] - fn vsatuwuh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsatwh"] - fn vsatwh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsb"] - fn vsb(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vscattermh"] - fn vscattermh(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vscattermh.add"] - fn vscattermh_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vscattermhq"] - fn vscattermhq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vscattermhw"] - fn vscattermhw(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vscattermhw.add"] - fn vscattermhw_add(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vscattermhwq"] - fn vscattermhwq(_: HvxVector, _: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vscattermw"] - fn vscattermw(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); - #[link_name = 
"llvm.hexagon.V6.vscattermw.add"] - fn vscattermw_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vscattermwq"] - fn vscattermwq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); - #[link_name = "llvm.hexagon.V6.vsh"] - fn vsh(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vshufeh"] - fn vshufeh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vshuffb"] - fn vshuffb(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vshuffeb"] - fn vshuffeb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vshuffh"] - fn vshuffh(_: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vshuffob"] - fn vshuffob(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vshuffvdd"] - fn vshuffvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vshufoeb"] - fn vshufoeb(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vshufoeh"] - fn vshufoeh(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vshufoh"] - fn vshufoh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsub.hf"] - fn vsub_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsub.hf.hf"] - fn vsub_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsub.qf16"] - fn vsub_qf16(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsub.qf16.mix"] - fn vsub_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsub.qf32"] - fn vsub_qf32(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsub.qf32.mix"] - fn vsub_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsub.sf"] - fn vsub_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsub.sf.hf"] - fn vsub_sf_hf(_: HvxVector, 
_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsub.sf.sf"] - fn vsub_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubb"] - fn vsubb(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubb.dv"] - fn vsubb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsubbnq"] - fn vsubbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubbq"] - fn vsubbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubbsat"] - fn vsubbsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubbsat.dv"] - fn vsubbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsubh"] - fn vsubh(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubh.dv"] - fn vsubh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsubhnq"] - fn vsubhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubhq"] - fn vsubhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubhsat"] - fn vsubhsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubhsat.dv"] - fn vsubhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsubhw"] - fn vsubhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsububh"] - fn vsububh(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsububsat"] - fn vsububsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsububsat.dv"] - fn vsububsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsubububb.sat"] - fn vsubububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = 
"llvm.hexagon.V6.vsubuhsat"] - fn vsubuhsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubuhsat.dv"] - fn vsubuhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsubuhw"] - fn vsubuhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsubuwsat"] - fn vsubuwsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubuwsat.dv"] - fn vsubuwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsubw"] - fn vsubw(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubw.dv"] - fn vsubw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vsubwnq"] - fn vsubwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubwq"] - fn vsubwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubwsat"] - fn vsubwsat(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vsubwsat.dv"] - fn vsubwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vswap"] - fn vswap(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vtmpyb"] - fn vtmpyb(_: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vtmpyb.acc"] - fn vtmpyb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vtmpybus"] - fn vtmpybus(_: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vtmpybus.acc"] - fn vtmpybus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vtmpyhb"] - fn vtmpyhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vtmpyhb.acc"] - fn vtmpyhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; - #[link_name = 
"llvm.hexagon.V6.vunpackb"] - fn vunpackb(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vunpackh"] - fn vunpackh(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vunpackob"] - fn vunpackob(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vunpackoh"] - fn vunpackoh(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vunpackub"] - fn vunpackub(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vunpackuh"] - fn vunpackuh(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vxor"] - fn vxor(_: HvxVector, _: HvxVector) -> HvxVector; - #[link_name = "llvm.hexagon.V6.vzb"] - fn vzb(_: HvxVector) -> HvxVectorPair; - #[link_name = "llvm.hexagon.V6.vzh"] - fn vzh(_: HvxVector) -> HvxVectorPair; -} - /// `Rd32=vextract(Vu32,Rs32)` /// /// Instruction Type: LD diff --git a/library/stdarch/crates/core_arch/src/hexagon/v64.rs b/library/stdarch/crates/core_arch/src/hexagon/v64.rs new file mode 100644 index 0000000000000..023a8711d21f3 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/hexagon/v64.rs @@ -0,0 +1,7489 @@ +//! Hexagon HVX 64-byte vector mode intrinsics +//! +//! This module provides intrinsics for the Hexagon Vector Extensions (HVX) +//! in 64-byte vector mode (512-bit vectors). +//! +//! HVX is a wide vector extension designed for high-performance signal processing. +//! [Hexagon HVX Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-61) +//! +//! ## Vector Types +//! +//! In 64-byte mode: +//! - `HvxVector` is 512 bits (64 bytes) containing 16 x 32-bit values +//! - `HvxVectorPair` is 1024 bits (128 bytes) +//! - `HvxVectorPred` is 512 bits (64 bytes) for predicate operations +//! +//! To use this module, compile with `-C target-feature=+hvx-length64b`. +//! +//! ## Architecture Versions +//! +//! Different intrinsics require different HVX architecture versions. Use the +//! 
appropriate target feature to enable the required version: +//! - HVX v60: `-C target-feature=+hvxv60` (basic HVX operations) +//! - HVX v62: `-C target-feature=+hvxv62` +//! - HVX v65: `-C target-feature=+hvxv65` (includes floating-point support) +//! - HVX v66: `-C target-feature=+hvxv66` +//! - HVX v68: `-C target-feature=+hvxv68` +//! - HVX v69: `-C target-feature=+hvxv69` +//! - HVX v73: `-C target-feature=+hvxv73` +//! - HVX v79: `-C target-feature=+hvxv79` +//! +//! Each version includes all features from previous versions. + +#![allow(non_camel_case_types)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use crate::intrinsics::simd::{simd_add, simd_and, simd_or, simd_sub, simd_xor}; + +// HVX type definitions for 64-byte vector mode +types! { + #![unstable(feature = "stdarch_hexagon", issue = "151523")] + + /// HVX vector type (512 bits / 64 bytes) + /// + /// This type represents a single HVX vector register containing 16 x 32-bit values. + pub struct HvxVector(16 x i32); + + /// HVX vector pair type (1024 bits / 128 bytes) + /// + /// This type represents a pair of HVX vector registers, often used for + /// operations that produce double-width results. + pub struct HvxVectorPair(32 x i32); + + /// HVX vector predicate type (512 bits / 64 bytes) + /// + /// This type represents a predicate vector used for conditional operations. + /// Each bit corresponds to a lane in the vector. 
+ pub struct HvxVectorPred(16 x i32); +} + +// LLVM intrinsic declarations for 64-byte vector mode +#[allow(improper_ctypes)] +unsafe extern "unadjusted" { + #[link_name = "llvm.hexagon.V6.extractw"] + fn extractw(_: HvxVector, _: i32) -> i32; + #[link_name = "llvm.hexagon.V6.get.qfext"] + fn get_qfext(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.hi"] + fn hi(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lo"] + fn lo(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplatb"] + fn lvsplatb(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplath"] + fn lvsplath(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplatw"] + fn lvsplatw(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.and"] + fn pred_and(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.and.n"] + fn pred_and_n(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.not"] + fn pred_not(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.or"] + fn pred_or(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.or.n"] + fn pred_or_n(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.scalar2"] + fn pred_scalar2(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.scalar2v2"] + fn pred_scalar2v2(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.xor"] + fn pred_xor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.set.qfext"] + fn set_qfext(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.shuffeqh"] + fn shuffeqh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.shuffeqw"] + fn shuffeqw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.v6mpyhubs10"] + fn v6mpyhubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyhubs10.vxx"] + fn 
v6mpyhubs10_vxx( + _: HvxVectorPair, + _: HvxVectorPair, + _: HvxVectorPair, + _: i32, + ) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyvubs10"] + fn v6mpyvubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyvubs10.vxx"] + fn v6mpyvubs10_vxx( + _: HvxVectorPair, + _: HvxVectorPair, + _: HvxVectorPair, + _: i32, + ) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vS32b.nqpred.ai"] + fn vS32b_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.nt.nqpred.ai"] + fn vS32b_nt_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.nt.qpred.ai"] + fn vS32b_nt_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.qpred.ai"] + fn vS32b_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vabs.f8"] + fn vabs_f8(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabs.hf"] + fn vabs_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabs.sf"] + fn vabs_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsb"] + fn vabsb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsb.sat"] + fn vabsb_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffh"] + fn vabsdiffh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffub"] + fn vabsdiffub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffuh"] + fn vabsdiffuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffw"] + fn vabsdiffw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsh"] + fn vabsh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsh.sat"] + fn vabsh_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsw"] + fn vabsw(_: HvxVector) -> HvxVector; + 
#[link_name = "llvm.hexagon.V6.vabsw.sat"] + fn vabsw_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.hf"] + fn vadd_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.hf.hf"] + fn vadd_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf16"] + fn vadd_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf16.mix"] + fn vadd_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf32"] + fn vadd_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf32.mix"] + fn vadd_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.sf"] + fn vadd_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.sf.hf"] + fn vadd_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadd.sf.sf"] + fn vadd_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddb"] + fn vaddb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddb.dv"] + fn vaddb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddbnq"] + fn vaddbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbq"] + fn vaddbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbsat"] + fn vaddbsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbsat.dv"] + fn vaddbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddcarrysat"] + fn vaddcarrysat(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddclbh"] + fn vaddclbh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddclbw"] + fn vaddclbw(_: HvxVector, _: HvxVector) -> HvxVector; + 
#[link_name = "llvm.hexagon.V6.vaddh"] + fn vaddh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddh.dv"] + fn vaddh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhnq"] + fn vaddhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhq"] + fn vaddhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhsat"] + fn vaddhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhsat.dv"] + fn vaddhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhw"] + fn vaddhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhw.acc"] + fn vaddhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubh"] + fn vaddubh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubh.acc"] + fn vaddubh_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubsat"] + fn vaddubsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddubsat.dv"] + fn vaddubsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddububb.sat"] + fn vaddububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduhsat"] + fn vadduhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduhsat.dv"] + fn vadduhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduhw"] + fn vadduhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduhw.acc"] + fn vadduhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduwsat"] + fn vadduwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name 
= "llvm.hexagon.V6.vadduwsat.dv"] + fn vadduwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddw"] + fn vaddw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddw.dv"] + fn vaddw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddwnq"] + fn vaddwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwq"] + fn vaddwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwsat"] + fn vaddwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwsat.dv"] + fn vaddwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.valignb"] + fn valignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.valignbi"] + fn valignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vand"] + fn vand(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandnqrt"] + fn vandnqrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandnqrt.acc"] + fn vandnqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandqrt"] + fn vandqrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandqrt.acc"] + fn vandqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvnqv"] + fn vandvnqv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvqv"] + fn vandvqv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvrt"] + fn vandvrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvrt.acc"] + fn vandvrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslh"] + fn vaslh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vaslh.acc"] + fn vaslh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslhv"] + fn vaslhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslw"] + fn vaslw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslw.acc"] + fn vaslw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslwv"] + fn vaslwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasr.into"] + fn vasr_into(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vasrh"] + fn vasrh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrh.acc"] + fn vasrh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhbrndsat"] + fn vasrhbrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhbsat"] + fn vasrhbsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhubrndsat"] + fn vasrhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhubsat"] + fn vasrhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhv"] + fn vasrhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruhubrndsat"] + fn vasruhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruhubsat"] + fn vasruhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruwuhrndsat"] + fn vasruwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruwuhsat"] + fn vasruwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvuhubrndsat"] + fn vasrvuhubrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvuhubsat"] + fn 
vasrvuhubsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvwuhrndsat"] + fn vasrvwuhrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvwuhsat"] + fn vasrvwuhsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrw"] + fn vasrw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrw.acc"] + fn vasrw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwh"] + fn vasrwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwhrndsat"] + fn vasrwhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwhsat"] + fn vasrwhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwuhrndsat"] + fn vasrwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwuhsat"] + fn vasrwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwv"] + fn vasrwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassign"] + fn vassign(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassign.fp"] + fn vassign_fp(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassignp"] + fn vassignp(_: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vavgb"] + fn vavgb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgbrnd"] + fn vavgbrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgh"] + fn vavgh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavghrnd"] + fn vavghrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgub"] + fn vavgub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgubrnd"] + fn vavgubrnd(_: HvxVector, _: HvxVector) -> HvxVector; + 
#[link_name = "llvm.hexagon.V6.vavguh"] + fn vavguh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguhrnd"] + fn vavguhrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguw"] + fn vavguw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguwrnd"] + fn vavguwrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgw"] + fn vavgw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgwrnd"] + fn vavgwrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcl0h"] + fn vcl0h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcl0w"] + fn vcl0w(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcombine"] + fn vcombine(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vconv.h.hf"] + fn vconv_h_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.h"] + fn vconv_hf_h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.qf16"] + fn vconv_hf_qf16(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.qf32"] + fn vconv_hf_qf32(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.sf.qf32"] + fn vconv_sf_qf32(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.sf.w"] + fn vconv_sf_w(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.w.sf"] + fn vconv_w_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt2.hf.b"] + fn vcvt2_hf_b(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt2.hf.ub"] + fn vcvt2_hf_ub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.b.hf"] + fn vcvt_b_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.h.hf"] + fn vcvt_h_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.b"] + fn vcvt_hf_b(_: HvxVector) -> HvxVectorPair; + #[link_name = 
"llvm.hexagon.V6.vcvt.hf.f8"] + fn vcvt_hf_f8(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.h"] + fn vcvt_hf_h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.sf"] + fn vcvt_hf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.ub"] + fn vcvt_hf_ub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.uh"] + fn vcvt_hf_uh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.sf.hf"] + fn vcvt_sf_hf(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.ub.hf"] + fn vcvt_ub_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.uh.hf"] + fn vcvt_uh_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vd0"] + fn vd0() -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdd0"] + fn vdd0() -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdealb"] + fn vdealb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealb4w"] + fn vdealb4w(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealh"] + fn vdealh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealvdd"] + fn vdealvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdelta"] + fn vdelta(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf"] + fn vdmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.acc"] + fn vdmpy_sf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus"] + fn vdmpybus(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.acc"] + fn vdmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.dv"] + fn vdmpybus_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpybus.dv.acc"] + fn vdmpybus_dv_acc(_: HvxVectorPair, _: 
HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhb"] + fn vdmpyhb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhb.acc"] + fn vdmpyhb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhb.dv"] + fn vdmpyhb_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.acc"] + fn vdmpyhb_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhisat"] + fn vdmpyhisat(_: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhisat.acc"] + fn vdmpyhisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsat"] + fn vdmpyhsat(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsat.acc"] + fn vdmpyhsat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsuisat"] + fn vdmpyhsuisat(_: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.acc"] + fn vdmpyhsuisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsusat"] + fn vdmpyhsusat(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsusat.acc"] + fn vdmpyhsusat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhvsat"] + fn vdmpyhvsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhvsat.acc"] + fn vdmpyhvsat_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdsaduh"] + fn vdsaduh(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdsaduh.acc"] + fn vdsaduh_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.veqb"] + fn veqb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.and"] + fn veqb_and(_: 
HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.or"] + fn veqb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.xor"] + fn veqb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh"] + fn veqh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.and"] + fn veqh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.or"] + fn veqh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.xor"] + fn veqh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw"] + fn veqw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.and"] + fn veqw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.or"] + fn veqw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.xor"] + fn veqw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.f8"] + fn vfmax_f8(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.hf"] + fn vfmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.sf"] + fn vfmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.f8"] + fn vfmin_f8(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.hf"] + fn vfmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.sf"] + fn vfmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.f8"] + fn vfneg_f8(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.hf"] + fn vfneg_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.sf"] + fn vfneg_sf(_: HvxVector) -> 
HvxVector; + #[link_name = "llvm.hexagon.V6.vgathermh"] + fn vgathermh(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhq"] + fn vgathermhq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhw"] + fn vgathermhw(_: *mut HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhwq"] + fn vgathermhwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); + #[link_name = "llvm.hexagon.V6.vgathermw"] + fn vgathermw(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermwq"] + fn vgathermwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgtb"] + fn vgtb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.and"] + fn vgtb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.or"] + fn vgtb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.xor"] + fn vgtb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth"] + fn vgth(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.and"] + fn vgth_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.or"] + fn vgth_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.xor"] + fn vgth_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf"] + fn vgthf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.and"] + fn vgthf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.or"] + fn vgthf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.xor"] 
+ fn vgthf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf"] + fn vgtsf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.and"] + fn vgtsf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.or"] + fn vgtsf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.xor"] + fn vgtsf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub"] + fn vgtub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.and"] + fn vgtub_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.or"] + fn vgtub_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.xor"] + fn vgtub_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh"] + fn vgtuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.and"] + fn vgtuh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.or"] + fn vgtuh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.xor"] + fn vgtuh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw"] + fn vgtuw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.and"] + fn vgtuw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.or"] + fn vgtuw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.xor"] + fn vgtuw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw"] + fn vgtw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.and"] + fn vgtw_and(_: HvxVector, _: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.or"] + fn vgtw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.xor"] + fn vgtw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vinsertwr"] + fn vinsertwr(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlalignb"] + fn vlalignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlalignbi"] + fn vlalignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrb"] + fn vlsrb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrh"] + fn vlsrh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrhv"] + fn vlsrhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrw"] + fn vlsrw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrwv"] + fn vlsrwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb"] + fn vlutvvb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.nm"] + fn vlutvvb_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.oracc"] + fn vlutvvb_oracc(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.oracci"] + fn vlutvvb_oracci(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvbi"] + fn vlutvvbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvwh"] + fn vlutvwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.nm"] + fn vlutvwh_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.oracc"] + fn vlutvwh_oracc(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + 
#[link_name = "llvm.hexagon.V6.vlutvwh.oracci"] + fn vlutvwh_oracci(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwhi"] + fn vlutvwhi(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmax.hf"] + fn vmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmax.sf"] + fn vmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxb"] + fn vmaxb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxh"] + fn vmaxh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxub"] + fn vmaxub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxuh"] + fn vmaxuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxw"] + fn vmaxw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmin.hf"] + fn vmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmin.sf"] + fn vmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminb"] + fn vminb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminh"] + fn vminh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminub"] + fn vminub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminuh"] + fn vminuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminw"] + fn vminw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpabus"] + fn vmpabus(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabus.acc"] + fn vmpabus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabusv"] + fn vmpabusv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuu"] + fn vmpabuu(_: 
HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuu.acc"] + fn vmpabuu_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuuv"] + fn vmpabuuv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpahb"] + fn vmpahb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpahb.acc"] + fn vmpahb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpauhb"] + fn vmpauhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpauhb.acc"] + fn vmpauhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.hf.hf"] + fn vmpy_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.acc"] + fn vmpy_hf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16"] + fn vmpy_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.hf"] + fn vmpy_qf16_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.mix.hf"] + fn vmpy_qf16_mix_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf32"] + fn vmpy_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.hf"] + fn vmpy_qf32_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.mix.hf"] + fn vmpy_qf32_mix_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.qf16"] + fn vmpy_qf32_qf16(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.sf"] + fn vmpy_qf32_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.sf.hf"] + fn vmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.acc"] 
+ fn vmpy_sf_hf_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.sf.sf"] + fn vmpy_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpybus"] + fn vmpybus(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybus.acc"] + fn vmpybus_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybusv"] + fn vmpybusv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybusv.acc"] + fn vmpybusv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybv"] + fn vmpybv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybv.acc"] + fn vmpybv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyewuh"] + fn vmpyewuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyewuh.64"] + fn vmpyewuh_64(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyh"] + fn vmpyh(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyh.acc"] + fn vmpyh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhsat.acc"] + fn vmpyhsat_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhsrs"] + fn vmpyhsrs(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyhss"] + fn vmpyhss(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyhus"] + fn vmpyhus(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhus.acc"] + fn vmpyhus_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhv"] + fn vmpyhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhv.acc"] + fn vmpyhv_acc(_: 
HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhvsrs"] + fn vmpyhvsrs(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyieoh"] + fn vmpyieoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewh.acc"] + fn vmpyiewh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewuh"] + fn vmpyiewuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewuh.acc"] + fn vmpyiewuh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyih"] + fn vmpyih(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyih.acc"] + fn vmpyih_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyihb"] + fn vmpyihb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyihb.acc"] + fn vmpyihb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiowh"] + fn vmpyiowh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwb"] + fn vmpyiwb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwb.acc"] + fn vmpyiwb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwh"] + fn vmpyiwh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwh.acc"] + fn vmpyiwh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwub"] + fn vmpyiwub(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwub.acc"] + fn vmpyiwub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh"] + fn vmpyowh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.64.acc"] + fn vmpyowh_64_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name 
= "llvm.hexagon.V6.vmpyowh.rnd"] + fn vmpyowh_rnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.sacc"] + fn vmpyowh_rnd_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.sacc"] + fn vmpyowh_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyub"] + fn vmpyub(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyub.acc"] + fn vmpyub_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyubv"] + fn vmpyubv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyubv.acc"] + fn vmpyubv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuh"] + fn vmpyuh(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuh.acc"] + fn vmpyuh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhe"] + fn vmpyuhe(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyuhe.acc"] + fn vmpyuhe_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyuhv"] + fn vmpyuhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhv.acc"] + fn vmpyuhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhvs"] + fn vmpyuhvs(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmux"] + fn vmux(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgb"] + fn vnavgb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgh"] + fn vnavgh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgub"] + fn vnavgub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgw"] + fn vnavgw(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnormamth"] + fn vnormamth(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnormamtw"] + fn vnormamtw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnot"] + fn vnot(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vor"] + fn vor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackeb"] + fn vpackeb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackeh"] + fn vpackeh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackhb.sat"] + fn vpackhb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackhub.sat"] + fn vpackhub_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackob"] + fn vpackob(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackoh"] + fn vpackoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackwh.sat"] + fn vpackwh_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackwuh.sat"] + fn vpackwuh_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpopcounth"] + fn vpopcounth(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqb"] + fn vprefixqb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqh"] + fn vprefixqh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqw"] + fn vprefixqw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrdelta"] + fn vrdelta(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybus"] + fn vrmpybus(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybus.acc"] + fn vrmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybusi"] + fn vrmpybusi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = 
"llvm.hexagon.V6.vrmpybusi.acc"] + fn vrmpybusi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpybusv"] + fn vrmpybusv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybusv.acc"] + fn vrmpybusv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybv"] + fn vrmpybv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybv.acc"] + fn vrmpybv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyub"] + fn vrmpyub(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyub.acc"] + fn vrmpyub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyubi"] + fn vrmpyubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpyubi.acc"] + fn vrmpyubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpyubv"] + fn vrmpyubv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyubv.acc"] + fn vrmpyubv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vror"] + fn vror(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrotr"] + fn vrotr(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundhb"] + fn vroundhb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundhub"] + fn vroundhub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrounduhub"] + fn vrounduhub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrounduwuh"] + fn vrounduwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundwh"] + fn vroundwh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundwuh"] + fn vroundwuh(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrsadubi"] + fn vrsadubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrsadubi.acc"] + fn vrsadubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsatdw"] + fn vsatdw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsathub"] + fn vsathub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsatuwuh"] + fn vsatuwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsatwh"] + fn vsatwh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsb"] + fn vsb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vscattermh"] + fn vscattermh(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermh.add"] + fn vscattermh_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhq"] + fn vscattermhq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhw"] + fn vscattermhw(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhw.add"] + fn vscattermhw_add(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhwq"] + fn vscattermhwq(_: HvxVector, _: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermw"] + fn vscattermw(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermw.add"] + fn vscattermw_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermwq"] + fn vscattermwq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vsh"] + fn vsh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufeh"] + fn 
vshufeh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffb"] + fn vshuffb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffeb"] + fn vshuffeb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffh"] + fn vshuffh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffob"] + fn vshuffob(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffvdd"] + fn vshuffvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoeb"] + fn vshufoeb(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoeh"] + fn vshufoeh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoh"] + fn vshufoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.hf"] + fn vsub_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.hf.hf"] + fn vsub_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf16"] + fn vsub_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf16.mix"] + fn vsub_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf32"] + fn vsub_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf32.mix"] + fn vsub_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.sf"] + fn vsub_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.sf.hf"] + fn vsub_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsub.sf.sf"] + fn vsub_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubb"] + fn vsubb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubb.dv"] + fn vsubb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = 
"llvm.hexagon.V6.vsubbnq"] + fn vsubbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbq"] + fn vsubbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbsat"] + fn vsubbsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbsat.dv"] + fn vsubbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubh"] + fn vsubh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubh.dv"] + fn vsubh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubhnq"] + fn vsubhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhq"] + fn vsubhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhsat"] + fn vsubhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhsat.dv"] + fn vsubhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubhw"] + fn vsubhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsububh"] + fn vsububh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsububsat"] + fn vsububsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsububsat.dv"] + fn vsububsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubububb.sat"] + fn vsubububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuhsat"] + fn vsubuhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuhsat.dv"] + fn vsubuhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubuhw"] + fn vsubuhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubuwsat"] + fn vsubuwsat(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuwsat.dv"] + fn vsubuwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubw"] + fn vsubw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubw.dv"] + fn vsubw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubwnq"] + fn vsubwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwq"] + fn vsubwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwsat"] + fn vsubwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwsat.dv"] + fn vsubwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vswap"] + fn vswap(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyb"] + fn vtmpyb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyb.acc"] + fn vtmpyb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpybus"] + fn vtmpybus(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpybus.acc"] + fn vtmpybus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyhb"] + fn vtmpyhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyhb.acc"] + fn vtmpyhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackb"] + fn vunpackb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackh"] + fn vunpackh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackob"] + fn vunpackob(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackoh"] + fn vunpackoh(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; + 
#[link_name = "llvm.hexagon.V6.vunpackub"] + fn vunpackub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackuh"] + fn vunpackuh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vxor"] + fn vxor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vzb"] + fn vzb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vzh"] + fn vzh(_: HvxVector) -> HvxVectorPair; +} + +/// `Rd32=vextract(Vu32,Rs32)` +/// +/// Instruction Type: LD +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(extractw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_r_vextract_vr(vu: HvxVector, rs: i32) -> i32 { + extractw(vu, rs) +} + +/// `Vd32=hi(Vss32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(hi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_hi_w(vss: HvxVectorPair) -> HvxVector { + hi(vss) +} + +/// `Vd32=lo(Vss32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(lo))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_lo_w(vss: HvxVectorPair) -> HvxVector { + lo(vss) +} + +/// `Vd32=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(lvsplatw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vsplat_r(rt: i32) -> HvxVector { + lvsplatw(rt) +} + +/// `Vd32.uh=vabsdiff(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vabsdiff_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffh(vu, vv) +} + +/// `Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vabsdiff_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffub(vu, vv) +} + +/// `Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vabsdiff_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffuh(vu, vv) +} + +/// `Vd32.uw=vabsdiff(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vabsdiff_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffw(vu, vv) +} + +/// `Vd32.h=vabs(Vu32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vabs_vh(vu: HvxVector) -> HvxVector { + vabsh(vu) +} + +/// `Vd32.h=vabs(Vu32.h):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vabs_vh_sat(vu: HvxVector) -> HvxVector { + vabsh_sat(vu) +} + +/// `Vd32.w=vabs(Vu32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vabs_vw(vu: HvxVector) -> HvxVector { + vabsw(vu) +} + +/// `Vd32.w=vabs(Vu32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsw_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vabs_vw_sat(vu: HvxVector) -> HvxVector { + vabsw_sat(vu) +} + +/// `Vd32.b=vadd(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vadd_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddb(vu, vv) +} + +/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vadd_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddb_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddh(vu, vv) +} + +/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddh_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddh_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddhsat(vu, vv) +} + +/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vaddhw(vu, vv) +} + +/// `Vdd32.h=vadd(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vaddubh(vu, vv) +} + +/// `Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vadd_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddubsat(vu, vv) +} + +/// `Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wub_vadd_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddubsat_dv(vuu, vvv) +} + +/// `Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vadd_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadduhsat(vu, vv) +} + +/// `Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vadd_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vadduhsat_dv(vuu, vvv) +} + 
+/// `Vdd32.w=vadd(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vadduhw(vu, vv) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_add(vu, vv) +} + +/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddw_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddw_dv(vuu, vvv) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddwsat(vu, vv) +} + +/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) 
-> HvxVectorPair { + vaddwsat_dv(vuu, vvv) +} + +/// `Vd32=valign(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(valignb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_valign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + valignb(vu, vv, rt) +} + +/// `Vd32=valign(Vu32,Vv32,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(valignbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_valign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + valignbi(vu, vv, iu3) +} + +/// `Vd32=vand(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vand))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_and(vu, vv) +} + +/// `Vd32.h=vasl(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasl_vhr(vu: HvxVector, rt: i32) -> HvxVector { + vaslh(vu, rt) +} + +/// `Vd32.h=vasl(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasl_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + 
vaslhv(vu, vv) +} + +/// `Vd32.w=vasl(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasl_vwr(vu: HvxVector, rt: i32) -> HvxVector { + vaslw(vu, rt) +} + +/// `Vx32.w+=vasl(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vaslacc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vaslw_acc(vx, vu, rt) +} + +/// `Vd32.w=vasl(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasl_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaslwv(vu, vv) +} + +/// `Vd32.h=vasr(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vhr(vu: HvxVector, rt: i32) -> HvxVector { + vasrh(vu, rt) +} + +/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhbrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + 
vasrhbrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhubrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhubrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhubsat(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vasrhv(vu, vv) +} + +/// `Vd32.w=vasr(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasr_vwr(vu: HvxVector, rt: i32) -> HvxVector { + vasrw(vu, rt) +} + +/// `Vx32.w+=vasr(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vw_vasracc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vasrw_acc(vx, vu, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwh(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwhrndsat(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwhsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwuhsat(vu, vv, rt) +} + +/// `Vd32.w=vasr(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
+#[cfg_attr(test, assert_instr(vasrwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vasrwv(vu, vv) +} + +/// `Vd32=Vu32` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vassign))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_equals_v(vu: HvxVector) -> HvxVector { + vassign(vu) +} + +/// `Vdd32=Vuu32` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vassignp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_equals_w(vuu: HvxVectorPair) -> HvxVectorPair { + vassignp(vuu) +} + +/// `Vd32.h=vavg(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgh(vu, vv) +} + +/// `Vd32.h=vavg(Vu32.h,Vv32.h):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavghrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vavg_vhvh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavghrnd(vu, vv) +} + +/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgub))] +#[unstable(feature = "stdarch_hexagon", 
issue = "151523")] +pub unsafe fn q6_vub_vavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgub(vu, vv) +} + +/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgubrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vavg_vubvub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgubrnd(vu, vv) +} + +/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavguh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vavg_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguh(vu, vv) +} + +/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavguhrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vavg_vuhvuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguhrnd(vu, vv) +} + +/// `Vd32.w=vavg(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgw(vu, vv) +} + +/// `Vd32.w=vavg(Vu32.w,Vv32.w):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgwrnd))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vavg_vwvw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgwrnd(vu, vv) +} + +/// `Vd32.uh=vcl0(Vu32.uh)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcl0h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vcl0_vuh(vu: HvxVector) -> HvxVector { + vcl0h(vu) +} + +/// `Vd32.uw=vcl0(Vu32.uw)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcl0w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vcl0_vuw(vu: HvxVector) -> HvxVector { + vcl0w(vu) +} + +/// `Vdd32=vcombine(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcombine))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vcombine_vv(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vcombine(vu, vv) +} + +/// `Vd32=#0` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vd0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vzero() -> HvxVector { + vd0() +} + +/// `Vd32.b=vdeal(Vu32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vdeal_vb(vu: HvxVector) -> HvxVector { + vdealb(vu) +} + +/// 
`Vd32.b=vdeale(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealb4w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vdeale_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdealb4w(vu, vv) +} + +/// `Vd32.h=vdeal(Vu32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdeal_vh(vu: HvxVector) -> HvxVector { + vdealh(vu) +} + +/// `Vdd32=vdeal(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealvdd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vdeal_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vdealvdd(vu, vv, rt) +} + +/// `Vd32=vdelta(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdelta))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdelta(vu, vv) +} + +/// `Vd32.h=vdmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { + vdmpybus(vu, rt) +} + +/// `Vx32.h+=vdmpy(Vu32.ub,Rt32.b)` +/// +/// 
Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdmpyacc_vhvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpybus_acc(vx, vu, rt) +} + +/// `Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vdmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdmpybus_dv(vuu, rt) +} + +/// `Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_dv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vdmpyacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vdmpybus_dv_acc(vxx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhrb(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhb(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhrb(vx: HvxVector, vu: 
HvxVector, rt: i32) -> HvxVector { + vdmpyhb_acc(vx, vu, rt) +} + +/// `Vdd32.w=vdmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vdmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdmpyhb_dv(vuu, rt) +} + +/// `Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_dv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vdmpyacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vdmpyhb_dv_acc(vxx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhisat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_whrh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhisat(vuu, rt) +} + +/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhisat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwwhrh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhisat_acc(vx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, 
assert_instr(vdmpyhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhrh_sat(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsat(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhrh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsat_acc(vx, vu, rt) +} + +/// `Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsuisat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_whruh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhsuisat(vuu, rt) +} + +/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsuisat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwwhruh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhsuisat_acc(vx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsusat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhruh_sat(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsusat(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: 
SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsusat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhruh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsusat_acc(vx, vu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhvsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpyhvsat(vu, vv) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhvsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhvh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpyhvsat_acc(vx, vu, vv) +} + +/// `Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdsaduh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vdsad_wuhruh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdsaduh(vuu, rt) +} + +/// `Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdsaduh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vdsadacc_wuwwuhruh( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: 
i32, +) -> HvxVectorPair { + vdsaduh_acc(vxx, vuu, rt) +} + +/// `Vx32.w=vinsert(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vinsertwr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vinsert_vwr(vx: HvxVector, rt: i32) -> HvxVector { + vinsertwr(vx, rt) +} + +/// `Vd32=vlalign(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlalignb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vlalign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlalignb(vu, vv, rt) +} + +/// `Vd32=vlalign(Vu32,Vv32,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlalignbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vlalign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + vlalignbi(vu, vv, iu3) +} + +/// `Vd32.uh=vlsr(Vu32.uh,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vlsr_vuhr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrh(vu, rt) +} + +/// `Vd32.h=vlsr(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vlsr_vhvh(vu: HvxVector, 
vv: HvxVector) -> HvxVector { + vlsrhv(vu, vv) +} + +/// `Vd32.uw=vlsr(Vu32.uw,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vlsr_vuwr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrw(vu, rt) +} + +/// `Vd32.w=vlsr(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vlsr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vlsrwv(vu, vv) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvvb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlutvvb(vu, vv, rt) +} + +/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvvb_oracc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32or_vbvbvbr( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, + rt: i32, +) -> HvxVector { + vlutvvb_oracc(vx, vu, vv, rt) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvwh))] +#[unstable(feature = "stdarch_hexagon", issue 
= "151523")] +pub unsafe fn q6_wh_vlut16_vbvhr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vlutvwh(vu, vv, rt) +} + +/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvwh_oracc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16or_whvbvhr( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, + rt: i32, +) -> HvxVectorPair { + vlutvwh_oracc(vxx, vu, vv, rt) +} + +/// `Vd32.h=vmax(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmax_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxh(vu, vv) +} + +/// `Vd32.ub=vmax(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vmax_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxub(vu, vv) +} + +/// `Vd32.uh=vmax(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmax_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxuh(vu, vv) +} + +/// `Vd32.w=vmax(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
+#[cfg_attr(test, assert_instr(vmaxw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmax_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxw(vu, vv) +} + +/// `Vd32.h=vmin(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmin_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminh(vu, vv) +} + +/// `Vd32.ub=vmin(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vmin_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminub(vu, vv) +} + +/// `Vd32.uh=vmin(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmin_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminuh(vu, vv) +} + +/// `Vd32.w=vmin(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmin_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminw(vu, vv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, 
assert_instr(vmpabus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpabus(vuu, rt) +} + +/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpaacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpabus_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vmpabusv(vuu, vvv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabuuv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubwub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vmpabuuv(vuu, vvv) +} + +/// `Vdd32.w=vmpa(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpahb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpa_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpahb(vuu, rt) +} + +/// `Vxx32.w+=vmpa(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpahb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpaacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpahb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpybus(vu, rt) +} + +/// `Vxx32.h+=vmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvubrb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpybus_acc(vxx, vu, rt) +} + +/// `Vdd32.h=vmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpybusv(vu, vv) +} + +/// `Vxx32.h+=vmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybusv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvubvb( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpybusv_acc(vxx, vu, 
vv) +} + +/// `Vdd32.h=vmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpybv(vu, vv) +} + +/// `Vxx32.h+=vmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvbvb( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpybv_acc(vxx, vu, vv) +} + +/// `Vd32.w=vmpye(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyewuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyewuh(vu, vv) +} + +/// `Vdd32.w=vmpy(Vu32.h,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhrh(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyh(vu, rt) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhrh_sat( + vxx: 
HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyhsat_acc(vxx, vu, rt) +} + +/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhsrs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhrh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { + vmpyhsrs(vu, rt) +} + +/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhss))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhrh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { + vmpyhss(vu, rt) +} + +/// `Vdd32.w=vmpy(Vu32.h,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyhus(vu, vv) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyhus_acc(vxx, vu, vv) +} + +/// `Vdd32.w=vmpy(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhv))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyhv(vu, vv) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyhv_acc(vxx, vu, vv) +} + +/// `Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhvsrs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyhvsrs(vu, vv) +} + +/// `Vd32.w=vmpyieo(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyieoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyieoh(vu, vv) +} + +/// `Vx32.w+=vmpyie(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieacc_vwvwvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewh_acc(vx, vu, vv) +} + +/// `Vd32.w=vmpyie(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyie_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewuh(vu, vv) +} + +/// `Vx32.w+=vmpyie(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewuh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieacc_vwvwvuh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewuh_acc(vx, vu, vv) +} + +/// `Vd32.h=vmpyi(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyih))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyi_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyih(vu, vv) +} + +/// `Vx32.h+=vmpyi(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyih_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyiacc_vhvhvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyih_acc(vx, vu, vv) +} + +/// `Vd32.h=vmpyi(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyihb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyi_vhrb(vu: HvxVector, rt: i32) -> HvxVector { + vmpyihb(vu, rt) +} + +/// `Vx32.h+=vmpyi(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX 
+/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyihb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyiacc_vhvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyihb_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyio(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiowh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyio_vwvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiowh(vu, vv) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrb(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwb(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwb_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrh(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwh(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.h)` +/// +/// 
Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwh_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyo_vwvh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyowh(vu, vv) +} + +/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_rnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyo_vwvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyowh_rnd(vu, vv) +} + +/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_rnd_sacc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_rnd_sat_shift( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, +) -> HvxVector { + vmpyowh_rnd_sacc(vx, vu, vv) +} + +/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_sacc))] +#[unstable(feature = "stdarch_hexagon", 
issue = "151523")] +pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_sat_shift( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, +) -> HvxVector { + vmpyowh_sacc(vx, vu, vv) +} + +/// `Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyub(vu, rt) +} + +/// `Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpyacc_wuhvubrub( + vxx: HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyub_acc(vxx, vu, rt) +} + +/// `Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyubv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyubv(vu, vv) +} + +/// `Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyubv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpyacc_wuhvubvub( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyubv_acc(vxx, vu, vv) +} + +/// `Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpy_vuhruh(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyuh(vu, rt) +} + +/// `Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpyacc_wuwvuhruh( + vxx: HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyuh_acc(vxx, vu, rt) +} + +/// `Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpy_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyuhv(vu, vv) +} + +/// `Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuhv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpyacc_wuwvuhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyuhv_acc(vxx, vu, vv) +} + +/// `Vd32.h=vnavg(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vnavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgh(vu, vv) +} + +/// 
`Vd32.b=vnavg(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vnavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgub(vu, vv) +} + +/// `Vd32.w=vnavg(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vnavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgw(vu, vv) +} + +/// `Vd32.h=vnormamt(Vu32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnormamth))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vnormamt_vh(vu: HvxVector) -> HvxVector { + vnormamth(vu) +} + +/// `Vd32.w=vnormamt(Vu32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnormamtw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vnormamt_vw(vu: HvxVector) -> HvxVector { + vnormamtw(vu) +} + +/// `Vd32=vnot(Vu32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnot))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vnot_v(vu: HvxVector) -> HvxVector { + vnot(vu) +} + +/// `Vd32=vor(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_or(vu, vv) +} + +/// `Vd32.b=vpacke(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpacke_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackeb(vu, vv) +} + +/// `Vd32.h=vpacke(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpacke_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackeh(vu, vv) +} + +/// `Vd32.b=vpack(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackhb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackhb_sat(vu, vv) +} + +/// `Vd32.ub=vpack(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackhub_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackhub_sat(vu, vv) +} + +/// `Vd32.b=vpacko(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: 
SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpacko_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackob(vu, vv) +} + +/// `Vd32.h=vpacko(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpacko_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackoh(vu, vv) +} + +/// `Vd32.h=vpack(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackwh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackwh_sat(vu, vv) +} + +/// `Vd32.uh=vpack(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackwuh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackwuh_sat(vu, vv) +} + +/// `Vd32.h=vpopcount(Vu32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpopcounth))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpopcount_vh(vu: HvxVector) -> HvxVector { + vpopcounth(vu) +} + +/// `Vd32=vrdelta(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: 
SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrdelta))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vrdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrdelta(vu, vv) +} + +/// `Vd32.w=vrmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { + vrmpybus(vu, rt) +} + +/// `Vx32.w+=vrmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vrmpybus_acc(vx, vu, rt) +} + +/// `Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vrmpy_wubrbi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrmpybusi(vuu, rt, iu1) +} + +/// `Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vrmpyacc_wwwubrbi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + 
vrmpybusi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.w=vrmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybusv(vu, vv) +} + +/// `Vx32.w+=vrmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvubvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybusv_acc(vx, vu, vv) +} + +/// `Vd32.w=vrmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybv(vu, vv) +} + +/// `Vx32.w+=vrmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvbvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybv_acc(vx, vu, vv) +} + +/// `Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vuw_vrmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVector { + vrmpyub(vu, rt) +} + +/// `Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpyacc_vuwvubrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vrmpyub_acc(vx, vu, rt) +} + +/// `Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrmpy_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrmpyubi(vuu, rt, iu1) +} + +/// `Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrmpyacc_wuwwubrubi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + vrmpyubi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpyubv(vu, vv) +} + +/// `Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpyacc_vuwvubvub(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpyubv_acc(vx, vu, vv) +} + +/// `Vd32=vror(Vu32,Rt32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vror))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vror_vr(vu: HvxVector, rt: i32) -> HvxVector { + vror(vu, rt) +} + +/// `Vd32.b=vround(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundhb(vu, vv) +} + +/// `Vd32.ub=vround(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundhub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundhub(vu, vv) +} + +/// `Vd32.h=vround(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundwh(vu, vv) +} + +/// `Vd32.uh=vround(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundwuh(vu, vv) +} + +/// `Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrsadubi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrsad_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrsadubi(vuu, rt, iu1) +} + +/// `Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrsadubi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrsadacc_wuwwubrubi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + vrsadubi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.ub=vsat(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsathub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsat_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsathub(vu, vv) +} + +/// `Vd32.h=vsat(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsatwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsat_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatwh(vu, vv) +} + +/// 
`Vdd32.h=vsxt(Vu32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsxt_vb(vu: HvxVector) -> HvxVectorPair { + vsb(vu) +} + +/// `Vdd32.w=vsxt(Vu32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsxt_vh(vu: HvxVector) -> HvxVectorPair { + vsh(vu) +} + +/// `Vd32.h=vshuffe(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuffe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshufeh(vu, vv) +} + +/// `Vd32.b=vshuff(Vu32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuff_vb(vu: HvxVector) -> HvxVector { + vshuffb(vu) +} + +/// `Vd32.b=vshuffe(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuffe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshuffeb(vu, vv) +} + +/// `Vd32.h=vshuff(Vu32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuff_vh(vu: HvxVector) -> HvxVector { + vshuffh(vu) +} + +/// `Vd32.b=vshuffo(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuffo_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshuffob(vu, vv) +} + +/// `Vdd32=vshuff(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffvdd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vshuff_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vshuffvdd(vu, vv, rt) +} + +/// `Vdd32.b=vshuffoe(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vshuffoe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vshufoeb(vu, vv) +} + +/// `Vdd32.h=vshuffoe(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vshuffoe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vshufoeh(vu, vv) +} + +/// `Vd32.h=vshuffo(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// 
Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuffo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshufoh(vu, vv) +} + +/// `Vd32.b=vsub(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsub_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubb(vu, vv) +} + +/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vsub_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubb_dv(vuu, vvv) +} + +/// `Vd32.h=vsub(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubh(vu, vv) +} + +/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubh_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubh_dv(vuu, vvv) +} + +/// `Vd32.h=vsub(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: 
CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsub_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubhsat(vu, vv) +} + +/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vsub(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsubhw(vu, vv) +} + +/// `Vdd32.h=vsub(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsububh(vu, vv) +} + +/// `Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsub_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsububsat(vu, vv) +} + +/// 
`Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wub_vsub_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsububsat_dv(vuu, vvv) +} + +/// `Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vsub_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubuhsat(vu, vv) +} + +/// `Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vsub_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubuhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vsub(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsubuhw(vu, vv) +} + +/// `Vd32.w=vsub(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vw_vsub_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_sub(vu, vv) +} + +/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubw_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubw_dv(vuu, vvv) +} + +/// `Vd32.w=vsub(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vsub_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubwsat(vu, vv) +} + +/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubwsat_dv(vuu, vvv) +} + +/// `Vdd32.h=vtmpy(Vuu32.b,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpy_wbrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpyb(vuu, rt) +} + +/// `Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyb_acc))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpyacc_whwbrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpyb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpybus(vuu, rt) +} + +/// `Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpyacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpybus_acc(vxx, vuu, rt) +} + +/// `Vdd32.w=vtmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vtmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpyhb(vuu, rt) +} + +/// `Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vtmpyacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpyhb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vunpack(Vu32.b)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vunpack_vb(vu: HvxVector) -> HvxVectorPair { + vunpackb(vu) +} + +/// `Vdd32.w=vunpack(Vu32.h)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vunpack_vh(vu: HvxVector) -> HvxVectorPair { + vunpackh(vu) +} + +/// `Vxx32.h|=vunpacko(Vu32.b)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vunpackoor_whvb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { + vunpackob(vxx, vu) +} + +/// `Vxx32.w|=vunpacko(Vu32.h)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vunpackoor_wwvh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { + vunpackoh(vxx, vu) +} + +/// `Vdd32.uh=vunpack(Vu32.ub)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vunpack_vub(vu: HvxVector) -> HvxVectorPair { + vunpackub(vu) +} + +/// `Vdd32.uw=vunpack(Vu32.uh)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vunpack_vuh(vu: HvxVector) -> HvxVectorPair { + vunpackuh(vu) +} + +/// `Vd32=vxor(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vxor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vxor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_xor(vu, vv) +} + +/// `Vdd32.uh=vzxt(Vu32.ub)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vzb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vzxt_vub(vu: HvxVector) -> HvxVectorPair { + vzb(vu) +} + +/// `Vdd32.uw=vzxt(Vu32.uh)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vzh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vzxt_vuh(vu: HvxVector) -> HvxVectorPair { + vzh(vu) +} + +/// `Vd32.b=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(lvsplatb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsplat_r(rt: i32) -> HvxVector { + lvsplatb(rt) +} + +/// `Vd32.h=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(lvsplath))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsplat_r(rt: i32) -> HvxVector { + lvsplath(rt) +} + +/// `Vd32.b=vadd(Vu32.b,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vadd_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddbsat(vu, vv) +} + +/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddbsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vadd_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddbsat_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(vclb(Vu32.h),Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddclbh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vclb_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddclbh(vu, vv) +} + +/// `Vd32.w=vadd(vclb(Vu32.w),Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddclbw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vclb_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddclbw(vu, vv) +} + +/// `Vxx32.w+=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, 
assert_instr(vaddhw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vaddacc_wwvhvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vaddhw_acc(vxx, vu, vv) +} + +/// `Vxx32.h+=vadd(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddubh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vaddacc_whvubvub( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vaddubh_acc(vxx, vu, vv) +} + +/// `Vd32.ub=vadd(Vu32.ub,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddububb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vadd_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddububb_sat(vu, vv) +} + +/// `Vxx32.w+=vadd(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduhw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vaddacc_wwvuhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vadduhw_acc(vxx, vu, vv) +} + +/// `Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vadd_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadduwsat(vu, vv) +} + +/// 
`Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vadd_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vadduwsat_dv(vuu, vvv) +} + +/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasrhbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhbsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasruwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vuwvuwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruwuhrndsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasrwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwuhrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vlsr(Vu32.ub,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlsrb))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vlsr_vubr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrb(vu, rt) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvb_nm))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlutvvb_nm(vu, vv, rt) +} + +/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvb_oracci))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32or_vbvbvbi( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, + iu3: i32, +) -> HvxVector { + vlutvvb_oracci(vx, vu, vv, iu3) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + vlutvvbi(vu, vv, iu3) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwh_nm))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16_vbvhr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vlutvwh_nm(vu, vv, rt) +} + +/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution 
Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwh_oracci))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16or_whvbvhi( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, + iu3: i32, +) -> HvxVectorPair { + vlutvwh_oracci(vxx, vu, vv, iu3) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwhi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16_vbvhi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVectorPair { + vlutvwhi(vu, vv, iu3) +} + +/// `Vd32.b=vmax(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmaxb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vmax_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxb(vu, vv) +} + +/// `Vd32.b=vmin(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vminb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vmin_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminb(vu, vv) +} + +/// `Vdd32.w=vmpa(Vuu32.uh,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpauhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpa_wuhrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpauhb(vuu, rt) +} + +/// 
`Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpauhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpaacc_wwwuhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpauhb_acc(vxx, vuu, rt) +} + +/// `Vdd32=vmpye(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyewuh_64))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyewuh_64(vu, vv) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyiwub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrub(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwub(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyiwub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwub_acc(vx, vu, rt) +} + +/// `Vxx32+=vmpyo(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyowh_64_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_w_vmpyoacc_wvwvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyowh_64_acc(vxx, vu, vv) +} + +/// `Vd32.ub=vround(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vrounduhub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vround_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrounduhub(vu, vv) +} + +/// `Vd32.uh=vround(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vrounduwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vround_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrounduwuh(vu, vv) +} + +/// `Vd32.uh=vsat(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsatuwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vsat_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatuwuh(vu, vv) +} + +/// `Vd32.b=vsub(Vu32.b,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsub_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubbsat(vu, vv) +} + +/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, 
assert_instr(vsubbsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vsub_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubbsat_dv(vuu, vvv) +} + +/// `Vd32.ub=vsub(Vu32.ub,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubububb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsub_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubububb_sat(vu, vv) +} + +/// `Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubuwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vsub_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubuwsat(vu, vv) +} + +/// `Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubuwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vsub_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubuwsat_dv(vuu, vvv) +} + +/// `Vd32.b=vabs(Vu32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vabsb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vabs_vb(vu: HvxVector) -> HvxVector { + vabsb(vu) +} + +/// `Vd32.b=vabs(Vu32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vabsb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vabs_vb_sat(vu: HvxVector) -> HvxVector { + vabsb_sat(vu) +} + +/// `Vx32.h+=vasl(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vaslh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vaslacc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vaslh_acc(vx, vu, rt) +} + +/// `Vx32.h+=vasr(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasrh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasracc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vasrh_acc(vx, vu, rt) +} + +/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruhubrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vuhvuhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruhubrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vuhvuhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruhubsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat` +/// +/// Instruction 
Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vuwvuwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruwuhsat(vu, vv, rt) +} + +/// `Vd32.b=vavg(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavgb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgb(vu, vv) +} + +/// `Vd32.b=vavg(Vu32.b,Vv32.b):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavgbrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vavg_vbvb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgbrnd(vu, vv) +} + +/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavguw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vavg_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguw(vu, vv) +} + +/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavguwrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vavg_vuwvuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguwrnd(vu, vv) +} + +/// `Vdd32=#0` +/// +/// 
Instruction Type: MAPPING +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vdd0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vzero() -> HvxVectorPair { + vdd0() +} + +/// `vtmp.h=vgather(Rt32,Mu2,Vv32.h).h` +/// +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armvh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { + vgathermh(rs, rt, mu, vv) +} + +/// `vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h` +/// +/// Instruction Type: CVI_GATHER_DV +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armww(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVectorPair) { + vgathermhw(rs, rt, mu, vvv) +} + +/// `vtmp.w=vgather(Rt32,Mu2,Vv32.w).w` +/// +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armvw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { + vgathermw(rs, rt, mu, vv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpabuu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubrub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpabuu(vuu, rt) +} + 
+/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpabuu_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpaacc_whwubrub( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpabuu_acc(vxx, vuu, rt) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhrh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyh_acc(vxx, vu, rt) +} + +/// `Vd32.uw=vmpye(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyuhe))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vmpye_vuhruh(vu: HvxVector, rt: i32) -> HvxVector { + vmpyuhe(vu, rt) +} + +/// `Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyuhe_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vmpyeacc_vuwvuhruh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyuhe_acc(vx, vu, rt) +} + +/// `Vd32.b=vnavg(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vnavgb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub 
unsafe fn q6_vb_vnavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgb(vu, vv) +} + +/// `vscatter(Rt32,Mu2,Vv32.h).h=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermh(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.h).h+=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermh_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermh_add(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vvv32.w).h=Vw32` +/// +/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { + vscattermhw(rt, mu, vvv, vw) +} + +/// `vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32` +/// +/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermhw_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { + vscattermhw_add(rt, mu, vvv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.w).w=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermw(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.w).w+=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermw_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermw_add(rt, mu, vv, vw) +} + +/// `Vxx32.w=vasrinto(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vasr_into))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vasrinto_wwvwvw( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vasr_into(vxx, vu, vv) +} + +/// `Vd32.uw=vrotr(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vrotr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrotr_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrotr(vu, vv) +} + +/// `Vd32.w=vsatdw(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vsatdw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vsatdw_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatdw(vu, vv) +} + +/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h` +/// +/// 
Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyhubs10))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpy_wubwbi_h( + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyhubs10(vuu, vvv, iu2) +} + +/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyhubs10_vxx))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_h( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyhubs10_vxx(vxx, vuu, vvv, iu2) +} + +/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyvubs10))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpy_wubwbi_v( + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyvubs10(vuu, vvv, iu2) +} + +/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyvubs10_vxx))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_v( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyvubs10_vxx(vxx, vuu, vvv, iu2) +} + +/// `Vd32.hf=vabs(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vabs_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vabs_vhf(vu: HvxVector) -> HvxVector { + vabs_hf(vu) +} + +/// `Vd32.sf=vabs(Vu32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vabs_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vabs_vsf(vu: HvxVector) -> HvxVector { + vabs_sf(vu) +} + +/// `Vd32.qf16=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_hf(vu, vv) +} + +/// `Vd32.hf=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_hf_hf(vu, vv) +} + +/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf16(vu, vv) +} + +/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf16_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf16_mix(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf32(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf32_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf32_mix(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_sf(vu, vv) +} + +/// `Vdd32.sf=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vadd_sf_hf(vu, vv) +} + +/// `Vd32.sf=vadd(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_sf_sf(vu, vv) +} + +/// `Vd32.w=vfmv(Vu32.w)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vassign_fp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vfmv_vw(vu: HvxVector) -> HvxVector { + vassign_fp(vu) +} + +/// `Vd32.hf=Vu32.qf16` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_hf_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_vqf16(vu: HvxVector) -> HvxVector { + vconv_hf_qf16(vu) +} + +/// `Vd32.hf=Vuu32.qf32` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_hf_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_wqf32(vuu: HvxVectorPair) -> HvxVector { + vconv_hf_qf32(vuu) +} + +/// `Vd32.sf=Vu32.qf32` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_sf_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_equals_vqf32(vu: HvxVector) -> HvxVector { + vconv_sf_qf32(vu) +} + +/// `Vd32.b=vcvt(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_b_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_b_hf(vu, vv) +} + +/// `Vd32.h=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_h_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vcvt_vhf(vu: HvxVector) -> HvxVector { + vcvt_h_hf(vu) +} + +/// `Vdd32.hf=vcvt(Vu32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_b))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_vb(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_b(vu) +} + +/// `Vd32.hf=vcvt(Vu32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vh(vu: HvxVector) -> HvxVector { + vcvt_hf_h(vu) +} + +/// `Vd32.hf=vcvt(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_hf_sf(vu, vv) +} + +/// `Vdd32.hf=vcvt(Vu32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, 
assert_instr(vcvt_hf_ub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_vub(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_ub(vu) +} + +/// `Vd32.hf=vcvt(Vu32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_uh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vuh(vu: HvxVector) -> HvxVector { + vcvt_hf_uh(vu) +} + +/// `Vdd32.sf=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vcvt_vhf(vu: HvxVector) -> HvxVectorPair { + vcvt_sf_hf(vu) +} + +/// `Vd32.ub=vcvt(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_ub_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_ub_hf(vu, vv) +} + +/// `Vd32.uh=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_uh_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vcvt_vhf(vu: HvxVector) -> HvxVector { + vcvt_uh_hf(vu) +} + +/// `Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vdmpy_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = 
"151523")] +pub unsafe fn q6_vsf_vdmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpy_sf_hf(vu, vv) +} + +/// `Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vdmpy_sf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vdmpyacc_vsfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpy_sf_hf_acc(vx, vu, vv) +} + +/// `Vd32.hf=vfmax(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmax_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_hf(vu, vv) +} + +/// `Vd32.sf=vfmax(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmax_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_sf(vu, vv) +} + +/// `Vd32.hf=vfmin(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmin_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_hf(vu, vv) +} + +/// `Vd32.sf=vfmin(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, 
assert_instr(vfmin_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_sf(vu, vv) +} + +/// `Vd32.hf=vfneg(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfneg_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfneg_vhf(vu: HvxVector) -> HvxVector { + vfneg_hf(vu) +} + +/// `Vd32.sf=vfneg(Vu32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfneg_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfneg_vsf(vu: HvxVector) -> HvxVector { + vfneg_sf(vu) +} + +/// `Vd32.hf=vmax(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmax_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmax_hf(vu, vv) +} + +/// `Vd32.sf=vmax(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmax_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmax_sf(vu, vv) +} + +/// `Vd32.hf=vmin(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmin_hf))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmin_hf(vu, vv) +} + +/// `Vd32.sf=vmin(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmin_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmin_sf(vu, vv) +} + +/// `Vd32.hf=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_hf_hf(vu, vv) +} + +/// `Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_hf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmpyacc_vhfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_hf_hf_acc(vx, vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16(vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] 
+#[cfg_attr(test, assert_instr(vmpy_qf16_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16_hf(vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf16_mix_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16_mix_hf(vu, vv) +} + +/// `Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vmpy_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf32(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_hf(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_mix_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_mix_hf(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VX_DV +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_qf16(vu, vv) +} + +/// `Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf32_sf(vu, vv) +} + +/// `Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_sf_hf(vu, vv) +} + +/// `Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vmpyacc_wsfvhfvhf( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpy_sf_hf_acc(vxx, vu, vv) +} + +/// `Vd32.sf=vmpy(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> 
HvxVector { + vmpy_sf_sf(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_hf(vu, vv) +} + +/// `Vd32.hf=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_hf_hf(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf16(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf16_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf16_mix(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vqf32_vsub_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf32(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf32_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vsub_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf32_mix(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_sf(vu, vv) +} + +/// `Vdd32.sf=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsub_sf_hf(vu, vv) +} + +/// `Vd32.sf=vsub(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_sf_sf(vu, vv) +} + +/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvuhubrndsat))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_wuhvub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvuhubrndsat(vuu, vv) +} + +/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvuhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_wuhvub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvuhubsat(vuu, vv) +} + +/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_wwvuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvwuhrndsat(vuu, vv) +} + +/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_wwvuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvwuhsat(vuu, vv) +} + +/// `Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vmpyuhvs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmpy_vuhvuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyuhvs(vu, vv) +} + +/// `Vd32.h=Vu32.hf` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_h_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_equals_vhf(vu: HvxVector) -> HvxVector { + vconv_h_hf(vu) +} + +/// `Vd32.hf=Vu32.h` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_hf_h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_vh(vu: HvxVector) -> HvxVector { + vconv_hf_h(vu) +} + +/// `Vd32.sf=Vu32.w` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_sf_w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_equals_vw(vu: HvxVector) -> HvxVector { + vconv_sf_w(vu) +} + +/// `Vd32.w=Vu32.sf` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_w_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_equals_vsf(vu: HvxVector) -> HvxVector { + vconv_w_sf(vu) +} + +/// `Vd32=vgetqfext(Vu32.x,Rt32)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(get_qfext))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vgetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { + get_qfext(vu, rt) +} + +/// `Vd32.x=vsetqfext(Vu32,Rt32)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(set_qfext))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vsetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { + set_qfext(vu, rt) +} + +/// `Vd32.f8=vabs(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vabs_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vabs_v(vu: HvxVector) -> HvxVector { + vabs_f8(vu) +} + +/// `Vdd32.hf=vcvt2(Vu32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt2_hf_b))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt2_vb(vu: HvxVector) -> HvxVectorPair { + vcvt2_hf_b(vu) +} + +/// `Vdd32.hf=vcvt2(Vu32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt2_hf_ub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt2_vub(vu: HvxVector) -> HvxVectorPair { + vcvt2_hf_ub(vu) +} + +/// `Vdd32.hf=vcvt(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt_hf_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_v(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_f8(vu) +} + +/// `Vd32.f8=vfmax(Vu32.f8,Vv32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfmax_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_v_vfmax_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_f8(vu, vv) +} + +/// `Vd32.f8=vfmin(Vu32.f8,Vv32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfmin_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vfmin_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_f8(vu, vv) +} + +/// `Vd32.f8=vfneg(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfneg_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vfneg_v(vu: HvxVector) -> HvxVector { + vfneg_f8(vu) +} + +/// `Qd4=and(Qs4,Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_and_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_and( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=and(Qs4,!Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_and_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_and_n( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=not(Qs4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_not_q(qs: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_not(vandvrt( + core::mem::transmute::(qs), + -1, + )), + -1, + )) +} + +/// `Qd4=or(Qs4,Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_or_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_or( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=or(Qs4,!Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_or_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_or_n( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=vsetq(Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vsetq_r(rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt(pred_scalar2(rt), -1)) +} + +/// `Qd4=xor(Qs4,Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_xor_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_xor( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `if (!Qv4) vmem(Rt32+#s4)=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qnriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_nqpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (!Qv4) vmem(Rt32+#s4):nt=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qnriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_nt_nqpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (Qv4) vmem(Rt32+#s4):nt=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_nt_qpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (Qv4) vmem(Rt32+#s4)=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_qpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (!Qv4) Vx32.b+=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condacc_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddbnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.b+=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condacc_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddbq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.h+=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condacc_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddhnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.h+=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condacc_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddhq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.w+=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condacc_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddwnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.w+=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condacc_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddwq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `Vd32=vand(Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qr(qu: HvxVectorPred, rt: i32) -> HvxVector { + vandvrt(core::mem::transmute::(qu), rt) +} + +/// `Vx32|=vand(Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vandor_vqr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { + vandvrt_acc(vx, core::mem::transmute::(qu), rt) +} + +/// `Qd4=vand(Vu32,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vand_vr(vu: HvxVector, rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vu, rt)) +} + +/// `Qx4|=vand(Vu32,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vandor_qvr(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt_acc( + core::mem::transmute::(qx), + vu, + rt, + )) +} + +/// `Qd4=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eq_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(veqb(vu, vv), -1)) +} + +/// `Qx4&=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqand_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqb_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqor_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqb_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqxacc_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqb_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eq_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(veqh(vu, vv), -1)) +} + +/// `Qx4&=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqand_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqh_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqor_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqh_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqxacc_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqh_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eq_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(veqw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqand_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqor_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqxacc_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtb(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtb_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtb_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtb_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgth(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtub(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtuh(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtuw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Vd32=vmux(Qt4,Vu32,Vv32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vmux_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmux( + vandvrt(core::mem::transmute::(qt), -1), + vu, + vv, + ) +} + +/// `if (!Qv4) Vx32.b-=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condnac_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubbnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.b-=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condnac_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubbq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.h-=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condnac_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubhnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.h-=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condnac_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubhq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.w-=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condnac_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubwnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.w-=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vw_condnac_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
+    vsubwq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        vx,
+        vu,
+    )
+}
+
+/// `Vdd32=vswap(Qt4,Vu32,Vv32)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA_DV
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_w_vswap_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
+    vswap(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
+        vu,
+        vv,
+    )
+}
+
+/// `Qd4=vsetq2(Rt32)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VP
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vsetq2_r(rt: i32) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(pred_scalar2v2(rt), -1))
+}
+
+/// `Qd4.b=vshuffe(Qs4.h,Qt4.h)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_qb_vshuffe_qhqh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + shuffeqh( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4.h=vshuffe(Qs4.w,Qt4.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_qh_vshuffe_qwqw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + shuffeqw( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Vd32=vand(!Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qnr(qu: HvxVectorPred, rt: i32) -> HvxVector { + vandnqrt( + vandvrt(core::mem::transmute::(qu), -1), + rt, + ) +} + +/// `Vx32|=vand(!Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vandor_vqnr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { + vandnqrt_acc( + vx, + vandvrt(core::mem::transmute::(qu), -1), + rt, + ) +} + +/// `Vd32=vand(!Qv4,Vu32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qnv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { + vandvnqv( + vandvrt(core::mem::transmute::(qv), -1), + vu, + ) +} + +/// `Vd32=vand(Qv4,Vu32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { + vandvqv( + vandvrt(core::mem::transmute::(qv), -1), + vu, + ) +} + +/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_aqrmvh( + rs: *mut HvxVector, + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, +) { + vgathermhq( + rs, + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + ) +} + +/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_GATHER_DV +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_aqrmww( + rs: *mut HvxVector, + qs: HvxVectorPred, + rt: i32, + mu: i32, + vvv: HvxVectorPair, +) { + vgathermhwq( + rs, + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vvv, + ) +} + +/// `if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_aqrmvw( + rs: *mut HvxVector, + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, +) { + vgathermwq( + rs, + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + ) +} + +/// `Vd32.b=prefixsum(Qv4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_prefixsum_q(qv: HvxVectorPred) -> HvxVector { + vprefixqb(vandvrt( + core::mem::transmute::(qv), + -1, + )) +} + +/// `Vd32.h=prefixsum(Qv4)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VS
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vh_prefixsum_q(qv: HvxVectorPred) -> HvxVector {
+    vprefixqh(vandvrt(
+        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
+        -1,
+    ))
+}
+
+/// `Vd32.w=prefixsum(Qv4)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VS
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vw_prefixsum_q(qv: HvxVectorPred) -> HvxVector {
+    vprefixqw(vandvrt(
+        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
+        -1,
+    ))
+}
+
+/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_SCATTER
+/// Execution Slots: SLOT0
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vscatter_qrmvhv(
+    qs: HvxVectorPred,
+    rt: i32,
+    mu: i32,
+    vv: HvxVector,
+    vw: HvxVector,
+) {
+    vscattermhq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+        rt,
+        mu,
+        vv,
+        vw,
+    )
+}
+
+/// `if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_qrmwwv( + qs: HvxVectorPred, + rt: i32, + mu: i32, + vvv: HvxVectorPair, + vw: HvxVector, +) { + vscattermhwq( + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vvv, + vw, + ) +} + +/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_qrmvwv( + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, + vw: HvxVector, +) { + vscattermwq( + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + vw, + ) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvwq_carry_sat( + vu: HvxVector, + vv: HvxVector, + qs: HvxVectorPred, +) -> HvxVector { + vaddcarrysat( + vu, + vv, + vandvrt(core::mem::transmute::(qs), -1), + ) +} + +/// `Qd4=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgthf(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvhfvhf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgthf_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvhfvhf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgthf_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvhfvhf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgthf_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtsf(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvsfvsf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtsf_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvsfvsf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtsf_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvsfvsf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtsf_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} diff --git a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs index 2f8dec75b76b6..43317ff41d9b1 100644 --- a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs @@ -1,13 +1,20 @@ //! Hexagon HVX Code Generator //! -//! This generator creates hvx.rs from scratch using the LLVM HVX header file -//! as the sole source of truth. It parses the C intrinsic prototypes and -//! generates Rust wrapper functions with appropriate attributes. +//! This generator creates v64.rs and v128.rs from scratch using the LLVM HVX +//! header file as the sole source of truth. It parses the C intrinsic prototypes +//! and generates Rust wrapper functions with appropriate attributes. +//! +//! The two generated files provide: +//! - v64.rs: 64-byte vector mode intrinsics (512-bit vectors) +//! - v128.rs: 128-byte vector mode intrinsics (1024-bit vectors) +//! +//! 
Both modules are available unconditionally, but require the appropriate +//! target features to actually use the intrinsics. //! //! Usage: //! cd crates/stdarch-gen-hexagon //! cargo run -//! # Output is written directly to ../core_arch/src/hexagon/hvx.rs +//! # Output is written to ../core_arch/src/hexagon/v64.rs and v128.rs use regex::Regex; use std::collections::{HashMap, HashSet}; @@ -32,6 +39,46 @@ fn get_simd_intrinsic_mappings() -> HashMap<&'static str, &'static str> { /// The tracking issue number for the stdarch_hexagon feature const TRACKING_ISSUE: &str = "151523"; +/// HVX vector length mode +#[derive(Debug, Clone, Copy, PartialEq)] +enum VectorMode { + /// 64-byte vectors (512 bits) + V64, + /// 128-byte vectors (1024 bits) + V128, +} + +impl VectorMode { + fn bytes(&self) -> u32 { + match self { + VectorMode::V64 => 64, + VectorMode::V128 => 128, + } + } + + fn bits(&self) -> u32 { + self.bytes() * 8 + } + + fn lanes(&self) -> u32 { + self.bytes() / 4 // 32-bit lanes + } + + fn module_name(&self) -> &'static str { + match self { + VectorMode::V64 => "v64", + VectorMode::V128 => "v128", + } + } + + fn target_feature(&self) -> &'static str { + match self { + VectorMode::V64 => "hvx-length64b", + VectorMode::V128 => "hvx-length128b", + } + } +} + /// LLVM tag to fetch the header from const LLVM_TAG: &str = "llvmorg-22.1.0-rc1"; @@ -484,26 +531,24 @@ fn q6_to_rust_name(q6_name: &str) -> String { } /// Generate the module documentation -fn generate_module_doc() -> String { - r#"//! Hexagon HVX intrinsics +fn generate_module_doc(mode: VectorMode) -> String { + format!( + r#"//! Hexagon HVX {bytes}-byte vector mode intrinsics +//! +//! This module provides intrinsics for the Hexagon Vector Extensions (HVX) +//! in {bytes}-byte vector mode ({bits}-bit vectors). //! -//! This module provides intrinsics for the Hexagon Vector Extensions (HVX). //! HVX is a wide vector extension designed for high-performance signal processing. //! 
[Hexagon HVX Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-61) //! //! ## Vector Types //! -//! HVX supports different vector lengths depending on the configuration: -//! - 128-byte mode: `HvxVector` is 1024 bits (128 bytes) -//! - 64-byte mode: `HvxVector` is 512 bits (64 bytes) -//! -//! This implementation targets 128-byte mode by default. To change the vector -//! length mode, use the appropriate target feature when compiling: -//! - For 128-byte mode: `-C target-feature=+hvx-length128b` -//! - For 64-byte mode: `-C target-feature=+hvx-length64b` +//! In {bytes}-byte mode: +//! - `HvxVector` is {bits} bits ({bytes} bytes) containing {lanes} x 32-bit values +//! - `HvxVectorPair` is {pair_bits} bits ({pair_bytes} bytes) +//! - `HvxVectorPred` is {bits} bits ({bytes} bytes) for predicate operations //! -//! Note that HVX v66 and later default to 128-byte mode, while earlier versions -//! default to 64-byte mode. +//! To use this module, compile with `-C target-feature=+{target_feature}`. //! //! ## Architecture Versions //! @@ -517,15 +562,27 @@ fn generate_module_doc() -> String { //! - HVX v69: `-C target-feature=+hvxv69` //! - HVX v73: `-C target-feature=+hvxv73` //! - HVX v79: `-C target-feature=+hvxv79` -//! - HVX v81: `-C target-feature=+hvxv81` //! //! Each version includes all features from previous versions. 
-"# - .to_string() +"#, + bytes = mode.bytes(), + bits = mode.bits(), + lanes = mode.lanes(), + pair_bytes = mode.bytes() * 2, + pair_bits = mode.bits() * 2, + target_feature = mode.target_feature(), + ) } -/// Generate the type definitions -fn generate_types() -> String { +/// Generate the type definitions for a specific vector mode +fn generate_types(mode: VectorMode) -> String { + let lanes = mode.lanes(); + let pair_lanes = lanes * 2; + let bits = mode.bits(); + let bytes = mode.bytes(); + let pair_bits = bits * 2; + let pair_bytes = bytes * 2; + format!( r#" #![allow(non_camel_case_types)] @@ -535,54 +592,35 @@ use stdarch_test::assert_instr; use crate::intrinsics::simd::{{simd_add, simd_and, simd_or, simd_sub, simd_xor}}; -// HVX type definitions for 128-byte vector mode (default for v66+) -// Use -C target-feature=+hvx-length128b to enable -#[cfg(target_feature = "hvx-length128b")] -types! {{ - #![unstable(feature = "stdarch_hexagon", issue = "{TRACKING_ISSUE}")] - - /// HVX vector type (1024 bits / 128 bytes) - /// - /// This type represents a single HVX vector register containing 32 x 32-bit values. - pub struct HvxVector(32 x i32); - - /// HVX vector pair type (2048 bits / 256 bytes) - /// - /// This type represents a pair of HVX vector registers, often used for - /// operations that produce double-width results. - pub struct HvxVectorPair(64 x i32); - - /// HVX vector predicate type (1024 bits / 128 bytes) - /// - /// This type represents a predicate vector used for conditional operations. - /// Each bit corresponds to a lane in the vector. - pub struct HvxVectorPred(32 x i32); -}} - -// HVX type definitions for 64-byte vector mode (default for v60-v65) -// Use -C target-feature=+hvx-length64b to enable, or omit hvx-length128b -#[cfg(not(target_feature = "hvx-length128b"))] +// HVX type definitions for {bytes}-byte vector mode types! 
{{ #![unstable(feature = "stdarch_hexagon", issue = "{TRACKING_ISSUE}")] - /// HVX vector type (512 bits / 64 bytes) + /// HVX vector type ({bits} bits / {bytes} bytes) /// - /// This type represents a single HVX vector register containing 16 x 32-bit values. - pub struct HvxVector(16 x i32); + /// This type represents a single HVX vector register containing {lanes} x 32-bit values. + pub struct HvxVector({lanes} x i32); - /// HVX vector pair type (1024 bits / 128 bytes) + /// HVX vector pair type ({pair_bits} bits / {pair_bytes} bytes) /// /// This type represents a pair of HVX vector registers, often used for /// operations that produce double-width results. - pub struct HvxVectorPair(32 x i32); + pub struct HvxVectorPair({pair_lanes} x i32); - /// HVX vector predicate type (512 bits / 64 bytes) + /// HVX vector predicate type ({bits} bits / {bytes} bytes) /// /// This type represents a predicate vector used for conditional operations. /// Each bit corresponds to a lane in the vector. 
- pub struct HvxVectorPred(16 x i32); + pub struct HvxVectorPred({lanes} x i32); }} -"# +"#, + bytes = bytes, + bits = bits, + lanes = lanes, + pair_bits = pair_bits, + pair_bytes = pair_bytes, + pair_lanes = pair_lanes, + TRACKING_ISSUE = TRACKING_ISSUE, ) } @@ -1160,8 +1198,8 @@ fn get_compound_helper_signatures() -> HashMap { map } -/// Generate extern declarations for all intrinsics -fn generate_extern_block(intrinsics: &[IntrinsicInfo]) -> String { +/// Generate extern declarations for all intrinsics for a specific vector mode +fn generate_extern_block(intrinsics: &[IntrinsicInfo], mode: VectorMode) -> String { let mut output = String::new(); // Collect unique builtins to avoid duplicates @@ -1226,15 +1264,18 @@ fn generate_extern_block(intrinsics: &[IntrinsicInfo]) -> String { // Sort by builtin name for consistent output decls.sort_by(|a, b| a.0.cmp(&b.0)); - // Generate 128-byte mode intrinsics (default for v66+) - output.push_str("// LLVM intrinsic declarations for 128-byte vector mode\n"); - output.push_str("#[cfg(target_feature = \"hvx-length128b\")]\n"); + // Generate intrinsic declarations for the specified mode + output.push_str(&format!( + "// LLVM intrinsic declarations for {}-byte vector mode\n", + mode.bytes() + )); output.push_str("#[allow(improper_ctypes)]\n"); output.push_str("unsafe extern \"unadjusted\" {\n"); for (builtin_name, instr_name, return_type, param_types) in &decls { let base_link = builtin_name.replace('_', "."); - let link_name = if builtin_name.starts_with("V6_") { + // 128-byte mode uses .128B suffix, 64-byte mode doesn't + let link_name = if builtin_name.starts_with("V6_") && mode == VectorMode::V128 { format!("llvm.hexagon.{}.128B", base_link) } else { format!("llvm.hexagon.{}", base_link) @@ -1262,41 +1303,6 @@ fn generate_extern_block(intrinsics: &[IntrinsicInfo]) -> String { )); } - output.push_str("}\n\n"); - - // Generate 64-byte mode intrinsics (default for v60-v65) - output.push_str("// LLVM intrinsic declarations for 
64-byte vector mode\n"); - output.push_str("#[cfg(not(target_feature = \"hvx-length128b\"))]\n"); - output.push_str("#[allow(improper_ctypes)]\n"); - output.push_str("unsafe extern \"unadjusted\" {\n"); - - for (builtin_name, instr_name, return_type, param_types) in &decls { - let base_link = builtin_name.replace('_', "."); - // 64-byte mode uses intrinsics without the .128B suffix - let link_name = format!("llvm.hexagon.{}", base_link); - - let params_str = if param_types.is_empty() { - String::new() - } else { - param_types - .iter() - .map(|t| format!("_: {}", t.to_extern_str())) - .collect::>() - .join(", ") - }; - - let return_str = if *return_type == RustType::Unit { - " -> ()".to_string() - } else { - format!(" -> {}", return_type.to_extern_str()) - }; - - output.push_str(&format!( - " #[link_name = \"{}\"]\n fn {}({}){};\n", - link_name, instr_name, params_str, return_str - )); - } - output.push_str("}\n"); output } @@ -1596,14 +1602,18 @@ fn generate_functions(intrinsics: &[IntrinsicInfo]) -> String { output } -/// Generate the complete hvx.rs file -fn generate_hvx_rs(intrinsics: &[IntrinsicInfo], output_path: &Path) -> Result<(), String> { +/// Generate a module file for a specific vector mode +fn generate_module_file( + intrinsics: &[IntrinsicInfo], + output_path: &Path, + mode: VectorMode, +) -> Result<(), String> { let mut output = File::create(output_path).map_err(|e| format!("Failed to create output: {}", e))?; - writeln!(output, "{}", generate_module_doc()).map_err(|e| e.to_string())?; - writeln!(output, "{}", generate_types()).map_err(|e| e.to_string())?; - writeln!(output, "{}", generate_extern_block(intrinsics)).map_err(|e| e.to_string())?; + writeln!(output, "{}", generate_module_doc(mode)).map_err(|e| e.to_string())?; + writeln!(output, "{}", generate_types(mode)).map_err(|e| e.to_string())?; + writeln!(output, "{}", generate_extern_block(intrinsics, mode)).map_err(|e| e.to_string())?; writeln!(output, "{}", 
generate_functions(intrinsics)).map_err(|e| e.to_string())?; // Ensure file is flushed before running rustfmt @@ -1677,21 +1687,39 @@ fn main() -> Result<(), String> { println!(" HVX v{}: {} intrinsics", arch, count); } - // Generate output + // Generate output files let crate_dir = std::env::var("CARGO_MANIFEST_DIR") .map(std::path::PathBuf::from) .unwrap_or_else(|_| std::env::current_dir().unwrap()); - let output_path = crate_dir.join("../core_arch/src/hexagon/hvx.rs"); + let hexagon_dir = crate_dir.join("../core_arch/src/hexagon"); + + // Generate v64.rs (64-byte vector mode) + let v64_path = hexagon_dir.join("v64.rs"); + println!("\nStep 3: Generating v64.rs (64-byte mode)..."); + generate_module_file(&intrinsics, &v64_path, VectorMode::V64)?; + println!(" Output: {}", v64_path.display()); - println!("\nStep 3: Generating hvx.rs..."); - generate_hvx_rs(&intrinsics, &output_path)?; + // Generate v128.rs (128-byte vector mode) + let v128_path = hexagon_dir.join("v128.rs"); + println!("\nStep 4: Generating v128.rs (128-byte mode)..."); + generate_module_file(&intrinsics, &v128_path, VectorMode::V128)?; + println!(" Output: {}", v128_path.display()); println!("\n=== Results ==="); - println!(" Generated {} simple wrapper functions", simple_count); - println!(" Generated {} compound wrapper functions", compound_count); - println!(" Total: {} functions", simple_count + compound_count); - println!(" Output: {}", output_path.display()); + println!( + " Generated {} simple wrapper functions per module", + simple_count + ); + println!( + " Generated {} compound wrapper functions per module", + compound_count + ); + println!( + " Total: {} functions per module", + simple_count + compound_count + ); + println!(" Output files: v64.rs, v128.rs"); Ok(()) } diff --git a/library/stdarch/examples/gaussian.rs b/library/stdarch/examples/gaussian.rs index 1891f194ed7ff..d4a97235b4ca9 100644 --- a/library/stdarch/examples/gaussian.rs +++ b/library/stdarch/examples/gaussian.rs @@ 
-41,8 +41,10 @@ dead_code )] -#[cfg(target_arch = "hexagon")] -use core_arch::arch::hexagon::*; +#[cfg(all(target_arch = "hexagon", not(target_feature = "hvx-length128b")))] +use core_arch::arch::hexagon::v64::*; +#[cfg(all(target_arch = "hexagon", target_feature = "hvx-length128b"))] +use core_arch::arch::hexagon::v128::*; /// Vector length in bytes for HVX 128-byte mode #[cfg(all(target_arch = "hexagon", target_feature = "hvx-length128b"))] From 736af97d6c4b2d94d5a1189bbd9d330080571a42 Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Tue, 3 Feb 2026 09:18:36 -0600 Subject: [PATCH 37/90] examples: Add assertions to gaussian example Replace print statements with assertions that verify the Gaussian 3x3 blur implementation against the Hexagon SDK reference algorithm. - Port exact SDK Gaussian3x3u8 implementation from: /opt/Hexagon_SDK/.../Examples/HVX/gaussian/src/gaussian.c - Verify specific output values [15, 16, 17, 18, 19, 20, 21, 22] for row 2, cols 1..9 with test pattern ((x + y*7) % 256) - Assert byte-averaging approximation exactly matches SDK reference - On Hexagon: verify HVX output matches both scalar approximation and SDK reference exactly --- library/stdarch/examples/gaussian.rs | 181 +++++++++++++-------------- 1 file changed, 89 insertions(+), 92 deletions(-) diff --git a/library/stdarch/examples/gaussian.rs b/library/stdarch/examples/gaussian.rs index d4a97235b4ca9..575f4db8c8d84 100644 --- a/library/stdarch/examples/gaussian.rs +++ b/library/stdarch/examples/gaussian.rs @@ -178,26 +178,25 @@ pub unsafe fn gaussian3x3u8( } } -/// Scalar reference implementation of Gaussian 3x3 blur for verification +/// Reference C implementation from Hexagon SDK (Gaussian3x3u8) /// -/// Applies the exact 3x3 Gaussian kernel: -/// out[y][x] = (1*p[-1][-1] + 2*p[-1][0] + 1*p[-1][1] + -/// 2*p[ 0][-1] + 4*p[ 0][0] + 2*p[ 0][1] + -/// 1*p[ 1][-1] + 2*p[ 1][0] + 1*p[ 1][1] + 8) / 16 -fn gaussian3x3u8_scalar(src: &[u8], stride: usize, width: usize, height: usize, dst: &mut 
[u8]) { +/// Kernel: +/// 1 2 1 +/// 2 4 2 / 16 +/// 1 2 1 +fn gaussian3x3u8_reference(src: &[u8], stride: usize, width: usize, height: usize, dst: &mut [u8]) { for y in 1..height - 1 { for x in 1..width - 1 { - let sum = src[(y - 1) * stride + (x - 1)] as u32 - + src[(y - 1) * stride + x] as u32 * 2 - + src[(y - 1) * stride + (x + 1)] as u32 - + src[y * stride + (x - 1)] as u32 * 2 - + src[y * stride + x] as u32 * 4 - + src[y * stride + (x + 1)] as u32 * 2 - + src[(y + 1) * stride + (x - 1)] as u32 - + src[(y + 1) * stride + x] as u32 * 2 - + src[(y + 1) * stride + (x + 1)] as u32; - // Divide by 16 with rounding, saturate to u8 - dst[y * stride + x] = ((sum + 8) >> 4).min(255) as u8; + // Compute column sums (vertical 1-2-1 weights) + let mut col = [0u32; 3]; + for i in 0..3 { + col[i] = 1 * src[(y - 1) * stride + x - 1 + i] as u32 + + 2 * src[y * stride + x - 1 + i] as u32 + + 1 * src[(y + 1) * stride + x - 1 + i] as u32; + } + // Apply horizontal 1-2-1 weights and normalize + // (1*col[0] + 2*col[1] + 1*col[2] + 8) / 16 + dst[y * stride + x] = ((1 * col[0] + 2 * col[1] + 1 * col[2] + 8) >> 4) as u8; } } } @@ -208,13 +207,7 @@ fn gaussian3x3u8_scalar(src: &[u8], stride: usize, width: usize, height: usize, /// - Vertical: avg_rnd(avg_rnd(above, below), center) /// - Horizontal: avg_rnd(avg_rnd(left, right), center) /// where avg_rnd(a, b) = (a + b + 1) / 2 -fn gaussian3x3u8_scalar_approx( - src: &[u8], - stride: usize, - width: usize, - height: usize, - dst: &mut [u8], -) { +fn gaussian3x3u8_approx(src: &[u8], stride: usize, width: usize, height: usize, dst: &mut [u8]) { // Temporary buffer for vertical pass output let mut tmp = vec![0u8; width * height]; @@ -241,66 +234,71 @@ fn gaussian3x3u8_scalar_approx( } } +/// Generate deterministic test pattern matching test approach +fn generate_test_pattern(buf: &mut [u8], width: usize, height: usize) { + for y in 0..height { + for x in 0..width { + buf[y * width + x] = ((x + y * 7) % 256) as u8; + } + } +} + fn main() 
{ - println!("HVX Gaussian 3x3 blur example"); - println!("Separable filter using byte averaging (HvxVector only)"); - println!(); + // Test dimensions + #[cfg(not(target_arch = "hexagon"))] + const WIDTH: usize = 128; + #[cfg(target_arch = "hexagon")] + const WIDTH: usize = 256; // Must be multiple of VLEN (128) + const HEIGHT: usize = 16; #[cfg(not(target_arch = "hexagon"))] { - const WIDTH: usize = 128; - const HEIGHT: usize = 16; - let mut src = vec![0u8; WIDTH * HEIGHT]; - let mut dst_exact = vec![0u8; WIDTH * HEIGHT]; + let mut dst_ref = vec![0u8; WIDTH * HEIGHT]; let mut dst_approx = vec![0u8; WIDTH * HEIGHT]; - // Create test pattern - for y in 0..HEIGHT { - for x in 0..WIDTH { - src[y * WIDTH + x] = ((x + y * 7) % 256) as u8; - } + // Generate test pattern + generate_test_pattern(&mut src, WIDTH, HEIGHT); + + // Run reference implementation + gaussian3x3u8_reference(&src, WIDTH, WIDTH, HEIGHT, &mut dst_ref); + + // Run byte-averaging approximation (matches HVX behavior) + gaussian3x3u8_approx(&src, WIDTH, WIDTH, HEIGHT, &mut dst_approx); + + // Verify specific output values from reference + // These are computed using the exact algorithm on our test pattern + // Row 2, cols 1..9 with input pattern ((x + y*7) % 256) + // Input: row1=[7,8,9,...], row2=[14,15,16,...], row3=[21,22,23,...] 
+ let expected_ref_row2: [u8; 8] = [15, 16, 17, 18, 19, 20, 21, 22]; + for (i, &expected) in expected_ref_row2.iter().enumerate() { + let actual = dst_ref[2 * WIDTH + 1 + i]; + assert_eq!( + actual, expected, + "reference mismatch at row 2, col {}: expected {}, got {}", + 1 + i, + expected, + actual + ); } - // Run exact Gaussian - gaussian3x3u8_scalar(&src, WIDTH, WIDTH, HEIGHT, &mut dst_exact); - - // Run approximate version (matches HVX behavior) - gaussian3x3u8_scalar_approx(&src, WIDTH, WIDTH, HEIGHT, &mut dst_approx); - - // Compare exact vs approximate - let mut max_diff = 0u8; + // Verify approximation exactly matches reference for this test pattern + // The byte-averaging approach avg(avg(a,c), b) produces identical results + // to the Hexagon SDK's (1*a + 2*b + 1*c + 2) / 4 for this input pattern for y in 1..HEIGHT - 1 { for x in 1..WIDTH - 1 { let idx = y * WIDTH + x; - let diff = (dst_exact[idx] as i16 - dst_approx[idx] as i16).unsigned_abs() as u8; - if diff > max_diff { - max_diff = diff; - } + assert_eq!( + dst_approx[idx], dst_ref[idx], + "Approximation differs from reference at ({}, {}): approx={}, ref={}", + x, y, dst_approx[idx], dst_ref[idx] + ); } } - - println!("Scalar implementations completed."); - println!( - "Input sample (row 2, cols 1..9): {:?}", - &src[2 * WIDTH + 1..2 * WIDTH + 9] - ); - println!( - "Exact output (row 2, cols 1..9): {:?}", - &dst_exact[2 * WIDTH + 1..2 * WIDTH + 9] - ); - println!( - "Approx output (row 2, cols 1..9): {:?}", - &dst_approx[2 * WIDTH + 1..2 * WIDTH + 9] - ); - println!("Max diff between exact and approx: {}", max_diff); } #[cfg(target_arch = "hexagon")] { - const WIDTH: usize = 256; // Must be multiple of VLEN (128) - const HEIGHT: usize = 16; - // Aligned buffers for HVX #[repr(align(128))] struct AlignedBuf([u8; N]); @@ -309,15 +307,12 @@ fn main() { let mut dst_hvx = AlignedBuf::<{ WIDTH * HEIGHT }>([0u8; WIDTH * HEIGHT]); let mut tmp = AlignedBuf::<{ WIDTH }>([0u8; WIDTH]); let mut dst_ref = 
vec![0u8; WIDTH * HEIGHT]; + let mut dst_approx = vec![0u8; WIDTH * HEIGHT]; - // Create test pattern - for y in 0..HEIGHT { - for x in 0..WIDTH { - src.0[y * WIDTH + x] = ((x + y * 7) % 256) as u8; - } - } + // Generate test pattern + generate_test_pattern(&mut src.0, WIDTH, HEIGHT); - // Run HVX version + // Run HVX implementation unsafe { gaussian3x3u8( src.0.as_ptr(), @@ -329,32 +324,34 @@ fn main() { ); } - // Run scalar approximate reference (should match HVX closely) - gaussian3x3u8_scalar_approx(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_ref); + // Run reference + gaussian3x3u8_reference(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_ref); + + // Run scalar approximation (should match HVX exactly) + gaussian3x3u8_approx(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_approx); - // Compare results (skip edges) - let mut max_diff = 0u8; - let mut diff_count = 0usize; + // Verify HVX matches the byte-averaging approximation exactly for y in 1..HEIGHT - 1 { for x in 1..WIDTH - 1 { let idx = y * WIDTH + x; - let diff = (dst_hvx.0[idx] as i16 - dst_ref[idx] as i16).unsigned_abs() as u8; - if diff > max_diff { - max_diff = diff; - } - if diff > 0 { - diff_count += 1; - } + assert_eq!( + dst_hvx.0[idx], dst_approx[idx], + "HVX output differs from scalar approximation at ({}, {}): hvx={}, approx={}", + x, y, dst_hvx.0[idx], dst_approx[idx] + ); } } - println!("HVX implementation completed."); - println!("Max difference from scalar reference: {}", max_diff); - println!("Pixels with any difference: {}", diff_count); - if max_diff <= 1 { - println!("Results match within rounding tolerance!"); - } else { - println!("WARNING: Results differ more than expected."); + // Verify HVX exactly matches reference for this test pattern + for y in 1..HEIGHT - 1 { + for x in 1..WIDTH - 1 { + let idx = y * WIDTH + x; + assert_eq!( + dst_hvx.0[idx], dst_ref[idx], + "HVX differs from reference at ({}, {}): hvx={}, ref={}", + x, y, dst_hvx.0[idx], dst_ref[idx] + ); + } } } } From 
eb6bbfa830e94c9f8ba57a8a5b5367631d2dcbee Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Thu, 12 Feb 2026 07:47:01 -0600 Subject: [PATCH 38/90] examples: Fix rustfmt formatting in gaussian.rs --- library/stdarch/examples/gaussian.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/stdarch/examples/gaussian.rs b/library/stdarch/examples/gaussian.rs index 575f4db8c8d84..b7d8cad805153 100644 --- a/library/stdarch/examples/gaussian.rs +++ b/library/stdarch/examples/gaussian.rs @@ -274,7 +274,8 @@ fn main() { for (i, &expected) in expected_ref_row2.iter().enumerate() { let actual = dst_ref[2 * WIDTH + 1 + i]; assert_eq!( - actual, expected, + actual, + expected, "reference mismatch at row 2, col {}: expected {}, got {}", 1 + i, expected, From 01036ee64e341a83c88ebe38bb94f57eaf5bda44 Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Thu, 12 Feb 2026 07:54:58 -0600 Subject: [PATCH 39/90] stdarch-gen-hexagon: Remove unused module_name method --- library/stdarch/crates/stdarch-gen-hexagon/src/main.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs index 43317ff41d9b1..590042c37cac5 100644 --- a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs @@ -64,13 +64,6 @@ impl VectorMode { self.bytes() / 4 // 32-bit lanes } - fn module_name(&self) -> &'static str { - match self { - VectorMode::V64 => "v64", - VectorMode::V128 => "v128", - } - } - fn target_feature(&self) -> &'static str { match self { VectorMode::V64 => "hvx-length64b", From d2ce1c9c3d87f81e6550f6af56eb133db46e9b42 Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Thu, 12 Feb 2026 08:03:13 -0600 Subject: [PATCH 40/90] stdarch-gen-hexagon: Fix clippy warnings - Move regex compilations outside loops - Use Option::map and or_else instead of manual if-let chains - Use strip_prefix instead of manual starts_with + 
slice - Use !is_empty() instead of len() >= 1 - Combine consecutive str::replace calls --- .../crates/stdarch-gen-hexagon/src/main.rs | 39 +++++++++---------- 1 file changed, 18 insertions(+), 21 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs index 590042c37cac5..9d4e80f8f27ca 100644 --- a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs @@ -333,10 +333,10 @@ fn parse_prototype(prototype: &str) -> Option<(RustType, Vec<(String, RustType)> let mut params = Vec::new(); if !params_str.trim().is_empty() { + // Pattern: Type Name or Type* Name + let param_re = Regex::new(r"(\w+\*?)\s+(\w+)").unwrap(); for param in params_str.split(',') { let param = param.trim(); - // Pattern: Type Name or Type* Name - let param_re = Regex::new(r"(\w+\*?)\s+(\w+)").unwrap(); if let Some(pcaps) = param_re.captures(param) { let ptype_str = pcaps[1].trim(); let pname = pcaps[2].to_lowercase(); @@ -369,6 +369,12 @@ fn parse_header(content: &str) -> Vec { // Also handle builtins without VECTOR_WRAP let simple_builtin_re2 = Regex::new(r"__builtin_HEXAGON_(\w+)\([^)]*\)\s*$").unwrap(); + // Regex to extract Q6 name from #define + let q6_name_re = Regex::new(r"#define\s+(Q6_\w+)").unwrap(); + + // Regex to extract macro expression body + let macro_expr_re = Regex::new(r"#define\s+Q6_\w+\([^)]*\)\s+(.+)").unwrap(); + let lines: Vec<&str> = content.lines().collect(); let mut current_arch: u32 = 60; let mut i = 0; @@ -421,7 +427,6 @@ fn parse_header(content: &str) -> Vec { let define_line = lines[j]; // Extract Q6 name and check if it's simple or compound - let q6_name_re = Regex::new(r"#define\s+(Q6_\w+)").unwrap(); if let Some(caps) = q6_name_re.captures(define_line) { let q6_name = caps[1].to_string(); @@ -434,14 +439,10 @@ fn parse_header(content: &str) -> Vec { } // Try to extract simple builtin name - let builtin_name = if let 
Some(bcaps) = simple_builtin_re.captures(¯o_body) - { - Some(bcaps[1].to_string()) - } else if let Some(bcaps) = simple_builtin_re2.captures(¯o_body) { - Some(bcaps[1].to_string()) - } else { - None - }; + let builtin_name = simple_builtin_re + .captures(¯o_body) + .or_else(|| simple_builtin_re2.captures(¯o_body)) + .map(|bcaps| bcaps[1].to_string()); // Check if it's a compound intrinsic (multiple __builtin calls) let builtin_count = macro_body.matches("__builtin_HEXAGON_").count(); @@ -452,11 +453,8 @@ fn parse_header(content: &str) -> Vec { if is_compound { // For compound intrinsics, parse the expression // Extract the macro body after the parameter list - let macro_expr_re = - Regex::new(r"#define\s+Q6_\w+\([^)]*\)\s+(.+)").unwrap(); if let Some(expr_caps) = macro_expr_re.captures(¯o_body) { - let expr_str = - expr_caps[1].trim().replace('\n', " ").replace('\\', " "); + let expr_str = expr_caps[1].trim().replace(['\n', '\\'], " "); let expr_str = expr_str.trim(); if let Some(compound_expr) = parse_compound_expr(expr_str) { @@ -486,11 +484,10 @@ fn parse_header(content: &str) -> Vec { } } else if let Some(builtin) = builtin_name { // Extract short instruction name - let instr_name = if builtin.starts_with("V6_") { - builtin[3..].to_string() - } else { - builtin.clone() - }; + let instr_name = builtin + .strip_prefix("V6_") + .map(|s| s.to_string()) + .unwrap_or_else(|| builtin.clone()); intrinsics.push(IntrinsicInfo { q6_name, @@ -1365,7 +1362,7 @@ fn get_compound_primary_instr(expr: &CompoundExpr) -> Option { match expr { CompoundExpr::BuiltinCall(name, args) => { // For vandqrt wrapper, look inside - if name == "vandqrt" && args.len() >= 1 { + if name == "vandqrt" && !args.is_empty() { if let Some(inner) = get_compound_primary_instr(&args[0]) { return Some(inner); } From 7df6e6799fceff11ce4e0a73520050b549cc9acf Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Thu, 12 Feb 2026 10:41:53 -0600 Subject: [PATCH 41/90] Update Cargo.lock Updates cc crate to 1.2.55 
which fixes macabi target triple handling for x86_64-apple-ios-macabi builds. --- library/stdarch/Cargo.lock | 595 ++++++++++++++++++++++++------------- 1 file changed, 384 insertions(+), 211 deletions(-) diff --git a/library/stdarch/Cargo.lock b/library/stdarch/Cargo.lock index 66dd59a379aa3..36b2b09acb2cb 100644 --- a/library/stdarch/Cargo.lock +++ b/library/stdarch/Cargo.lock @@ -10,18 +10,18 @@ checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] [[package]] name = "anstream" -version = "0.6.20" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -34,9 +34,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "anstyle-parse" @@ -49,29 +49,29 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.10" +version = "3.0.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] name = "anyhow" -version = "1.0.99" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100" +checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" [[package]] name = "assert-instr-macro" @@ -96,15 +96,15 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bitflags" -version = "2.9.4" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "cc" -version = "1.2.36" +version = "1.2.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5252b3d2648e5eedbc1a6f501e3c795e07025c1e93bbf8bbdd6eef7f447a6d54" +checksum = "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29" dependencies = [ "find-msvc-tools", "shlex", @@ -112,15 +112,15 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "clap" -version = "4.5.47" +version = "4.5.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931" +checksum = "63be97961acde393029492ce0be7a1af7e323e6bae9511ebfac33751be5e6806" dependencies = 
[ "clap_builder", "clap_derive", @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.47" +version = "4.5.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6" +checksum = "7f13174bda5dfd69d7e947827e5af4b0f2f94a4a3ee92912fba07a66150f21e2" dependencies = [ "anstream", "anstyle", @@ -140,9 +140,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.47" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" +checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ "heck", "proc-macro2", @@ -152,9 +152,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" +checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" [[package]] name = "colorchoice" @@ -206,9 +206,9 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "darling" -version = "0.20.11" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ "darling_core", "darling_macro", @@ -216,9 +216,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.11" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" dependencies = [ "fnv", "ident_case", @@ -230,9 +230,9 @@ dependencies = [ 
[[package]] name = "darling_macro" -version = "0.20.11" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core", "quote", @@ -263,10 +263,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] -name = "env_logger" -version = "0.8.4" +name = "env_filter" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f" dependencies = [ "log", "regex", @@ -285,6 +285,16 @@ dependencies = [ "termcolor", ] +[[package]] +name = "env_logger" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" +dependencies = [ + "env_filter", + "log", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -293,15 +303,15 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "find-msvc-tools" -version = "0.1.1" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" [[package]] name = "flate2" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" dependencies = [ "crc32fast", "miniz_oxide", @@ -313,6 +323,12 @@ version = "1.0.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "form_urlencoded" version = "1.2.2" @@ -324,15 +340,29 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", "libc", "wasi", ] +[[package]] +name = "getrandom" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "rand_core 0.10.0", + "wasip2", + "wasip3", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -344,6 +374,15 @@ name = "hashbrown" version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" [[package]] name = "heck" @@ -359,9 +398,9 @@ checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "humantime" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = 
"icu_collections" @@ -444,6 +483,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "ident_case" version = "1.0.1" @@ -483,12 +528,14 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.11.0" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.1", + "serde", + "serde_core", ] [[package]] @@ -510,20 +557,20 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "is_terminal_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" @@ -536,15 +583,21 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "libc" -version = "0.2.175" +version = "0.2.181" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" +checksum = "459427e2af2b9c839b132acb702a1c654d95e10f8c326bfc2ad11310e458b1c5" [[package]] name = "linked-hash-map" @@ -560,15 +613,15 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "memchr" -version = "2.7.6" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "miniz_oxide" @@ -588,9 +641,9 @@ checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "once_cell_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" [[package]] name = "percent-encoding" @@ -626,11 +679,21 @@ dependencies = [ "log", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name = "proc-macro2" -version = "1.0.101" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -657,24 +720,30 @@ dependencies = [ [[package]] name = "quickcheck" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +checksum = "95c589f335db0f6aaa168a7cd27b1fc6920f5e1470c804f814d9cd6e62a0f70b" dependencies = [ - "env_logger 0.8.4", + "env_logger 0.11.9", "log", - "rand", + "rand 0.10.0", ] [[package]] name = "quote" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "rand" version = "0.8.5" @@ -683,7 +752,17 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8" +dependencies = [ + "getrandom 0.4.1", + "rand_core 0.10.0", ] [[package]] @@ -693,7 +772,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -702,9 +781,15 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.17", ] +[[package]] +name = "rand_core" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c8d0fd677905edcbeedbf2edb6494d676f0e98d54d5cf9bda0b061cb8fb8aba" + [[package]] name = "rayon" version = "1.11.0" @@ -727,9 +812,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -739,9 +824,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.10" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -750,9 +835,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.6" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] name = "ring" @@ -762,7 +847,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.17", "libc", "untrusted", "windows-sys 0.52.0", @@ -770,9 +855,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +checksum = 
"b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "rustls" @@ -811,9 +896,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -826,36 +911,46 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] [[package]] name = "serde-xml-rs" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53630160a98edebde0123eb4dfd0fce6adff091b2305db3154a9e920206eb510" +checksum = "cc2215ce3e6a77550b80a1c37251b7d294febaf42e36e21b7b411e0bf54d540d" dependencies = [ "log", "serde", "thiserror", - "xml-rs", + "xml", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = 
"d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", @@ -864,32 +959,32 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.143" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", - "ryu", "serde", + "serde_core", + "zmij", ] [[package]] name = "serde_with" -version = "3.14.0" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" +checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" dependencies = [ - "serde", - "serde_derive", + "serde_core", "serde_with_macros", ] [[package]] name = "serde_with_macros" -version = "3.14.0" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" +checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" dependencies = [ "darling", "proc-macro2", @@ -968,7 +1063,7 @@ dependencies = [ name = "stdarch-gen-loongarch" version = "0.1.0" dependencies = [ - "rand", + "rand 0.8.5", ] [[package]] @@ -1001,7 +1096,7 @@ version = "0.0.0" dependencies = [ "core_arch", "quickcheck", - "rand", + "rand 0.8.5", ] [[package]] @@ -1018,9 +1113,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.106" +version = "2.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +checksum = "6e614ed320ac28113fa64972c4262d5dbc89deacdfd00c34a3e4cea073243c12" dependencies = [ "proc-macro2", "quote", @@ -1055,18 +1150,18 @@ dependencies = [ 
[[package]] name = "thiserror" -version = "1.0.69" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.69" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", @@ -1085,9 +1180,15 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e" + +[[package]] +name = "unicode-xid" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "untrusted" @@ -1151,6 +1252,46 @@ version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" +[[package]] +name = "wasip2" +version = "1.0.2+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-encoder" +version = 
"0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser 0.244.0", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap 2.13.0", + "wasm-encoder", + "wasmparser 0.244.0", +] + [[package]] name = "wasmparser" version = "0.235.0" @@ -1158,7 +1299,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "161296c618fa2d63f6ed5fffd1112937e803cb9ec71b32b01a76321555660917" dependencies = [ "bitflags", - "indexmap 2.11.0", + "indexmap 2.13.0", + "semver", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap 2.13.0", "semver", ] @@ -1170,7 +1323,7 @@ checksum = "75aa8e9076de6b9544e6dab4badada518cca0bf4966d35b131bbd057aed8fa0a" dependencies = [ "anyhow", "termcolor", - "wasmparser", + "wasmparser 0.235.0", ] [[package]] @@ -1179,32 +1332,32 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.5", + "webpki-roots 1.0.6", ] [[package]] name = "webpki-roots" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" +checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed" dependencies = [ "rustls-pki-types", ] [[package]] name = "winapi-util" -version = "0.1.10" +version = "0.1.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] name = "windows-link" -version = "0.1.3" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-sys" @@ -1212,25 +1365,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.6", + "windows-targets", ] [[package]] name = "windows-sys" -version = "0.59.0" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.3", + "windows-link", ] [[package]] @@ -1239,31 +1383,14 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows-targets" 
-version = "0.53.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" -dependencies = [ - "windows-link", - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", ] [[package]] @@ -1272,36 +1399,18 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" - [[package]] name = "windows_i686_gnullvm" version = "0.52.6" @@ -1309,58 +1418,116 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" +name = "windows_i686_msvc" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] -name = "windows_i686_msvc" +name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] -name = "windows_i686_msvc" -version = "0.53.0" +name = "windows_x86_64_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] -name = "windows_x86_64_gnu" +name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" +name = "wit-bindgen" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] [[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" +name = "wit-bindgen-core" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] [[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" +name = "wit-bindgen-rust" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap 2.13.0", + "prettyplease", + "syn", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] [[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" +name = "wit-bindgen-rust-macro" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn", + "wit-bindgen-core", + "wit-bindgen-rust", +] [[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap 2.13.0", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser 0.244.0", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap 2.13.0", + "log", + "semver", + "serde", + 
"serde_derive", + "serde_json", + "unicode-xid", + "wasmparser 0.244.0", +] [[package]] name = "writeable" @@ -1369,10 +1536,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] -name = "xml-rs" -version = "0.8.27" +name = "xml" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd8403733700263c6eb89f192880191f1b83e332f7a20371ddcf421c4a337c7" +checksum = "b8aa498d22c9bbaf482329839bc5620c46be275a19a812e9a22a2b07529a642a" [[package]] name = "yaml-rust" @@ -1408,18 +1575,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.27" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.27" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", @@ -1485,3 +1652,9 @@ dependencies = [ "quote", "syn", ] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" From 5b7ab682b421f31dd83c098a56b4b9acd3311580 Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Thu, 12 Feb 2026 11:39:37 -0600 Subject: [PATCH 42/90] examples: Simplify gaussian.rs with cfg gate Add `#![cfg(target_arch = "hexagon")]` - Remove redundant #[cfg(target_arch = "hexagon")] from functions - Simplify import and constant cfg conditions - Remove non-Hexagon test code branch from main() --- 
library/stdarch/examples/gaussian.rs | 171 +++++++++------------------ 1 file changed, 58 insertions(+), 113 deletions(-) diff --git a/library/stdarch/examples/gaussian.rs b/library/stdarch/examples/gaussian.rs index b7d8cad805153..3e9d89db9782b 100644 --- a/library/stdarch/examples/gaussian.rs +++ b/library/stdarch/examples/gaussian.rs @@ -29,8 +29,9 @@ //! qemu-hexagon -L /target/hexagon-unknown-linux-musl \ //! target/hexagon-unknown-linux-musl/debug/gaussian -#![cfg_attr(target_arch = "hexagon", feature(stdarch_hexagon))] -#![cfg_attr(target_arch = "hexagon", feature(hexagon_target_feature))] +#![cfg(target_arch = "hexagon")] +#![feature(stdarch_hexagon)] +#![feature(hexagon_target_feature)] #![allow( unsafe_op_in_unsafe_fn, clippy::unwrap_used, @@ -41,19 +42,23 @@ dead_code )] -#[cfg(all(target_arch = "hexagon", not(target_feature = "hvx-length128b")))] +#[cfg(not(target_feature = "hvx-length128b"))] use core_arch::arch::hexagon::v64::*; -#[cfg(all(target_arch = "hexagon", target_feature = "hvx-length128b"))] +#[cfg(target_feature = "hvx-length128b")] use core_arch::arch::hexagon::v128::*; /// Vector length in bytes for HVX 128-byte mode -#[cfg(all(target_arch = "hexagon", target_feature = "hvx-length128b"))] +#[cfg(target_feature = "hvx-length128b")] const VLEN: usize = 128; /// Vector length in bytes for HVX 64-byte mode -#[cfg(all(target_arch = "hexagon", not(target_feature = "hvx-length128b")))] +#[cfg(not(target_feature = "hvx-length128b"))] const VLEN: usize = 64; +/// Image width - must be multiple of VLEN +const WIDTH: usize = 256; +const HEIGHT: usize = 16; + /// Vertical 1-2-1 filter pass using byte averaging /// /// Computes: dst[x] = avg(avg(row_above[x], row_below[x]), center[x]) @@ -65,7 +70,6 @@ const VLEN: usize = 64; /// - `dst` must point to a valid output buffer for `width` bytes /// - `width` must be a multiple of VLEN /// - All pointers must be HVX-aligned (128-byte for 128B mode) -#[cfg(target_arch = "hexagon")] #[target_feature(enable = 
"hvxv60")] unsafe fn vertical_121_pass(src: *const u8, stride: isize, width: usize, dst: *mut u8) { let inp0 = src.offset(-stride) as *const HvxVector; @@ -101,7 +105,6 @@ unsafe fn vertical_121_pass(src: *const u8, stride: isize, width: usize, dst: *m /// - `src` and `dst` must point to valid buffers of `width` bytes /// - `width` must be a multiple of VLEN /// - All pointers must be HVX-aligned -#[cfg(target_arch = "hexagon")] #[target_feature(enable = "hvxv60")] unsafe fn horizontal_121_pass(src: *const u8, width: usize, dst: *mut u8) { let inp = src as *const HvxVector; @@ -153,7 +156,6 @@ unsafe fn horizontal_121_pass(src: *const u8, width: usize, dst: *mut u8) { /// - `width` must be a multiple of VLEN and >= VLEN /// - `stride` must be >= `width` /// - All buffers must be HVX-aligned (128-byte for 128B mode) -#[cfg(target_arch = "hexagon")] #[target_feature(enable = "hvxv60")] pub unsafe fn gaussian3x3u8( src: *const u8, @@ -234,7 +236,7 @@ fn gaussian3x3u8_approx(src: &[u8], stride: usize, width: usize, height: usize, } } -/// Generate deterministic test pattern matching test approach +/// Generate deterministic test pattern fn generate_test_pattern(buf: &mut [u8], width: usize, height: usize) { for y in 0..height { for x in 0..width { @@ -244,115 +246,58 @@ fn generate_test_pattern(buf: &mut [u8], width: usize, height: usize) { } fn main() { - // Test dimensions - #[cfg(not(target_arch = "hexagon"))] - const WIDTH: usize = 128; - #[cfg(target_arch = "hexagon")] - const WIDTH: usize = 256; // Must be multiple of VLEN (128) - const HEIGHT: usize = 16; - - #[cfg(not(target_arch = "hexagon"))] - { - let mut src = vec![0u8; WIDTH * HEIGHT]; - let mut dst_ref = vec![0u8; WIDTH * HEIGHT]; - let mut dst_approx = vec![0u8; WIDTH * HEIGHT]; - - // Generate test pattern - generate_test_pattern(&mut src, WIDTH, HEIGHT); - - // Run reference implementation - gaussian3x3u8_reference(&src, WIDTH, WIDTH, HEIGHT, &mut dst_ref); - - // Run byte-averaging approximation 
(matches HVX behavior) - gaussian3x3u8_approx(&src, WIDTH, WIDTH, HEIGHT, &mut dst_approx); - - // Verify specific output values from reference - // These are computed using the exact algorithm on our test pattern - // Row 2, cols 1..9 with input pattern ((x + y*7) % 256) - // Input: row1=[7,8,9,...], row2=[14,15,16,...], row3=[21,22,23,...] - let expected_ref_row2: [u8; 8] = [15, 16, 17, 18, 19, 20, 21, 22]; - for (i, &expected) in expected_ref_row2.iter().enumerate() { - let actual = dst_ref[2 * WIDTH + 1 + i]; + // Aligned buffers for HVX + #[repr(align(128))] + struct AlignedBuf([u8; N]); + + let mut src = AlignedBuf::<{ WIDTH * HEIGHT }>([0u8; WIDTH * HEIGHT]); + let mut dst_hvx = AlignedBuf::<{ WIDTH * HEIGHT }>([0u8; WIDTH * HEIGHT]); + let mut tmp = AlignedBuf::<{ WIDTH }>([0u8; WIDTH]); + let mut dst_ref = vec![0u8; WIDTH * HEIGHT]; + let mut dst_approx = vec![0u8; WIDTH * HEIGHT]; + + // Generate test pattern + generate_test_pattern(&mut src.0, WIDTH, HEIGHT); + + // Run HVX implementation + unsafe { + gaussian3x3u8( + src.0.as_ptr(), + WIDTH, + WIDTH, + HEIGHT, + dst_hvx.0.as_mut_ptr(), + tmp.0.as_mut_ptr(), + ); + } + + // Run reference + gaussian3x3u8_reference(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_ref); + + // Run scalar approximation (should match HVX exactly) + gaussian3x3u8_approx(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_approx); + + // Verify HVX matches the byte-averaging approximation exactly + for y in 1..HEIGHT - 1 { + for x in 1..WIDTH - 1 { + let idx = y * WIDTH + x; assert_eq!( - actual, - expected, - "reference mismatch at row 2, col {}: expected {}, got {}", - 1 + i, - expected, - actual + dst_hvx.0[idx], dst_approx[idx], + "HVX output differs from scalar approximation at ({}, {}): hvx={}, approx={}", + x, y, dst_hvx.0[idx], dst_approx[idx] ); } - - // Verify approximation exactly matches reference for this test pattern - // The byte-averaging approach avg(avg(a,c), b) produces identical results - // to the Hexagon SDK's (1*a + 2*b + 1*c + 
2) / 4 for this input pattern - for y in 1..HEIGHT - 1 { - for x in 1..WIDTH - 1 { - let idx = y * WIDTH + x; - assert_eq!( - dst_approx[idx], dst_ref[idx], - "Approximation differs from reference at ({}, {}): approx={}, ref={}", - x, y, dst_approx[idx], dst_ref[idx] - ); - } - } } - #[cfg(target_arch = "hexagon")] - { - // Aligned buffers for HVX - #[repr(align(128))] - struct AlignedBuf([u8; N]); - - let mut src = AlignedBuf::<{ WIDTH * HEIGHT }>([0u8; WIDTH * HEIGHT]); - let mut dst_hvx = AlignedBuf::<{ WIDTH * HEIGHT }>([0u8; WIDTH * HEIGHT]); - let mut tmp = AlignedBuf::<{ WIDTH }>([0u8; WIDTH]); - let mut dst_ref = vec![0u8; WIDTH * HEIGHT]; - let mut dst_approx = vec![0u8; WIDTH * HEIGHT]; - - // Generate test pattern - generate_test_pattern(&mut src.0, WIDTH, HEIGHT); - - // Run HVX implementation - unsafe { - gaussian3x3u8( - src.0.as_ptr(), - WIDTH, - WIDTH, - HEIGHT, - dst_hvx.0.as_mut_ptr(), - tmp.0.as_mut_ptr(), + // Verify HVX exactly matches reference for this test pattern + for y in 1..HEIGHT - 1 { + for x in 1..WIDTH - 1 { + let idx = y * WIDTH + x; + assert_eq!( + dst_hvx.0[idx], dst_ref[idx], + "HVX differs from reference at ({}, {}): hvx={}, ref={}", + x, y, dst_hvx.0[idx], dst_ref[idx] ); } - - // Run reference - gaussian3x3u8_reference(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_ref); - - // Run scalar approximation (should match HVX exactly) - gaussian3x3u8_approx(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_approx); - - // Verify HVX matches the byte-averaging approximation exactly - for y in 1..HEIGHT - 1 { - for x in 1..WIDTH - 1 { - let idx = y * WIDTH + x; - assert_eq!( - dst_hvx.0[idx], dst_approx[idx], - "HVX output differs from scalar approximation at ({}, {}): hvx={}, approx={}", - x, y, dst_hvx.0[idx], dst_approx[idx] - ); - } - } - - // Verify HVX exactly matches reference for this test pattern - for y in 1..HEIGHT - 1 { - for x in 1..WIDTH - 1 { - let idx = y * WIDTH + x; - assert_eq!( - dst_hvx.0[idx], dst_ref[idx], - "HVX differs from 
reference at ({}, {}): hvx={}, ref={}", - x, y, dst_hvx.0[idx], dst_ref[idx] - ); - } - } } } From 81cb2a9d6de822b3cda5dec3e080a10c88a0a0ee Mon Sep 17 00:00:00 2001 From: mu001999 Date: Sat, 7 Feb 2026 17:49:08 +0800 Subject: [PATCH 43/90] Remove unused features in compiler --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index a49dc9be34583..bbf7d1b6998e8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,8 +2,8 @@ // Note: please avoid adding other feature gates where possible #![feature(rustc_private)] // Only used to define intrinsics in `compiler_builtins.rs`. -#![feature(f16)] -#![feature(f128)] +#![cfg_attr(feature = "jit", feature(f16))] +#![cfg_attr(feature = "jit", feature(f128))] // Note: please avoid adding other feature gates where possible #![warn(rust_2018_idioms)] #![warn(unreachable_pub)] From 94db326f2597590ebeac26cde82ddb7f2921da46 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Fri, 13 Feb 2026 18:15:08 +1100 Subject: [PATCH 44/90] Remove unnecessary closure. The comments that says it's necessary is wrong. 
--- compiler/rustc_interface/src/passes.rs | 98 ++++++++++---------------- 1 file changed, 37 insertions(+), 61 deletions(-) diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index e474f106433df..4a1a9ab945580 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -10,7 +10,6 @@ use rustc_attr_parsing::{AttributeParser, Early, ShouldEmit}; use rustc_codegen_ssa::traits::CodegenBackend; use rustc_codegen_ssa::{CodegenResults, CrateInfo}; use rustc_data_structures::indexmap::IndexMap; -use rustc_data_structures::jobserver::Proxy; use rustc_data_structures::steal::Steal; use rustc_data_structures::sync::{AppendOnlyIndexVec, FreezeLock, WorkerLocal}; use rustc_data_structures::{parallel, thousands}; @@ -28,7 +27,7 @@ use rustc_lint::{BufferedEarlyLint, EarlyCheckNode, LintStore, unerased_lint_sto use rustc_metadata::EncodedMetadata; use rustc_metadata::creader::CStore; use rustc_middle::arena::Arena; -use rustc_middle::ty::{self, CurrentGcx, GlobalCtxt, RegisteredTools, TyCtxt}; +use rustc_middle::ty::{self, RegisteredTools, TyCtxt}; use rustc_middle::util::Providers; use rustc_parse::lexer::StripTokens; use rustc_parse::{new_parser_from_file, new_parser_from_source_str, unwrap_or_emit_fatal}; @@ -969,68 +968,45 @@ pub fn create_and_enter_global_ctxt FnOnce(TyCtxt<'tcx>) -> T>( let arena = WorkerLocal::new(|_| Arena::default()); let hir_arena = WorkerLocal::new(|_| rustc_hir::Arena::default()); - // This closure is necessary to force rustc to perform the correct lifetime - // subtyping for GlobalCtxt::enter to be allowed. 
- let inner: Box< - dyn for<'tcx> FnOnce( - &'tcx Session, - CurrentGcx, - Arc, - &'tcx OnceLock>, - &'tcx WorkerLocal>, - &'tcx WorkerLocal>, - F, - ) -> T, - > = Box::new(move |sess, current_gcx, jobserver_proxy, gcx_cell, arena, hir_arena, f| { - TyCtxt::create_global_ctxt( - gcx_cell, - sess, - crate_types, - stable_crate_id, - arena, - hir_arena, - untracked, - dep_graph, - rustc_query_impl::make_dep_kind_vtables(arena), - rustc_query_impl::query_system( - providers.queries, - providers.extern_queries, - query_result_on_disk_cache, - incremental, - ), - providers.hooks, - current_gcx, - jobserver_proxy, - |tcx| { - let feed = tcx.create_crate_num(stable_crate_id).unwrap(); - assert_eq!(feed.key(), LOCAL_CRATE); - feed.crate_name(crate_name); - - let feed = tcx.feed_unit_query(); - feed.features_query(tcx.arena.alloc(rustc_expand::config::features( - tcx.sess, - &pre_configured_attrs, - crate_name, - ))); - feed.crate_for_resolver(tcx.arena.alloc(Steal::new((krate, pre_configured_attrs)))); - feed.output_filenames(Arc::new(outputs)); - - let res = f(tcx); - // FIXME maybe run finish even when a fatal error occurred? or at least tcx.alloc_self_profile_query_strings()? 
- tcx.finish(); - res - }, - ) - }); - - inner( - &compiler.sess, - compiler.current_gcx.clone(), - Arc::clone(&compiler.jobserver_proxy), + TyCtxt::create_global_ctxt( &gcx_cell, + &compiler.sess, + crate_types, + stable_crate_id, &arena, &hir_arena, - f, + untracked, + dep_graph, + rustc_query_impl::make_dep_kind_vtables(&arena), + rustc_query_impl::query_system( + providers.queries, + providers.extern_queries, + query_result_on_disk_cache, + incremental, + ), + providers.hooks, + compiler.current_gcx.clone(), + Arc::clone(&compiler.jobserver_proxy), + |tcx| { + let feed = tcx.create_crate_num(stable_crate_id).unwrap(); + assert_eq!(feed.key(), LOCAL_CRATE); + feed.crate_name(crate_name); + + let feed = tcx.feed_unit_query(); + feed.features_query(tcx.arena.alloc(rustc_expand::config::features( + tcx.sess, + &pre_configured_attrs, + crate_name, + ))); + feed.crate_for_resolver(tcx.arena.alloc(Steal::new((krate, pre_configured_attrs)))); + feed.output_filenames(Arc::new(outputs)); + + let res = f(tcx); + // FIXME maybe run finish even when a fatal error occurred? or at least + // tcx.alloc_self_profile_query_strings()? 
+ tcx.finish(); + res + }, ) } From 47f2bf6d65ebd04016d3ac1aac52a94457afc733 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Fri, 13 Feb 2026 10:38:52 +0000 Subject: [PATCH 45/90] Rustup to rustc 1.95.0-nightly (47611e160 2026-02-12) --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 1faaa47c88433..510772d71306a 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2026-01-27" +channel = "nightly-2026-02-13" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From 0b477792235b917a1d2b01b59a68ed8ec3405729 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Fri, 13 Feb 2026 11:18:20 +0000 Subject: [PATCH 46/90] Fix rustc test suite --- scripts/test_rustc_tests.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/scripts/test_rustc_tests.sh b/scripts/test_rustc_tests.sh index a50eec40cb649..6cfbd46a4cd98 100755 --- a/scripts/test_rustc_tests.sh +++ b/scripts/test_rustc_tests.sh @@ -59,6 +59,7 @@ rm tests/ui/asm/x86_64/goto.rs # inline asm labels not supported rm tests/ui/asm/label-operand.rs # same rm tests/ui/asm/may_unwind.rs # asm unwinding not supported rm tests/ui/asm/aarch64/may_unwind.rs # same +rm tests/ui/asm/x86_64/global_asm_escape.rs # rust-lang/rust#151955 needs to be applied to non-LLVM codegen backends too # misc unimplemented things rm tests/ui/target-feature/missing-plusminus.rs # error not implemented @@ -127,6 +128,14 @@ rm -r tests/run-make/notify-all-emit-artifacts rm -r tests/run-make/reset-codegen-1 rm -r tests/run-make/inline-always-many-cgu rm -r tests/run-make/intrinsic-unreachable +rm -r tests/run-make/artifact-incr-cache +rm -r tests/run-make/artifact-incr-cache-no-obj +rm -r tests/run-make/emit +rm -r tests/run-make/llvm-outputs +rm -r tests/run-make/panic-impl-transitive +rm -r 
tests/ui/debuginfo/debuginfo-emit-llvm-ir-and-split-debuginfo.rs +rm -r tests/ui/statics/issue-91050-1.rs +rm -r tests/ui/statics/issue-91050-2.rs # giving different but possibly correct results # ============================================= @@ -135,6 +144,7 @@ rm tests/ui/mir/mir_raw_fat_ptr.rs # same rm tests/ui/consts/issue-33537.rs # same rm tests/ui/consts/const-mut-refs-crate.rs # same rm tests/ui/abi/large-byval-align.rs # exceeds implementation limit of Cranelift +rm -r tests/run-make/short-ice # ICE backtrace begin/end marker mismatch # doesn't work due to the way the rustc test suite is invoked. # should work when using ./x.py test the way it is intended From 15831ddb754510de90ea6018a9d4ec2b61b9a85f Mon Sep 17 00:00:00 2001 From: usamoi Date: Fri, 13 Feb 2026 20:30:23 +0800 Subject: [PATCH 47/90] move `escape_symbol_name` to `cg_ssa` --- src/global_asm.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/global_asm.rs b/src/global_asm.rs index 97d6cecf68481..1daf428acf766 100644 --- a/src/global_asm.rs +++ b/src/global_asm.rs @@ -106,6 +106,7 @@ fn codegen_global_asm_inner<'tcx>( match *piece { InlineAsmTemplatePiece::String(ref s) => global_asm.push_str(s), InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span } => { + use rustc_codegen_ssa::back::symbol_export::escape_symbol_name; match operands[operand_idx] { GlobalAsmOperandRef::Const { ref string } => { global_asm.push_str(string); @@ -121,7 +122,7 @@ fn codegen_global_asm_inner<'tcx>( let symbol = tcx.symbol_name(instance); // FIXME handle the case where the function was made private to the // current codegen unit - global_asm.push_str(symbol.name); + global_asm.push_str(&escape_symbol_name(tcx, symbol.name, span)); } GlobalAsmOperandRef::SymStatic { def_id } => { if cfg!(not(feature = "inline_asm_sym")) { @@ -133,7 +134,7 @@ fn codegen_global_asm_inner<'tcx>( let instance = Instance::mono(tcx, def_id); let symbol = tcx.symbol_name(instance); - 
global_asm.push_str(symbol.name); + global_asm.push_str(&escape_symbol_name(tcx, symbol.name, span)); } } } From 1e367f9663133da183f078ee2ee6e5421e74eab3 Mon Sep 17 00:00:00 2001 From: Martin Nordholts Date: Sat, 14 Feb 2026 18:57:38 +0100 Subject: [PATCH 48/90] tests: rustc_public: Check const allocation for all variables (1 of 11 was missing) In the test `tests/ui-fulldeps/rustc_public/check_allocation.rs` there is a check for constant allocations of local variables of this function: fn other_consts() {{ let _max_u128 = u128::MAX; let _min_i128 = i128::MIN; let _max_i8 = i8::MAX; let _char = 'x'; let _false = false; let _true = true; let _ptr = &BAR; let _null_ptr: *const u8 = NULL; let _tuple = TUPLE; let _char_id = const {{ type_id::() }}; let _bool_id = const {{ type_id::() }}; }} The current test only finds 10 out of 11 allocations. The constant allocation for let _ptr = &BAR; is not checked, because the `SingleUseConsts` MIR pass does not optimize away that assignment. Add code to also collect constant allocation from assignment rvalues. Not only does this change make sense on its own, it also makes the test pass both with and without the `SingleUseConsts` pass. 
--- .../rustc_public/check_allocation.rs | 47 +++++++++++++++---- 1 file changed, 37 insertions(+), 10 deletions(-) diff --git a/tests/ui-fulldeps/rustc_public/check_allocation.rs b/tests/ui-fulldeps/rustc_public/check_allocation.rs index 8f3b9693382b9..580ce98329dc7 100644 --- a/tests/ui-fulldeps/rustc_public/check_allocation.rs +++ b/tests/ui-fulldeps/rustc_public/check_allocation.rs @@ -27,9 +27,9 @@ use std::io::Write; use std::ops::ControlFlow; use rustc_public::crate_def::CrateDef; -use rustc_public::mir::Body; use rustc_public::mir::alloc::GlobalAlloc; use rustc_public::mir::mono::{Instance, StaticDef}; +use rustc_public::mir::{Body, Operand, Rvalue, StatementKind}; use rustc_public::ty::{Allocation, ConstantKind}; use rustc_public::{CrateItem, CrateItems, ItemKind}; @@ -106,7 +106,7 @@ fn check_other_consts(item: CrateItem) { // Instance body will force constant evaluation. let body = Instance::try_from(item).unwrap().body().unwrap(); let assigns = collect_consts(&body); - assert_eq!(assigns.len(), 10); + assert_eq!(assigns.len(), 11); let mut char_id = None; let mut bool_id = None; for (name, alloc) in assigns { @@ -167,17 +167,44 @@ fn check_other_consts(item: CrateItem) { assert_ne!(bool_id, char_id); } -/// Collects all the constant assignments. +/// Collects all constant allocations from `fn other_consts()`. The returned map +/// maps variable names to their corresponding constant allocation. 
pub fn collect_consts(body: &Body) -> HashMap { - body.var_debug_info + let local_to_const_alloc = body + .blocks .iter() - .filter_map(|info| { - info.constant().map(|const_op| { - let ConstantKind::Allocated(alloc) = const_op.const_.kind() else { unreachable!() }; - (info.name.clone(), alloc) - }) + .flat_map(|block| block.statements.iter()) + .filter_map(|statement| { + let StatementKind::Assign(place, Rvalue::Use(Operand::Constant(const_op))) = + &statement.kind + else { + return None; + }; + let ConstantKind::Allocated(alloc) = const_op.const_.kind() else { return None }; + Some((place.local, alloc)) }) - .collect::>() + .collect::>(); + + let mut allocations = HashMap::new(); + for info in &body.var_debug_info { + // MIR optimzations sometimes gets rid of assignments. Look up the + // constant allocation directly in this case. + if let Some(const_op) = info.constant() { + let ConstantKind::Allocated(alloc) = const_op.const_.kind() else { unreachable!() }; + allocations.insert(info.name.clone(), alloc); + } + + // If MIR optimzations didn't get rid of the assignment, then we can + // find the constant allocation as an rvalue of the corresponding + // assignment. + if let Some(local) = info.local() { + if let Some(alloc) = local_to_const_alloc.get(&local) { + allocations.insert(info.name.clone(), alloc); + } + } + } + + allocations } /// Check the allocation data for `LEN`. 
From 6a12103dda8b1cd7deb27f8705152ed458c70d94 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sat, 14 Feb 2026 18:54:35 +0100 Subject: [PATCH 49/90] use `intrinsics::simd` for 'shift right and insert' --- .../core_arch/src/aarch64/neon/generated.rs | 76 +++---------------- .../crates/core_arch/src/aarch64/neon/mod.rs | 24 ++++++ .../spec/neon/aarch64.spec.yml | 31 +++----- 3 files changed, 45 insertions(+), 86 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index a81914af7838b..30c7db3f27a65 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -25530,14 +25530,7 @@ pub fn vsqrth_f16(a: f16) -> f16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v8i8" - )] - fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t; - } - unsafe { _vsri_n_s8(a, b, N) } + unsafe { super::shift_right_and_insert!(u8, 8, N, a, b) } } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] @@ -25548,14 +25541,7 @@ pub fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v16i8" - )] - fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t; - } - unsafe { _vsriq_n_s8(a, b, N) } + unsafe { super::shift_right_and_insert!(u8, 16, N, a, b) } } #[doc = 
"Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] @@ -25566,14 +25552,7 @@ pub fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v4i16" - )] - fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t; - } - unsafe { _vsri_n_s16(a, b, N) } + unsafe { super::shift_right_and_insert!(u16, 4, N, a, b) } } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] @@ -25584,14 +25563,7 @@ pub fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v8i16" - )] - fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; - } - unsafe { _vsriq_n_s16(a, b, N) } + unsafe { super::shift_right_and_insert!(u16, 8, N, a, b) } } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] @@ -25602,14 +25574,7 @@ pub fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v2i32" - )] - fn 
_vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; - } - unsafe { _vsri_n_s32(a, b, N) } + unsafe { super::shift_right_and_insert!(u32, 2, N, a, b) } } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] @@ -25620,14 +25585,7 @@ pub fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v4i32" - )] - fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; - } - unsafe { _vsriq_n_s32(a, b, N) } + unsafe { super::shift_right_and_insert!(u32, 4, N, a, b) } } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] @@ -25638,14 +25596,7 @@ pub fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(N >= 1 && N <= 64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v1i64" - )] - fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t; - } - unsafe { _vsri_n_s64(a, b, N) } + unsafe { super::shift_right_and_insert!(u64, 1, N, a, b) } } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] @@ -25656,14 +25607,7 @@ pub fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 
64); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsri.v2i64" - )] - fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; - } - unsafe { _vsriq_n_s64(a, b, N) } + unsafe { super::shift_right_and_insert!(u64, 2, N, a, b) } } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] @@ -25825,7 +25769,7 @@ pub fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(bfxil, N = 2))] pub fn vsrid_n_s64(a: i64, b: i64) -> i64 { static_assert!(N >= 1 && N <= 64); unsafe { transmute(vsri_n_s64::(transmute(a), transmute(b))) } @@ -25836,7 +25780,7 @@ pub fn vsrid_n_s64(a: i64, b: i64) -> i64 { #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(bfxil, N = 2))] pub fn vsrid_n_u64(a: u64, b: u64) -> u64 { static_assert!(N >= 1 && N <= 64); unsafe { transmute(vsri_n_u64::(transmute(a), transmute(b))) } diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs index 580f203ef0662..135d0a156dc3f 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs @@ -70,6 +70,30 @@ pub struct float64x2x4_t( pub float64x2_t, ); +/// Helper for the 'shift right and insert' functions. +macro_rules! 
shift_right_and_insert { + ($ty:ty, $width:literal, $N:expr, $a:expr, $b:expr) => {{ + type V = Simd<$ty, $width>; + + if $N as u32 == <$ty>::BITS { + $a + } else { + let a: V = transmute($a); + let b: V = transmute($b); + + let mask = <$ty>::MAX >> $N; + let kept: V = simd_and(a, V::splat(!mask)); + + let shift_counts = V::splat($N as $ty); + let shifted = simd_shr(b, shift_counts); + + transmute(simd_or(kept, shifted)) + } + }}; +} + +pub(crate) use shift_right_and_insert; + /// Duplicate vector element to vector or scalar #[inline] #[target_feature(enable = "neon")] diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index 1c95bbe3d3a60..ed6989d44ab53 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -10166,7 +10166,7 @@ intrinsics: attr: - *neon-stable - FnCall: [rustc_legacy_const_generics, ['2']] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [sri, 'N = 2']]}]] + - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [bfxil, 'N = 2']]}]] safety: safe types: - i64 @@ -13722,26 +13722,17 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int8x8_t, 'N >= 1 && N <= 8'] - - [int8x16_t, 'N >= 1 && N <= 8'] - - [int16x4_t, 'N >= 1 && N <= 16'] - - [int16x8_t, 'N >= 1 && N <= 16'] - - [int32x2_t, 'N >= 1 && N <= 32'] - - [int32x4_t, 'N >= 1 && N <= 32'] - - [int64x1_t, 'N >= 1 && N <= 64'] - - [int64x2_t, 'N >= 1 && N <= 64'] + - [int8x8_t, u8, '8', 'N >= 1 && N <= 8'] + - [int8x16_t, u8, '16', 'N >= 1 && N <= 8'] + - [int16x4_t, u16, '4', 'N >= 1 && N <= 16'] + - [int16x8_t, u16, '8', 'N >= 1 && N <= 16'] + - [int32x2_t, u32, '2', 'N >= 1 && N <= 32'] + - [int32x4_t, u32, '4', 'N >= 1 && N <= 32'] + - [int64x1_t, u64, '1', 'N >= 
1 && N <= 64'] + - [int64x2_t, u64, '2', 'N >= 1 && N <= 64'] compose: - - FnCall: ['static_assert!', ['{type[1]}']] - - LLVMLink: - name: "vsri{neon_type[0].N}" - arguments: - - "a: {neon_type[0]}" - - "b: {neon_type[0]}" - - "n: i32" - links: - - link: "llvm.aarch64.neon.vsri.{neon_type[0]}" - arch: aarch64,arm64ec - - FnCall: ["_vsri{neon_type[0].N}", [a, b, N], [], true] + - FnCall: ['static_assert!', ['{type[3]}']] + - FnCall: ["super::shift_right_and_insert!", ['{type[1]}', '{type[2]}', N, a, b], [], true] - name: "vsri{neon_type[0].N}" doc: "Shift Right and Insert (immediate)" From be9dc99118fadf663d636a6da99f15758ff7db7e Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Sun, 15 Feb 2026 00:17:18 -0600 Subject: [PATCH 50/90] examples: Use HvxVectorPair for precise Gaussian blur arithmetic Update the Gaussian 3x3 blur example to use HvxVectorPair widening operations. This demonstrates that HvxVectorPair intrinsics now work correctly with the updated nightly. - Add #![cfg(target_arch = "hexagon")] crate-level gate --- library/stdarch/examples/Cargo.toml | 5 + library/stdarch/examples/gaussian.rs | 173 ++++++++++++++------------- 2 files changed, 92 insertions(+), 86 deletions(-) diff --git a/library/stdarch/examples/Cargo.toml b/library/stdarch/examples/Cargo.toml index 1e893dc15f971..8ac14d3e446a8 100644 --- a/library/stdarch/examples/Cargo.toml +++ b/library/stdarch/examples/Cargo.toml @@ -10,6 +10,10 @@ description = "Examples of the stdarch crate." 
edition = "2024" default-run = "hex" +[features] +# Enable to build Hexagon-specific examples (requires hexagon target) +hexagon = [] + [dependencies] core_arch = { path = "../crates/core_arch" } quickcheck = "1.0" @@ -26,6 +30,7 @@ path = "connect5.rs" [[bin]] name = "gaussian" path = "gaussian.rs" +required-features = ["hexagon"] [[example]] name = "wasm" diff --git a/library/stdarch/examples/gaussian.rs b/library/stdarch/examples/gaussian.rs index 3e9d89db9782b..61fb5e68c6cb2 100644 --- a/library/stdarch/examples/gaussian.rs +++ b/library/stdarch/examples/gaussian.rs @@ -9,19 +9,19 @@ //! 1 2 1 //! //! This is a separable filter: `[1 2 1]^T * [1 2 1] / 16`. -//! Each 1D pass of `[1 2 1] / 4` is computed using byte averaging: -//! avg(avg(a, c), b) ≈ (a + 2b + c) / 4 //! -//! This approach uses only `HvxVector` (single-vector) operations, avoiding -//! `HvxVectorPair` which currently has ABI limitations in the Rust/LLVM -//! Hexagon backend. +//! This implementation uses `HvxVectorPair` for widening arithmetic to achieve +//! full precision in the Gaussian computation, avoiding the approximation errors +//! of byte-averaging approaches. //! -//! To build: +//! # Building and Running //! -//! RUSTFLAGS="-C target-feature=+hvxv60,+hvx-length128b \ +//! To build (requires Hexagon toolchain): +//! +//! RUSTFLAGS="-C target-feature=+hvxv62,+hvx-length128b \ //! -C linker=hexagon-unknown-linux-musl-clang" \ //! cargo +nightly build --bin gaussian -p stdarch_examples \ -//! --target hexagon-unknown-linux-musl \ +//! --features hexagon --target hexagon-unknown-linux-musl \ //! -Zbuild-std -Zbuild-std-features=llvm-libunwind //! //! To run under QEMU: @@ -29,6 +29,7 @@ //! qemu-hexagon -L /target/hexagon-unknown-linux-musl \ //! 
target/hexagon-unknown-linux-musl/debug/gaussian +// This example only compiles on Hexagon targets #![cfg(target_arch = "hexagon")] #![feature(stdarch_hexagon)] #![feature(hexagon_target_feature)] @@ -38,8 +39,7 @@ clippy::print_stdout, clippy::missing_docs_in_private_items, clippy::cast_possible_wrap, - clippy::cast_ptr_alignment, - dead_code + clippy::cast_ptr_alignment )] #[cfg(not(target_feature = "hvx-length128b"))] @@ -59,10 +59,12 @@ const VLEN: usize = 64; const WIDTH: usize = 256; const HEIGHT: usize = 16; -/// Vertical 1-2-1 filter pass using byte averaging +/// Vertical 1-2-1 filter pass using HvxVectorPair widening arithmetic +/// +/// Computes: dst[x] = (row_above[x] + 2*center[x] + row_below[x] + 2) >> 2 /// -/// Computes: dst[x] = avg(avg(row_above[x], row_below[x]), center[x]) -/// ≈ (row_above[x] + 2*center[x] + row_below[x]) / 4 +/// Uses HvxVectorPair to widen u8 to u16 for precise arithmetic, avoiding +/// the rounding errors of byte-averaging approximations. /// /// # Safety /// @@ -70,7 +72,7 @@ const HEIGHT: usize = 16; /// - `dst` must point to a valid output buffer for `width` bytes /// - `width` must be a multiple of VLEN /// - All pointers must be HVX-aligned (128-byte for 128B mode) -#[target_feature(enable = "hvxv60")] +#[target_feature(enable = "hvxv62")] unsafe fn vertical_121_pass(src: *const u8, stride: isize, width: usize, dst: *mut u8) { let inp0 = src.offset(-stride) as *const HvxVector; let inp1 = src as *const HvxVector; @@ -83,29 +85,49 @@ unsafe fn vertical_121_pass(src: *const u8, stride: isize, width: usize, dst: *m let center = *inp1.add(i); let below = *inp2.add(i); - // avg(above, below) ≈ (above + below) / 2 - let avg_ab = q6_vub_vavg_vubvub_rnd(above, below); - // avg(avg_ab, center) ≈ ((above + below)/2 + center) / 2 - // ≈ (above + 2*center + below) / 4 - let result = q6_vub_vavg_vubvub_rnd(avg_ab, center); + // Widen above + below to 16-bit using HvxVectorPair + // q6_wh_vadd_vubvub: adds two u8 vectors, producing 
u16 results in a pair + let above_plus_below: HvxVectorPair = q6_wh_vadd_vubvub(above, below); + + // Widen center * 2 (add center to itself) + let center_x2: HvxVectorPair = q6_wh_vadd_vubvub(center, center); + + // Add them: (above + below) + (center * 2) = above + 2*center + below + let sum: HvxVectorPair = q6_wh_vadd_whwh(above_plus_below, center_x2); + + // Extract high and low vectors from the pair (each contains u16 values) + let sum_lo = q6_v_lo_w(sum); // Lower 64 elements as i16 + let sum_hi = q6_v_hi_w(sum); // Upper 64 elements as i16 + + // Arithmetic right shift by 2 (divide by 4) with rounding + // Add 2 for rounding before shift: (sum + 2) >> 2 + let two = q6_vh_vsplat_r(2); + let sum_lo_rounded = q6_vh_vadd_vhvh(sum_lo, two); + let sum_hi_rounded = q6_vh_vadd_vhvh(sum_hi, two); + let shifted_lo = q6_vh_vasr_vhvh(sum_lo_rounded, two); + let shifted_hi = q6_vh_vasr_vhvh(sum_hi_rounded, two); + + // Pack back to u8 with saturation: takes hi and lo halfword vectors, + // saturates to u8, and interleaves them back to original order + let result = q6_vub_vsat_vhvh(shifted_hi, shifted_lo); *outp.add(i) = result; } } -/// Horizontal 1-2-1 filter pass using byte averaging with vector alignment +/// Horizontal 1-2-1 filter pass using HvxVectorPair widening arithmetic /// -/// Computes: dst[x] = avg(avg(src[x-1], src[x+1]), src[x]) -/// ≈ (src[x-1] + 2*src[x] + src[x+1]) / 4 +/// Computes: dst[x] = (src[x-1] + 2*src[x] + src[x+1] + 2) >> 2 /// -/// Uses `valign` and `vlalign` to shift vectors by 1 byte for neighbor access. +/// Uses `valign` and `vlalign` to shift vectors by 1 byte for neighbor access, +/// then HvxVectorPair for precise widening arithmetic. 
/// /// # Safety /// /// - `src` and `dst` must point to valid buffers of `width` bytes /// - `width` must be a multiple of VLEN /// - All pointers must be HVX-aligned -#[target_feature(enable = "hvxv60")] +#[target_feature(enable = "hvxv62")] unsafe fn horizontal_121_pass(src: *const u8, width: usize, dst: *mut u8) { let inp = src as *const HvxVector; let outp = dst as *mut HvxVector; @@ -122,18 +144,33 @@ unsafe fn horizontal_121_pass(src: *const u8, width: usize, dst: *mut u8) { }; // Left neighbor (x-1): shift curr right by 1 byte, filling from prev - // vlalign(curr, prev, 1) = { prev[VLEN-1], curr[0], curr[1], ..., curr[VLEN-2] } let left = q6_v_vlalign_vvr(curr, prev, 1); // Right neighbor (x+1): shift curr left by 1 byte, filling from next - // valign(next, curr, 1) = { curr[1], curr[2], ..., curr[VLEN-1], next[0] } let right = q6_v_valign_vvr(next, curr, 1); - // avg(left, right) ≈ (src[x-1] + src[x+1]) / 2 - let avg_lr = q6_vub_vavg_vubvub_rnd(left, right); - // avg(avg_lr, curr) ≈ ((src[x-1] + src[x+1])/2 + src[x]) / 2 - // ≈ (src[x-1] + 2*src[x] + src[x+1]) / 4 - let result = q6_vub_vavg_vubvub_rnd(avg_lr, curr); + // Widen left + right to 16-bit + let left_plus_right: HvxVectorPair = q6_wh_vadd_vubvub(left, right); + + // Widen center * 2 + let center_x2: HvxVectorPair = q6_wh_vadd_vubvub(curr, curr); + + // Add: left + 2*center + right + let sum: HvxVectorPair = q6_wh_vadd_whwh(left_plus_right, center_x2); + + // Extract high and low vectors + let sum_lo = q6_v_lo_w(sum); + let sum_hi = q6_v_hi_w(sum); + + // Arithmetic right shift by 2 with rounding + let two = q6_vh_vsplat_r(2); + let sum_lo_rounded = q6_vh_vadd_vhvh(sum_lo, two); + let sum_hi_rounded = q6_vh_vadd_vhvh(sum_hi, two); + let shifted_lo = q6_vh_vasr_vhvh(sum_lo_rounded, two); + let shifted_hi = q6_vh_vasr_vhvh(sum_hi_rounded, two); + + // Pack back to u8 with saturation + let result = q6_vub_vsat_vhvh(shifted_hi, shifted_lo); *outp.add(i) = result; @@ -156,7 +193,7 @@ unsafe fn 
horizontal_121_pass(src: *const u8, width: usize, dst: *mut u8) { /// - `width` must be a multiple of VLEN and >= VLEN /// - `stride` must be >= `width` /// - All buffers must be HVX-aligned (128-byte for 128B mode) -#[target_feature(enable = "hvxv60")] +#[target_feature(enable = "hvxv62")] pub unsafe fn gaussian3x3u8( src: *const u8, stride: usize, @@ -180,7 +217,7 @@ pub unsafe fn gaussian3x3u8( } } -/// Reference C implementation from Hexagon SDK (Gaussian3x3u8) +/// Reference implementation from Hexagon SDK (Gaussian3x3u8) /// /// Kernel: /// 1 2 1 @@ -203,39 +240,6 @@ fn gaussian3x3u8_reference(src: &[u8], stride: usize, width: usize, height: usiz } } -/// Scalar approximation matching the HVX byte-averaging approach -/// -/// This matches the HVX implementation's behavior: -/// - Vertical: avg_rnd(avg_rnd(above, below), center) -/// - Horizontal: avg_rnd(avg_rnd(left, right), center) -/// where avg_rnd(a, b) = (a + b + 1) / 2 -fn gaussian3x3u8_approx(src: &[u8], stride: usize, width: usize, height: usize, dst: &mut [u8]) { - // Temporary buffer for vertical pass output - let mut tmp = vec![0u8; width * height]; - - // Vertical pass: 1-2-1 using rounding average - for y in 1..height - 1 { - for x in 0..width { - let above = src[(y - 1) * stride + x] as u16; - let center = src[y * stride + x] as u16; - let below = src[(y + 1) * stride + x] as u16; - let avg_ab = ((above + below + 1) / 2) as u8; - tmp[y * width + x] = ((avg_ab as u16 + center + 1) / 2) as u8; - } - } - - // Horizontal pass: 1-2-1 using rounding average - for y in 1..height - 1 { - for x in 1..width - 1 { - let left = tmp[y * width + (x - 1)] as u16; - let center = tmp[y * width + x] as u16; - let right = tmp[y * width + (x + 1)] as u16; - let avg_lr = ((left + right + 1) / 2) as u8; - dst[y * stride + x] = ((avg_lr as u16 + center + 1) / 2) as u8; - } - } -} - /// Generate deterministic test pattern fn generate_test_pattern(buf: &mut [u8], width: usize, height: usize) { for y in 0..height { @@ 
-254,7 +258,6 @@ fn main() { let mut dst_hvx = AlignedBuf::<{ WIDTH * HEIGHT }>([0u8; WIDTH * HEIGHT]); let mut tmp = AlignedBuf::<{ WIDTH }>([0u8; WIDTH]); let mut dst_ref = vec![0u8; WIDTH * HEIGHT]; - let mut dst_approx = vec![0u8; WIDTH * HEIGHT]; // Generate test pattern generate_test_pattern(&mut src.0, WIDTH, HEIGHT); @@ -274,30 +277,28 @@ fn main() { // Run reference gaussian3x3u8_reference(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_ref); - // Run scalar approximation (should match HVX exactly) - gaussian3x3u8_approx(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_approx); - - // Verify HVX matches the byte-averaging approximation exactly + // Verify HVX matches reference (allowing small rounding differences) + let mut max_diff = 0i32; for y in 1..HEIGHT - 1 { for x in 1..WIDTH - 1 { let idx = y * WIDTH + x; - assert_eq!( - dst_hvx.0[idx], dst_approx[idx], - "HVX output differs from scalar approximation at ({}, {}): hvx={}, approx={}", - x, y, dst_hvx.0[idx], dst_approx[idx] + let diff = (dst_hvx.0[idx] as i32 - dst_ref[idx] as i32).abs(); + max_diff = max_diff.max(diff); + // Allow up to 1 LSB difference due to rounding + assert!( + diff <= 1, + "HVX differs from reference at ({}, {}): hvx={}, ref={}, diff={}", + x, + y, + dst_hvx.0[idx], + dst_ref[idx], + diff ); } } - // Verify HVX exactly matches reference for this test pattern - for y in 1..HEIGHT - 1 { - for x in 1..WIDTH - 1 { - let idx = y * WIDTH + x; - assert_eq!( - dst_hvx.0[idx], dst_ref[idx], - "HVX differs from reference at ({}, {}): hvx={}, ref={}", - x, y, dst_hvx.0[idx], dst_ref[idx] - ); - } - } + println!( + "Gaussian 3x3 HVX test passed! Max difference from reference: {}", + max_diff + ); } From d6b2cb08a8b797dc6281c16f68bab6e58aece9ac Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Sun, 15 Feb 2026 07:03:46 -0600 Subject: [PATCH 51/90] examples: Make gaussian build on all targets Restructure gaussian.rs to follow the pattern used by hex.rs and connect5.rs. Remove the 'hexagon' feature gate. 
--- library/stdarch/examples/Cargo.toml | 6 +- library/stdarch/examples/gaussian.rs | 401 +++++++++++++++------------ 2 files changed, 225 insertions(+), 182 deletions(-) diff --git a/library/stdarch/examples/Cargo.toml b/library/stdarch/examples/Cargo.toml index 8ac14d3e446a8..c4fc4c7e374c8 100644 --- a/library/stdarch/examples/Cargo.toml +++ b/library/stdarch/examples/Cargo.toml @@ -10,10 +10,6 @@ description = "Examples of the stdarch crate." edition = "2024" default-run = "hex" -[features] -# Enable to build Hexagon-specific examples (requires hexagon target) -hexagon = [] - [dependencies] core_arch = { path = "../crates/core_arch" } quickcheck = "1.0" @@ -27,10 +23,10 @@ path = "hex.rs" name = "connect5" path = "connect5.rs" +# Hexagon-only: requires --target hexagon-unknown-linux-musl [[bin]] name = "gaussian" path = "gaussian.rs" -required-features = ["hexagon"] [[example]] name = "wasm" diff --git a/library/stdarch/examples/gaussian.rs b/library/stdarch/examples/gaussian.rs index 61fb5e68c6cb2..dea16f797aca6 100644 --- a/library/stdarch/examples/gaussian.rs +++ b/library/stdarch/examples/gaussian.rs @@ -10,29 +10,32 @@ //! //! This is a separable filter: `[1 2 1]^T * [1 2 1] / 16`. //! -//! This implementation uses `HvxVectorPair` for widening arithmetic to achieve -//! full precision in the Gaussian computation, avoiding the approximation errors -//! of byte-averaging approaches. +//! On Hexagon targets, this implementation uses `HvxVectorPair` for widening +//! arithmetic to achieve full precision in the Gaussian computation, avoiding +//! the approximation errors of byte-averaging approaches. On other targets, +//! it runs a reference implementation in pure Rust. //! -//! # Building and Running +//! # Building and Running (Hexagon) //! //! To build (requires Hexagon toolchain): //! //! RUSTFLAGS="-C target-feature=+hvxv62,+hvx-length128b \ //! -C linker=hexagon-unknown-linux-musl-clang" \ -//! 
cargo +nightly build --bin gaussian -p stdarch_examples \ -//! --features hexagon --target hexagon-unknown-linux-musl \ +//! cargo +nightly build -p stdarch_examples --bin gaussian \ +//! --target hexagon-unknown-linux-musl \ //! -Zbuild-std -Zbuild-std-features=llvm-libunwind //! //! To run under QEMU: //! //! qemu-hexagon -L /target/hexagon-unknown-linux-musl \ //! target/hexagon-unknown-linux-musl/debug/gaussian +//! +//! # Building and Running (Other targets) +//! +//! cargo +nightly run -p stdarch_examples --bin gaussian -// This example only compiles on Hexagon targets -#![cfg(target_arch = "hexagon")] -#![feature(stdarch_hexagon)] -#![feature(hexagon_target_feature)] +#![cfg_attr(target_arch = "hexagon", feature(stdarch_hexagon))] +#![cfg_attr(target_arch = "hexagon", feature(hexagon_target_feature))] #![allow( unsafe_op_in_unsafe_fn, clippy::unwrap_used, @@ -42,182 +45,193 @@ clippy::cast_ptr_alignment )] -#[cfg(not(target_feature = "hvx-length128b"))] -use core_arch::arch::hexagon::v64::*; -#[cfg(target_feature = "hvx-length128b")] -use core_arch::arch::hexagon::v128::*; - -/// Vector length in bytes for HVX 128-byte mode -#[cfg(target_feature = "hvx-length128b")] -const VLEN: usize = 128; - -/// Vector length in bytes for HVX 64-byte mode -#[cfg(not(target_feature = "hvx-length128b"))] -const VLEN: usize = 64; - -/// Image width - must be multiple of VLEN +/// Image width - must be multiple of HVX vector length on Hexagon const WIDTH: usize = 256; const HEIGHT: usize = 16; -/// Vertical 1-2-1 filter pass using HvxVectorPair widening arithmetic -/// -/// Computes: dst[x] = (row_above[x] + 2*center[x] + row_below[x] + 2) >> 2 -/// -/// Uses HvxVectorPair to widen u8 to u16 for precise arithmetic, avoiding -/// the rounding errors of byte-averaging approximations. 
-/// -/// # Safety -/// -/// - `src` must point to the center row with valid data at -stride and +stride -/// - `dst` must point to a valid output buffer for `width` bytes -/// - `width` must be a multiple of VLEN -/// - All pointers must be HVX-aligned (128-byte for 128B mode) -#[target_feature(enable = "hvxv62")] -unsafe fn vertical_121_pass(src: *const u8, stride: isize, width: usize, dst: *mut u8) { - let inp0 = src.offset(-stride) as *const HvxVector; - let inp1 = src as *const HvxVector; - let inp2 = src.offset(stride) as *const HvxVector; - let outp = dst as *mut HvxVector; - - let n_chunks = width / VLEN; - for i in 0..n_chunks { - let above = *inp0.add(i); - let center = *inp1.add(i); - let below = *inp2.add(i); - - // Widen above + below to 16-bit using HvxVectorPair - // q6_wh_vadd_vubvub: adds two u8 vectors, producing u16 results in a pair - let above_plus_below: HvxVectorPair = q6_wh_vadd_vubvub(above, below); - - // Widen center * 2 (add center to itself) - let center_x2: HvxVectorPair = q6_wh_vadd_vubvub(center, center); - - // Add them: (above + below) + (center * 2) = above + 2*center + below - let sum: HvxVectorPair = q6_wh_vadd_whwh(above_plus_below, center_x2); - - // Extract high and low vectors from the pair (each contains u16 values) - let sum_lo = q6_v_lo_w(sum); // Lower 64 elements as i16 - let sum_hi = q6_v_hi_w(sum); // Upper 64 elements as i16 - - // Arithmetic right shift by 2 (divide by 4) with rounding - // Add 2 for rounding before shift: (sum + 2) >> 2 - let two = q6_vh_vsplat_r(2); - let sum_lo_rounded = q6_vh_vadd_vhvh(sum_lo, two); - let sum_hi_rounded = q6_vh_vadd_vhvh(sum_hi, two); - let shifted_lo = q6_vh_vasr_vhvh(sum_lo_rounded, two); - let shifted_hi = q6_vh_vasr_vhvh(sum_hi_rounded, two); - - // Pack back to u8 with saturation: takes hi and lo halfword vectors, - // saturates to u8, and interleaves them back to original order - let result = q6_vub_vsat_vhvh(shifted_hi, shifted_lo); - - *outp.add(i) = result; +// 
============================================================================ +// Hexagon HVX implementation +// ============================================================================ + +#[cfg(target_arch = "hexagon")] +mod hvx { + #[cfg(not(target_feature = "hvx-length128b"))] + use core_arch::arch::hexagon::v64::*; + #[cfg(target_feature = "hvx-length128b")] + use core_arch::arch::hexagon::v128::*; + + /// Vector length in bytes for HVX 128-byte mode + #[cfg(target_feature = "hvx-length128b")] + const VLEN: usize = 128; + + /// Vector length in bytes for HVX 64-byte mode + #[cfg(not(target_feature = "hvx-length128b"))] + const VLEN: usize = 64; + + /// Vertical 1-2-1 filter pass using HvxVectorPair widening arithmetic + /// + /// Computes: dst[x] = (row_above[x] + 2*center[x] + row_below[x] + 2) >> 2 + /// + /// Uses HvxVectorPair to widen u8 to u16 for precise arithmetic, avoiding + /// the rounding errors of byte-averaging approximations. + /// + /// # Safety + /// + /// - `src` must point to the center row with valid data at -stride and +stride + /// - `dst` must point to a valid output buffer for `width` bytes + /// - `width` must be a multiple of VLEN + /// - All pointers must be HVX-aligned (128-byte for 128B mode) + #[target_feature(enable = "hvxv62")] + unsafe fn vertical_121_pass(src: *const u8, stride: isize, width: usize, dst: *mut u8) { + let inp0 = src.offset(-stride) as *const HvxVector; + let inp1 = src as *const HvxVector; + let inp2 = src.offset(stride) as *const HvxVector; + let outp = dst as *mut HvxVector; + + let n_chunks = width / VLEN; + for i in 0..n_chunks { + let above = *inp0.add(i); + let center = *inp1.add(i); + let below = *inp2.add(i); + + // Widen above + below to 16-bit using HvxVectorPair + // q6_wh_vadd_vubvub: adds two u8 vectors, producing u16 results in a pair + let above_plus_below: HvxVectorPair = q6_wh_vadd_vubvub(above, below); + + // Widen center * 2 (add center to itself) + let center_x2: HvxVectorPair = 
q6_wh_vadd_vubvub(center, center); + + // Add them: (above + below) + (center * 2) = above + 2*center + below + let sum: HvxVectorPair = q6_wh_vadd_whwh(above_plus_below, center_x2); + + // Extract high and low vectors from the pair (each contains u16 values) + let sum_lo = q6_v_lo_w(sum); // Lower 64 elements as i16 + let sum_hi = q6_v_hi_w(sum); // Upper 64 elements as i16 + + // Arithmetic right shift by 2 (divide by 4) with rounding + // Add 2 for rounding before shift: (sum + 2) >> 2 + let two = q6_vh_vsplat_r(2); + let sum_lo_rounded = q6_vh_vadd_vhvh(sum_lo, two); + let sum_hi_rounded = q6_vh_vadd_vhvh(sum_hi, two); + let shifted_lo = q6_vh_vasr_vhvh(sum_lo_rounded, two); + let shifted_hi = q6_vh_vasr_vhvh(sum_hi_rounded, two); + + // Pack back to u8 with saturation: takes hi and lo halfword vectors, + // saturates to u8, and interleaves them back to original order + let result = q6_vub_vsat_vhvh(shifted_hi, shifted_lo); + + *outp.add(i) = result; + } } -} - -/// Horizontal 1-2-1 filter pass using HvxVectorPair widening arithmetic -/// -/// Computes: dst[x] = (src[x-1] + 2*src[x] + src[x+1] + 2) >> 2 -/// -/// Uses `valign` and `vlalign` to shift vectors by 1 byte for neighbor access, -/// then HvxVectorPair for precise widening arithmetic. 
-/// -/// # Safety -/// -/// - `src` and `dst` must point to valid buffers of `width` bytes -/// - `width` must be a multiple of VLEN -/// - All pointers must be HVX-aligned -#[target_feature(enable = "hvxv62")] -unsafe fn horizontal_121_pass(src: *const u8, width: usize, dst: *mut u8) { - let inp = src as *const HvxVector; - let outp = dst as *mut HvxVector; - - let n_chunks = width / VLEN; - let mut prev = q6_v_vzero(); - - for i in 0..n_chunks { - let curr = *inp.add(i); - let next = if i + 1 < n_chunks { - *inp.add(i + 1) - } else { - q6_v_vzero() - }; - - // Left neighbor (x-1): shift curr right by 1 byte, filling from prev - let left = q6_v_vlalign_vvr(curr, prev, 1); - - // Right neighbor (x+1): shift curr left by 1 byte, filling from next - let right = q6_v_valign_vvr(next, curr, 1); - - // Widen left + right to 16-bit - let left_plus_right: HvxVectorPair = q6_wh_vadd_vubvub(left, right); - // Widen center * 2 - let center_x2: HvxVectorPair = q6_wh_vadd_vubvub(curr, curr); - - // Add: left + 2*center + right - let sum: HvxVectorPair = q6_wh_vadd_whwh(left_plus_right, center_x2); - - // Extract high and low vectors - let sum_lo = q6_v_lo_w(sum); - let sum_hi = q6_v_hi_w(sum); - - // Arithmetic right shift by 2 with rounding - let two = q6_vh_vsplat_r(2); - let sum_lo_rounded = q6_vh_vadd_vhvh(sum_lo, two); - let sum_hi_rounded = q6_vh_vadd_vhvh(sum_hi, two); - let shifted_lo = q6_vh_vasr_vhvh(sum_lo_rounded, two); - let shifted_hi = q6_vh_vasr_vhvh(sum_hi_rounded, two); - - // Pack back to u8 with saturation - let result = q6_vub_vsat_vhvh(shifted_hi, shifted_lo); - - *outp.add(i) = result; - - prev = curr; + /// Horizontal 1-2-1 filter pass using HvxVectorPair widening arithmetic + /// + /// Computes: dst[x] = (src[x-1] + 2*src[x] + src[x+1] + 2) >> 2 + /// + /// Uses `valign` and `vlalign` to shift vectors by 1 byte for neighbor access, + /// then HvxVectorPair for precise widening arithmetic. 
+ /// + /// # Safety + /// + /// - `src` and `dst` must point to valid buffers of `width` bytes + /// - `width` must be a multiple of VLEN + /// - All pointers must be HVX-aligned + #[target_feature(enable = "hvxv62")] + unsafe fn horizontal_121_pass(src: *const u8, width: usize, dst: *mut u8) { + let inp = src as *const HvxVector; + let outp = dst as *mut HvxVector; + + let n_chunks = width / VLEN; + let mut prev = q6_v_vzero(); + + for i in 0..n_chunks { + let curr = *inp.add(i); + let next = if i + 1 < n_chunks { + *inp.add(i + 1) + } else { + q6_v_vzero() + }; + + // Left neighbor (x-1): shift curr right by 1 byte, filling from prev + let left = q6_v_vlalign_vvr(curr, prev, 1); + + // Right neighbor (x+1): shift curr left by 1 byte, filling from next + let right = q6_v_valign_vvr(next, curr, 1); + + // Widen left + right to 16-bit + let left_plus_right: HvxVectorPair = q6_wh_vadd_vubvub(left, right); + + // Widen center * 2 + let center_x2: HvxVectorPair = q6_wh_vadd_vubvub(curr, curr); + + // Add: left + 2*center + right + let sum: HvxVectorPair = q6_wh_vadd_whwh(left_plus_right, center_x2); + + // Extract high and low vectors + let sum_lo = q6_v_lo_w(sum); + let sum_hi = q6_v_hi_w(sum); + + // Arithmetic right shift by 2 with rounding + let two = q6_vh_vsplat_r(2); + let sum_lo_rounded = q6_vh_vadd_vhvh(sum_lo, two); + let sum_hi_rounded = q6_vh_vadd_vhvh(sum_hi, two); + let shifted_lo = q6_vh_vasr_vhvh(sum_lo_rounded, two); + let shifted_hi = q6_vh_vasr_vhvh(sum_hi_rounded, two); + + // Pack back to u8 with saturation + let result = q6_vub_vsat_vhvh(shifted_hi, shifted_lo); + + *outp.add(i) = result; + + prev = curr; + } } -} - -/// Apply Gaussian 3x3 blur to an entire image using separable filtering -/// -/// Two-pass approach: -/// 1. Vertical pass: apply 1-2-1 filter across rows -/// 2. 
Horizontal pass: apply 1-2-1 filter across columns -/// -/// Combined effect: 3x3 Gaussian kernel [1 2 1; 2 4 2; 1 2 1] / 16 -/// -/// # Safety -/// -/// - `src` and `dst` must point to valid image buffers of `stride * height` bytes -/// - `tmp` must point to a valid temporary buffer of `width` bytes, HVX-aligned -/// - `width` must be a multiple of VLEN and >= VLEN -/// - `stride` must be >= `width` -/// - All buffers must be HVX-aligned (128-byte for 128B mode) -#[target_feature(enable = "hvxv62")] -pub unsafe fn gaussian3x3u8( - src: *const u8, - stride: usize, - width: usize, - height: usize, - dst: *mut u8, - tmp: *mut u8, -) { - let stride_i = stride as isize; - - // Process interior rows (skip first and last which lack vertical neighbors) - for y in 1..height - 1 { - let row_src = src.offset(y as isize * stride_i); - let row_dst = dst.offset(y as isize * stride_i); - // Pass 1: vertical 1-2-1 into tmp - vertical_121_pass(row_src, stride_i, width, tmp); - - // Pass 2: horizontal 1-2-1 from tmp into dst - horizontal_121_pass(tmp, width, row_dst); + /// Apply Gaussian 3x3 blur to an entire image using separable filtering + /// + /// Two-pass approach: + /// 1. Vertical pass: apply 1-2-1 filter across rows + /// 2. 
Horizontal pass: apply 1-2-1 filter across columns + /// + /// Combined effect: 3x3 Gaussian kernel [1 2 1; 2 4 2; 1 2 1] / 16 + /// + /// # Safety + /// + /// - `src` and `dst` must point to valid image buffers of `stride * height` bytes + /// - `tmp` must point to a valid temporary buffer of `width` bytes, HVX-aligned + /// - `width` must be a multiple of VLEN and >= VLEN + /// - `stride` must be >= `width` + /// - All buffers must be HVX-aligned (128-byte for 128B mode) + #[target_feature(enable = "hvxv62")] + pub unsafe fn gaussian3x3u8( + src: *const u8, + stride: usize, + width: usize, + height: usize, + dst: *mut u8, + tmp: *mut u8, + ) { + let stride_i = stride as isize; + + // Process interior rows (skip first and last which lack vertical neighbors) + for y in 1..height - 1 { + let row_src = src.offset(y as isize * stride_i); + let row_dst = dst.offset(y as isize * stride_i); + + // Pass 1: vertical 1-2-1 into tmp + vertical_121_pass(row_src, stride_i, width, tmp); + + // Pass 2: horizontal 1-2-1 from tmp into dst + horizontal_121_pass(tmp, width, row_dst); + } } } -/// Reference implementation from Hexagon SDK (Gaussian3x3u8) +// ============================================================================ +// Reference implementation (works on all targets) +// ============================================================================ + +/// Reference implementation of Gaussian 3x3 blur /// /// Kernel: /// 1 2 1 @@ -249,6 +263,11 @@ fn generate_test_pattern(buf: &mut [u8], width: usize, height: usize) { } } +// ============================================================================ +// Main: runs HVX + reference on Hexagon, reference-only on other targets +// ============================================================================ + +#[cfg(target_arch = "hexagon")] fn main() { // Aligned buffers for HVX #[repr(align(128))] @@ -264,7 +283,7 @@ fn main() { // Run HVX implementation unsafe { - gaussian3x3u8( + hvx::gaussian3x3u8( src.0.as_ptr(), 
WIDTH, WIDTH, @@ -302,3 +321,31 @@ fn main() { max_diff ); } + +#[cfg(not(target_arch = "hexagon"))] +fn main() { + let mut src = vec![0u8; WIDTH * HEIGHT]; + let mut dst = vec![0u8; WIDTH * HEIGHT]; + + // Generate test pattern + generate_test_pattern(&mut src, WIDTH, HEIGHT); + + // Run reference implementation + gaussian3x3u8_reference(&src, WIDTH, WIDTH, HEIGHT, &mut dst); + + // Verify output is non-trivial (blurred values differ from input) + let mut changed = 0; + for y in 1..HEIGHT - 1 { + for x in 1..WIDTH - 1 { + let idx = y * WIDTH + x; + if src[idx] != dst[idx] { + changed += 1; + } + } + } + + println!( + "Gaussian 3x3 reference test passed! {} pixels changed by blur", + changed + ); +} From 0e08703130e3597dc896c88db8d6030838306f24 Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Sun, 15 Feb 2026 11:57:54 -0600 Subject: [PATCH 52/90] stdarch-gen-hexagon: Use checked-in header file instead of downloading Check in the LLVM HVX header file (hvx_hexagon_protos.h) from LLVM 22.1.0-rc1 and modify the generator to read from this local copy instead of downloading it at runtime. This removes the ureq dependency and makes the build more reproducible. --- library/stdarch/Cargo.lock | 513 +- .../crates/stdarch-gen-hexagon/Cargo.toml | 1 - .../stdarch-gen-hexagon/hvx_hexagon_protos.h | 6003 +++++++++++++++++ .../crates/stdarch-gen-hexagon/src/main.rs | 45 +- 4 files changed, 6028 insertions(+), 534 deletions(-) create mode 100644 library/stdarch/crates/stdarch-gen-hexagon/hvx_hexagon_protos.h diff --git a/library/stdarch/Cargo.lock b/library/stdarch/Cargo.lock index 36b2b09acb2cb..7e7cb592889a8 100644 --- a/library/stdarch/Cargo.lock +++ b/library/stdarch/Cargo.lock @@ -2,12 +2,6 @@ # It is not intended for manual editing. 
version = 4 -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - [[package]] name = "aho-corasick" version = "1.1.4" @@ -53,7 +47,7 @@ version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.61.2", + "windows-sys", ] [[package]] @@ -64,7 +58,7 @@ checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.61.2", + "windows-sys", ] [[package]] @@ -88,12 +82,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" -[[package]] -name = "base64" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" - [[package]] name = "bitflags" version = "2.10.0" @@ -170,15 +158,6 @@ dependencies = [ "syscalls", ] -[[package]] -name = "crc32fast" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" -dependencies = [ - "cfg-if", -] - [[package]] name = "crossbeam-deque" version = "0.8.6" @@ -245,17 +224,6 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" -[[package]] -name = "displaydoc" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "either" version = "1.15.0" @@ -307,16 +275,6 @@ version = 
"0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" -[[package]] -name = "flate2" -version = "1.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - [[package]] name = "fnv" version = "1.0.7" @@ -329,15 +287,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" -[[package]] -name = "form_urlencoded" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" -dependencies = [ - "percent-encoding", -] - [[package]] name = "getrandom" version = "0.2.17" @@ -402,87 +351,6 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" -[[package]] -name = "icu_collections" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" -dependencies = [ - "displaydoc", - "potential_utf", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locale_core" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_normalizer" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" -dependencies = [ - "icu_collections", - "icu_normalizer_data", - "icu_properties", - 
"icu_provider", - "smallvec", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" - -[[package]] -name = "icu_properties" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" -dependencies = [ - "icu_collections", - "icu_locale_core", - "icu_properties_data", - "icu_provider", - "zerotrie", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" - -[[package]] -name = "icu_provider" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" -dependencies = [ - "displaydoc", - "icu_locale_core", - "writeable", - "yoke", - "zerofrom", - "zerotrie", - "zerovec", -] - [[package]] name = "id-arena" version = "2.3.0" @@ -495,27 +363,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" -dependencies = [ - "idna_adapter", - "smallvec", - "utf8_iter", -] - -[[package]] -name = "idna_adapter" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" -dependencies = [ - "icu_normalizer", - "icu_properties", -] - [[package]] name = "indexmap" version = "1.9.3" @@ -563,7 +410,7 @@ checksum = 
"3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.61.2", + "windows-sys", ] [[package]] @@ -605,12 +452,6 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" -[[package]] -name = "litemap" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" - [[package]] name = "log" version = "0.4.29" @@ -623,43 +464,12 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" -[[package]] -name = "miniz_oxide" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", - "simd-adler32", -] - -[[package]] -name = "once_cell" -version = "1.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" - [[package]] name = "once_cell_polyfill" version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" -[[package]] -name = "percent-encoding" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" - -[[package]] -name = "potential_utf" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" -dependencies = [ - "zerovec", -] - [[package]] name = "ppv-lite86" version = "0.2.21" @@ -839,61 +649,12 @@ version = "0.8.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" -[[package]] -name = "ring" -version = "0.17.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" -dependencies = [ - "cc", - "cfg-if", - "getrandom 0.2.17", - "libc", - "untrusted", - "windows-sys 0.52.0", -] - [[package]] name = "rustc-demangle" version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" -[[package]] -name = "rustls" -version = "0.23.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" -dependencies = [ - "log", - "once_cell", - "ring", - "rustls-pki-types", - "rustls-webpki", - "subtle", - "zeroize", -] - -[[package]] -name = "rustls-pki-types" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" -dependencies = [ - "zeroize", -] - -[[package]] -name = "rustls-webpki" -version = "0.103.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" -dependencies = [ - "ring", - "rustls-pki-types", - "untrusted", -] - [[package]] name = "ryu" version = "1.0.23" @@ -1010,12 +771,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" -[[package]] -name = "simd-adler32" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" - [[package]] name = "simd-test-macro" version = "0.1.0" @@ -1025,18 +780,6 @@ dependencies = [ 
"syn", ] -[[package]] -name = "smallvec" -version = "1.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" - -[[package]] -name = "stable_deref_trait" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" - [[package]] name = "stdarch-gen-arm" version = "0.1.0" @@ -1056,7 +799,6 @@ name = "stdarch-gen-hexagon" version = "0.1.0" dependencies = [ "regex", - "ureq", ] [[package]] @@ -1105,12 +847,6 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" -[[package]] -name = "subtle" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" - [[package]] name = "syn" version = "2.0.115" @@ -1122,17 +858,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "synstructure" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "syscalls" version = "0.6.18" @@ -1168,16 +893,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tinystr" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" -dependencies = [ - "displaydoc", - "zerovec", -] - [[package]] name = "unicode-ident" version = "1.0.23" @@ -1190,46 +905,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" -[[package]] -name = "untrusted" -version = "0.9.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" - -[[package]] -name = "ureq" -version = "2.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" -dependencies = [ - "base64", - "flate2", - "log", - "once_cell", - "rustls", - "rustls-pki-types", - "url", - "webpki-roots 0.26.11", -] - -[[package]] -name = "url" -version = "2.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", - "serde", -] - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - [[package]] name = "utf8parse" version = "0.2.2" @@ -1326,31 +1001,13 @@ dependencies = [ "wasmparser 0.235.0", ] -[[package]] -name = "webpki-roots" -version = "0.26.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" -dependencies = [ - "webpki-roots 1.0.6", -] - -[[package]] -name = "webpki-roots" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "winapi-util" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys", ] [[package]] @@ -1359,15 +1016,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets", -] - [[package]] name = "windows-sys" version = "0.61.2" @@ -1377,70 +1025,6 @@ dependencies = [ "windows-link", ] -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - [[package]] name = "wit-bindgen" version = "0.51.0" @@ -1529,12 +1113,6 @@ dependencies = [ "wasmparser 0.244.0", ] -[[package]] -name = "writeable" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" - [[package]] name = "xml" version = "1.2.1" @@ -1550,29 +1128,6 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "yoke" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" -dependencies = [ - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - [[package]] name = "zerocopy" version = "0.8.39" @@ -1593,66 +1148,6 @@ dependencies = [ "syn", ] -[[package]] -name = "zerofrom" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "zeroize" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" - -[[package]] -name = "zerotrie" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", -] - -[[package]] -name = "zerovec" -version = "0.11.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "zmij" version = "1.0.21" diff --git a/library/stdarch/crates/stdarch-gen-hexagon/Cargo.toml b/library/stdarch/crates/stdarch-gen-hexagon/Cargo.toml index f8c446c1d15a0..397c7816f8d1e 100644 --- a/library/stdarch/crates/stdarch-gen-hexagon/Cargo.toml +++ b/library/stdarch/crates/stdarch-gen-hexagon/Cargo.toml @@ -7,4 +7,3 @@ edition = "2021" [dependencies] regex = "1.10" -ureq = "2.9" diff --git a/library/stdarch/crates/stdarch-gen-hexagon/hvx_hexagon_protos.h b/library/stdarch/crates/stdarch-gen-hexagon/hvx_hexagon_protos.h new file mode 100644 index 0000000000000..19309a40d6dd1 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-hexagon/hvx_hexagon_protos.h @@ -0,0 +1,6003 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM 
Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// Automatically generated file, do not edit! +//===----------------------------------------------------------------------===// + + +#ifndef _HVX_HEXAGON_PROTOS_H_ +#define _HVX_HEXAGON_PROTOS_H_ 1 + +#ifdef __HVX__ +#if __HVX_LENGTH__ == 128 +#define __BUILTIN_VECTOR_WRAP(a) a ## _128B +#else +#define __BUILTIN_VECTOR_WRAP(a) a +#endif + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rd32=vextract(Vu32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_vextract_VR(HVX_Vector Vu, Word32 Rs) + Instruction Type: LD + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_R_vextract_VR(Vu,Rs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)(Vu,Rs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=hi(Vss32) + C Intrinsic Prototype: HVX_Vector Q6_V_hi_W(HVX_VectorPair Vss) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_hi_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)(Vss) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=lo(Vss32) + C Intrinsic Prototype: HVX_Vector Q6_V_lo_W(HVX_VectorPair Vss) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_lo_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)(Vss) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32=vsplat(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vsplat_R(Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)(Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=and(Qs4,Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_and_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=and(Qs4,!Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_and_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=not(Qs4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_not_Q(HVX_VectorPred Qs) + 
Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_not_Q(Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=or(Qs4,Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_or_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=or(Qs4,!Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_or_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vsetq(Rt32) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq_R(Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Q_vsetq_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)(Rt)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=xor(Qs4,Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_xor_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_xor_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) vmem(Rt32+#s4)=Vs32 + C Intrinsic Prototype: void Q6_vmem_QnRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QnRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) vmem(Rt32+#s4):nt=Vs32 + C Intrinsic Prototype: void Q6_vmem_QnRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QnRIV_nt(Qv,Rt,Vs) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) vmem(Rt32+#s4):nt=Vs32 + C Intrinsic Prototype: void Q6_vmem_QRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) vmem(Rt32+#s4)=Vs32 + C Intrinsic Prototype: void Q6_vmem_QRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vabsdiff(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuh_vabsdiff_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: 
Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vabsdiff_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vub_vabsdiff_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuh_vabsdiff_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vabsdiff(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vabsdiff_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vabsdiff_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vabs(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vabs_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.h=vabs(Vu32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh_sat(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vabs_Vh_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vabs(Vu32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vabs_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vabs(Vu32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw_sat(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vabs_Vw_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vadd(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vadd_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== 
+ Assembly Syntax: Vdd32.b=vadd(Vuu32.b,Vvv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vadd_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.b+=Vu32.b + C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_condacc_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.b+=Vu32.b + C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_condacc_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vadd(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vadd_VhVh(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vadd(Vuu32.h,Vvv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vadd_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.h+=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_condacc_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.h+=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_condacc_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vadd(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector 
Q6_Vh_vadd_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vadd_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vadd_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vadd(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vadd(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vadd_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: 
Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vadd_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vadd_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wub_vadd_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vadd_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vadd_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vadd_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vadd_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)(Vuu,Vvv) +#endif /* 
__HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vadd(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vadd_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vadd(Vuu32.w,Vvv32.w) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vadd_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.w+=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_condacc_QnVwVw(Qv,Vx,Vu) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.w+=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_condacc_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vadd_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=valign(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_Vector 
Q6_V_valign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_valign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=valign(Vu32,Vv32,#u3) + C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_valign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)(Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vand(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vand_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vand(Qu4,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QR(HVX_VectorPred Qu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vand_QR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly 
Syntax: Vx32|=vand(Qu4,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vandor_VQR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vand(Vu32,Rt32) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vand_VR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Q_vand_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)(Vu,Rt)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vand(Vu32,Rt32) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vandor_QVR(HVX_VectorPred Qx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Q_vandor_QVR(Qx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Rt)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasl(Vu32.h,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vh_vasl_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasl(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasl_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vasl(Vu32.w,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vasl_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vasl(Vu32.w,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vaslacc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vaslacc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vasl(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: 
SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vasl_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasr(Vu32.h,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasr_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat + C Intrinsic Prototype: 
HVX_Vector Q6_Vub_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasr(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vasr(Vu32.w,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vasr_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vasr(Vu32.w,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vasracc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vasracc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: 
Vd32.h=vasr(Vu32.w,Vv32.w,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasr_VwVwR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)(Vu,Vv,Rt) +#endif /* 
__HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vasr(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vasr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=Vu32 + C Intrinsic Prototype: HVX_Vector Q6_V_equals_V(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_equals_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32=Vuu32 + C Intrinsic Prototype: HVX_VectorPair Q6_W_equals_W(HVX_VectorPair Vuu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_equals_W(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)(Vuu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vavg(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.h=vavg(Vu32.h,Vv32.h):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vavg_VhVh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vavg(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vavg_VubVub_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vavg(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vavg_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)(Vu,Vv) +#endif /* 
__HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vavg_VuhVuh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vavg(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vavg(Vu32.w,Vv32.w):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vavg_VwVw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vcl0(Vu32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcl0_Vuh(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vcl0_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)(Vu) +#endif 
/* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vcl0(Vu32.uw) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vcl0_Vuw(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vcl0_Vuw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32=vcombine(Vu32,Vv32) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vcombine_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vcombine_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=#0 + C Intrinsic Prototype: HVX_Vector Q6_V_vzero() + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)() +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vdeal(Vu32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeal_Vb(HVX_Vector Vu) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vdeal_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.b=vdeale(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeale_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vdeale_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vdeal(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vdeal_Vh(HVX_Vector Vu) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vdeal_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32=vdeal(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vdeal_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vdeal_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vdelta(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vdelta_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.h=vdmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpy_VubRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vdmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vdmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpyacc_VhVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vdmpyacc_VhVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vdmpy(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vdmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vdmpyacc_WhWubRb(Vxx,Vuu,Rt) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vdmpy(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vdmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vdmpy(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Ww_vdmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRh_sat(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_WhRh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwWhRh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)(Vx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_VhRh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat + C Intrinsic 
Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwVhRh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRuh_sat(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_WhRuh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRuh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwWhRuh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)(Vx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_VhRuh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if 
__HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwVhRuh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhVh_sat(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwVhVh_sat(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsad_WuhRuh(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + 
+#define Q6_Wuw_vdsad_WuhRuh(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsadacc_WuwWuhRuh(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vdsadacc_WuwWuhRuh(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.eq(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eq_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.eq(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Qx4|=vcmp.eq(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.eq(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.eq(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eq_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.eq(Vu32.h,Vv32.h) 
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.eq(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.eq(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.eq(Vu32.w,Vv32.w) + C Intrinsic Prototype: 
HVX_VectorPred Q6_Q_vcmp_eq_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eq_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.eq(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.eq(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.eq(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + 
Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define 
Q6_Q_vcmp_gtor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVhVh(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VubVub(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVubVub(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVuhVuh(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVuwVuw(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VwVw(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVwVw(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w=vinsert(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vinsert_VwR(HVX_Vector Vx, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vinsert_VwR(Vx,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)(Vx,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vlalign(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vlalign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vlalign(Vu32,Vv32,#u3) + C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vlalign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)(Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vlsr(Vu32.uh,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vlsr_VuhR(HVX_Vector Vu, Word32 Rt) + 
Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vlsr_VuhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vlsr(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vlsr_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vlsr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vlsr(Vu32.uw,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vlsr_VuwR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vlsr_VuwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vlsr(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vlsr_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vlsr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR(HVX_Vector Vu, HVX_Vector Vv, 
Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32_VbVbR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbR(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32or_VbVbVbR(Vx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)(Vx,Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16_VbVhR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhR(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16or_WhVbVhR(Vxx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)(Vxx,Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.h=vmax(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmax_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vmax_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vmax(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vmax_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vmax_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vmax(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmax_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vmax_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmax(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmax_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vmax_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ 
>= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmin(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmin_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vmin_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vmin(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vmin_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vmin_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vmin(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmin_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vmin_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmin(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmin_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vmin_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if 
__HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpa_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpa(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpaacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Vvv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpa_WubWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWub(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_Wh_vmpa_WubWub(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpa(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpa_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpa(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpaacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubRb(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution 
Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpyacc_WhVubRb(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)(Vxx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpyacc_WhVubVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: 
HVX_VectorPair Q6_Wh_vmpyacc_WhVbVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpyacc_WhVbVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpye(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Rt32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhRh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpy_VhRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh_sat(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhRh_sat(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)(Vxx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpy_VhRh_s1_rnd_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpy_VhRh_s1_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpy_VhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhVuh(Vxx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpy_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpy_VhVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyieo(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieo_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Vw_vmpyieo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyie(Vu32.w,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyieacc_VwVwVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyie(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyie_VwVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyie_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyie(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVuh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyieacc_VwVwVuh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpyi(Vu32.h,Vv32.h) + C Intrinsic Prototype: 
HVX_Vector Q6_Vh_vmpyi_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyi_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vmpyi(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyiacc_VhVhVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpyi(Vu32.h,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyi_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vmpyi(Vu32.h,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyiacc_VhVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + 
Assembly Syntax: Vd32.w=vmpyio(Vu32.w,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyio_VwVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyio_VwVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyi_VwRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyiacc_VwVwRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyi_VwRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyiacc_VwVwRh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyo_VwVh_s1_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + 
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubRub(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubRub(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpyacc_WuhVubRub(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)(Vxx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: 
HVX_VectorPair Q6_Wuh_vmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpyacc_WuhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vmpy(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhRuh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpy_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhRuh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpyacc_WuwVuhRuh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)(Vxx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vdd32.uw=vmpy(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpy_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpyacc_WuwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vmux(Qt4,Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vmux_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vmux_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vnavg(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vnavg_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define 
Q6_Vh_vnavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vnavg(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vnavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vnavg(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vnavg_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vnavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vnormamt(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vnormamt_Vh(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vnormamt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vnormamt(Vu32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vnormamt_Vw(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vnormamt_Vw(Vu) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vnot(Vu32) + C Intrinsic Prototype: HVX_Vector Q6_V_vnot_V(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vnot_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vor(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vor_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vpacke(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacke_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vpacke_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpacke(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacke_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vpacke_VwVw(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vpack(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vpack(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vpacko(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacko_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vpacko_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpacko(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacko_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vh_vpacko_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpack(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vpack(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpopcount(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpopcount_Vh(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vpopcount_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vrdelta(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vrdelta_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + 
Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vrdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vrmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpyacc_VwVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpy_WubRbI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vrmpy_WubRbI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)(Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1) + C Intrinsic 
Prototype: HVX_VectorPair Q6_Ww_vrmpyacc_WwWubRbI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vrmpyacc_WwWubRbI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)(Vxx,Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vrmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpyacc_VwVubVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vrmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vx32.w+=vrmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpyacc_VwVbVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vrmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubRub(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpyacc_VuwVubRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpy_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrmpy_WubRubI(Vuu,Rt,Iu1) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)(Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpyacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrmpyacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)(Vxx,Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vrmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpyacc_VuwVubVub(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vror(Vu32,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vror_VR(HVX_Vector Vu, Word32 Rt) + Instruction Type: 
CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vror_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vround(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vround(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vround(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vround(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector 
Q6_Vuh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsad_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrsad_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)(Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsadacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrsadacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)(Vxx,Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vsat(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vsat_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vsat_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 
+/* ========================================================================== + Assembly Syntax: Vd32.h=vsat(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsat_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vsat_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsxt(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsxt_Vb(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vsxt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsxt(Vu32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsxt_Vh(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vsxt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vshuffe(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffe_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vshuffe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.b=vshuff(Vu32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuff_Vb(HVX_Vector Vu) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vshuff_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vshuffe(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffe_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vshuffe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vshuff(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuff_Vh(HVX_Vector Vu) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vshuff_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vshuffo(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffo_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vshuffo_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vdd32=vshuff(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vshuff_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vshuff_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vshuffoe(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vshuffoe_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vshuffoe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vshuffoe(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vshuffoe_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vshuffoe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vshuffo(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffo_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vshuffo_VhVh(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vsub(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vsub_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vsub(Vuu32.b,Vvv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vsub_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.b-=Vu32.b + C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_condnac_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.b-=Vu32.b + C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution 
Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_condnac_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vsub(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsub(Vuu32.h,Vvv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vsub_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.h-=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_condnac_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: if (Qv4) Vx32.h-=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_condnac_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vsub(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vsub_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vsub_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_Ww_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsub(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vsub_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vsub_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vsub_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wub_vsub_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsub_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: 
SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vsub_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vsub_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vsub_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vsub_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsub_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vuu32.w,Vvv32.w) + C Intrinsic Prototype: HVX_VectorPair 
Q6_Ww_vsub_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vsub_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.w-=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_condnac_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.w-=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_condnac_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsub_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)(Vu,Vv) +#endif /* 
__HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vsub_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32=vswap(Qt4,Vu32,Vv32) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vswap_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vswap_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vtmpy(Vuu32.b,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WbRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vtmpy_WbRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vtmpy(Vuu32.b,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWbRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Wh_vtmpyacc_WhWbRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vtmpy(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vtmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vtmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vtmpy(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vtmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vtmpy(Vuu32.h,Rt32.b) + C Intrinsic Prototype: 
HVX_VectorPair Q6_Ww_vtmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vtmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vunpack(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpack_Vb(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vunpack_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vunpack(Vu32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpack_Vh(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vunpack_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h|=vunpacko(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpackoor_WhVb(HVX_VectorPair Vxx, HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vunpackoor_WhVb(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)(Vxx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: 
Vxx32.w|=vunpacko(Vu32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpackoor_WwVh(HVX_VectorPair Vxx, HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vunpackoor_WwVh(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)(Vxx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vunpack(Vu32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vunpack_Vub(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vunpack_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vunpack(Vu32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vunpack_Vuh(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vunpack_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vxor(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vxor_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vxor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: 
Vdd32.uh=vzxt(Vu32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vzxt_Vub(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vzxt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vzxt(Vu32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vzxt_Vuh(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vzxt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vsplat(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vsplat_R(Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vb_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)(Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.h=vsplat(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsplat_R(Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)(Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Qd4=vsetq2(Rt32) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq2_R(Word32 Rt) + Instruction Type: 
CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vsetq2_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)(Rt)),-1) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Qd4.b=vshuffe(Qs4.h,Qt4.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Qb_vshuffe_QhQh(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Qb_vshuffe_QhQh(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Qd4.h=vshuffe(Qs4.w,Qt4.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Qh_vshuffe_QwQw(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Qh_vshuffe_QwQw(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vadd(Vu32.b,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vb_vadd_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vadd_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)(Vu,Vv,Qx) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.h=vadd(vclb(Vu32.h),Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_vclb_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vadd_vclb_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(vclb(Vu32.w),Vv32.w) + C Intrinsic 
Prototype: HVX_Vector Q6_Vw_vadd_vclb_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_vclb_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vadd(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vaddacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vadd(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vaddacc_WhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vaddacc_WhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vadd(Vu32.ub,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vadd_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* 
========================================================================== + Assembly Syntax: Vxx32.w+=vadd(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vaddacc_WwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vadd_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vadd_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vadd_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vadd_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32=vand(!Qu4,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnR(HVX_VectorPred Qu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vand_QnR(Qu,Rt) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vx32|=vand(!Qu4,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQnR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vandor_VQnR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32=vand(!Qv4,Vu32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnV(HVX_VectorPred Qv, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vand_QnV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32=vand(Qv4,Vu32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QV(HVX_VectorPred Qv, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vand_QV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat + 
C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VuwVuwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vlsr(Vu32.ub,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vlsr_VubR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vlsr_VubR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if 
__HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32_VbVbR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbI(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32or_VbVbVbI(Vx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)(Vx,Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,#u3) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32_VbVbI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)(Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Wh_vlut16_VbVhR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhI(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16or_WhVbVhI(Vxx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)(Vxx,Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16_VbVhI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)(Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vmax(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vmax_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vmax_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vmin(Vu32.b,Vv32.b) 
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vmin_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vmin_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpa(Vuu32.uh,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WuhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpa_WuhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpa(Vuu32.uh,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWuhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpaacc_WwWuhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32=vmpye(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_W_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* 
========================================================================== + Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRub(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyi_VwRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyiacc_VwVwRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32+=vmpyo(Vu32.w,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpyoacc_WVwVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_W_vmpyoacc_WVwVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vround(Vu32.uh,Vv32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vround_VuhVuh_sat(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vround(Vu32.uw,Vv32.uw):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vround_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vsat(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsat_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vsat_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vsub(Vu32.b,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vsub_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Wb_vsub_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsub_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)(Vu,Vv,Qx) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vsub(Vu32.ub,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vsub_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vsub_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vsub_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat + C Intrinsic 
Prototype: HVX_VectorPair Q6_Wuw_vsub_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vsub_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vabs(Vu32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vabs_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vabs(Vu32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb_sat(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vabs_Vb_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)(Vu) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vasl(Vu32.h,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vaslacc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vaslacc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vasr(Vu32.h,Rt32) + C 
Intrinsic Prototype: HVX_Vector Q6_Vh_vasracc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasracc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_VuhVuhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_VuhVuhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VuwVuwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)(Vu,Vv,Rt) +#endif /* 
__HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vavg(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vavg(Vu32.b,Vv32.b):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vavg_VbVb_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vavg(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vavg_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vavg_VuwVuw_rnd(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vdd32=#0 + C Intrinsic Prototype: HVX_VectorPair Q6_W_vzero() + Instruction Type: MAPPING + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)() +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vtmp.h=vgather(Rt32,Mu2,Vv32.h).h + C Intrinsic Prototype: void Q6_vgather_ARMVh(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_ARMVh(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)(Rs,Rt,Mu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h + C Intrinsic Prototype: void Q6_vgather_AQRMVh(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_AQRMVh(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h + C Intrinsic Prototype: void Q6_vgather_ARMWw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv) + Instruction Type: 
CVI_GATHER_DV + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_ARMWw(Rs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)(Rs,Rt,Mu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h + C Intrinsic Prototype: void Q6_vgather_AQRMWw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv) + Instruction Type: CVI_GATHER_DV + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_AQRMWw(Rs,Qs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vtmp.w=vgather(Rt32,Mu2,Vv32.w).w + C Intrinsic Prototype: void Q6_vgather_ARMVw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_ARMVw(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)(Rs,Rt,Mu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w + C Intrinsic Prototype: void Q6_vgather_AQRMVw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_AQRMVw(Rs,Qs,Rt,Mu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.h=vlut4(Vu32.uh,Rtt32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vlut4_VuhPh(HVX_Vector Vu, Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vlut4_VuhPh(Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)(Vu,Rtt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRub(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpa_WubRub(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRub(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpaacc_WhWubRub(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h=vmpa(Vx32.h,Vu32.h,Rtt32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVhPh_sat(HVX_Vector Vx, HVX_Vector Vu, 
Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vmpa_VhVhVhPh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)(Vx,Vu,Rtt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h=vmpa(Vx32.h,Vu32.uh,Rtt32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vmpa_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)(Vx,Vu,Rtt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h=vmps(Vx32.h,Vu32.uh,Rtt32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmps_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vmps_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)(Vx,Vu,Rtt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Rt32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhRh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)(Vxx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* 
========================================================================== + Assembly Syntax: Vd32.uw=vmpye(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpye_VuhRuh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vmpye_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.uw+=vmpye(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpyeacc_VuwVuhRuh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vmpyeacc_VuwVuhRuh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vnavg(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vnavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=prefixsum(Qv4) + C Intrinsic Prototype: HVX_Vector Q6_Vb_prefixsum_Q(HVX_VectorPred Qv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_prefixsum_Q(Qv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1)) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.h=prefixsum(Qv4) + C Intrinsic Prototype: HVX_Vector Q6_Vh_prefixsum_Q(HVX_VectorPred Qv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1)) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.w=prefixsum(Qv4) + C Intrinsic Prototype: HVX_Vector Q6_Vw_prefixsum_Q(HVX_VectorPred Qv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1)) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.h).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)(Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.h).h+=Vw32 + C Intrinsic Prototype: void Q6_vscatteracc_RMVhV(Word32 Rt, 
Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatteracc_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)(Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_QRMVhV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_QRMVhV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vvv32.w).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER_DV + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)(Rt,Mu,Vvv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32 + C Intrinsic Prototype: void Q6_vscatteracc_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER_DV + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatteracc_RMWwV(Rt,Mu,Vvv,Vw) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)(Rt,Mu,Vvv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_QRMWwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER_DV + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_QRMWwV(Qs,Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.w).w=Vw32 + C Intrinsic Prototype: void Q6_vscatter_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)(Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.w).w+=Vw32 + C Intrinsic Prototype: void Q6_vscatteracc_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatteracc_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)(Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) 
vscatter(Rt32,Mu2,Vv32.w).w=Vw32 + C Intrinsic Prototype: void Q6_vscatter_QRMVwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_QRMVwV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry_sat(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred Qs) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_VwVwQ_carry_sat(Vu,Vv,Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)(Vu,Vv,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1)) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vxx32.w=vasrinto(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vasrinto_WwVwVw(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vasrinto_WwVwVw(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vrotr(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrotr_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vuw_vrotr_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsatdw(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsatdw_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsatdw_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_h(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpy_WubWbI_h(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)(Vuu,Vvv,Iu2) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_h(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpyacc_WwWubWbI_h(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)(Vxx,Vuu,Vvv,Iu2) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly 
Syntax: Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_v(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpy_WubWbI_v(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)(Vuu,Vvv,Iu2) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_v(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpyacc_WwWubWbI_v(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)(Vxx,Vuu,Vvv,Iu2) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vabs(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vabs_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vabs_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vabs(Vu32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vabs_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vabs_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 
68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vadd(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vadd(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vadd_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vadd(Vu32.qf16,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vadd_Vqf16Vhf(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16_mix)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vadd_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vadd(Vu32.qf32,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vadd_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32_mix)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vadd(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vadd(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Wsf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vadd(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.w=vfmv(Vu32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vfmv_Vw(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vfmv_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign_fp)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=Vu32.qf16 + C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vhf_equals_Vqf16(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=Vuu32.qf32 + C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Wqf32(HVX_VectorPair Vuu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vhf_equals_Wqf32(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf32)(Vuu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=Vu32.qf32 + C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vqf32(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vsf_equals_Vqf32(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_qf32)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.b=vcvt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vb_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_b_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.h=vcvt(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vcvt_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_h_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vcvt(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vb(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Whf_vcvt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_b)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vcvt(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vh(HVX_Vector Vu) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vcvt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_h)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vcvt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vcvt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vcvt(Vu32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vub(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vcvt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_ub)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vcvt(Vu32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vuh(HVX_Vector Vu) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Vhf_vcvt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_uh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vcvt(Vu32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vcvt_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_sf_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vcvt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vub_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_ub_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vcvt(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcvt_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_uh_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vdmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Vsf_vdmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpyacc_VsfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vdmpyacc_VsfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vfmax(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vfmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vfmax(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vfmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vfmin(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector 
Q6_Vhf_vfmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vfmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vfmin(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vfmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vfneg(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfneg_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vfneg_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vfneg(Vu32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfneg_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vfneg_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred 
Q6_Q_vcmp_gt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + 
Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vmax(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vhf_vmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vmax(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vsf_vmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_sf)(Vu,Vv) +#endif /* 
__HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vmin(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vhf_vmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vmin(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vsf_vmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vx32.hf+=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpyacc_VhfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vmpyacc_VhfVhfVhf(Vx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf16_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf16_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf16_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Vqf32_vmpy_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wqf32_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wqf32_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16) + C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wqf32_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_qf16)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vmpy(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: 
HVX_Vector Q6_Vqf32_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf32_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVhfVhf(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vmpyacc_WsfVhfVhf(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vmpy(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* 
========================================================================== + Assembly Syntax: Vd32.qf16=vsub(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vsub(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vsub_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vsub(Vu32.qf16,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vsub_Vqf16Vhf(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16_mix)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vsub_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vsub(Vu32.qf32,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vsub_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32_mix)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vsub(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vsub(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Wsf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vsub(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 69 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_WuhVub_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubrndsat)(Vuu,Vv) +#endif /* __HEXAGON_ARCH___ >= 69 */ + +#if __HVX_ARCH__ >= 69 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_sat(HVX_VectorPair Vuu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_WuhVub_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubsat)(Vuu,Vv) +#endif /* __HEXAGON_ARCH___ >= 69 */ + +#if __HVX_ARCH__ >= 69 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat + C Intrinsic Prototype: 
HVX_Vector Q6_Vuh_vasr_WwVuh_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_WwVuh_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhrndsat)(Vuu,Vv) +#endif /* __HEXAGON_ARCH___ >= 69 */ + +#if __HVX_ARCH__ >= 69 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_sat(HVX_VectorPair Vuu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_WwVuh_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhsat)(Vuu,Vv) +#endif /* __HEXAGON_ARCH___ >= 69 */ + +#if __HVX_ARCH__ >= 69 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16 + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmpy_VuhVuh_rs16(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuh_vmpy_VuhVuh_rs16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhvs)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 69 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vadd(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vadd_VbfVbf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_bf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* 
========================================================================== + Assembly Syntax: Vd32.h=Vu32.hf + C Intrinsic Prototype: HVX_Vector Q6_Vh_equals_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_equals_Vhf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_h_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vd32.hf=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vh(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vhf_equals_Vh(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_h)(Vu) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vd32.sf=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vw(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vsf_equals_Vw(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_w)(Vu) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vd32.w=Vu32.sf + C Intrinsic Prototype: HVX_Vector Q6_Vw_equals_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_equals_Vsf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_w_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: 
Vd32.bf=vcvt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vbf_vcvt_VsfVsf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vbf_vcvt_VsfVsf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_bf_sf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VA Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VbfVbf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt) \ + ((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtbf)(Vu, Vv)), -1) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVbfVbf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVbfVbf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt) \ + ((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtbf_and)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVbfVbf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVbfVbf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt) \ + ((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtbf_or)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVbfVbf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVbfVbf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt) \ + ((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtbf_xor)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vd32.bf=vmax(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_Vector Q6_Vbf_vmax_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_LATE Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vbf_vmax_VbfVbf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_bf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vd32.bf=vmin(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_Vector Q6_Vbf_vmin_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_LATE Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vbf_vmin_VbfVbf(Vu, Vv) \ + 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_bf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vmpy(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vmpy_VbfVbf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_bf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vxx32.sf+=vmpy(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVbfVbf(HVX_VectorPair + Vxx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution + Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vmpyacc_WsfVbfVbf(Vxx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_bf_acc)(Vxx, Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vsub(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vsub_VbfVbf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_bf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32=vgetqfext(Vu32.x,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vgetqfext_VR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_V_vgetqfext_VR(Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_get_qfext)(Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vx32|=vgetqfext(Vu32.x,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vgetqfextor_VVR(HVX_Vector Vx, + HVX_Vector Vu, Word32 Rt) Instruction Type: CVI_VX Execution Slots: + SLOT23 + ========================================================================== */ + +#define Q6_V_vgetqfextor_VVR(Vx, Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_get_qfext_oracc)(Vx, Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.x=vsetqfext(Vu32,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vsetqfext_VR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vsetqfext_VR(Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_set_qfext)(Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.f8=vabs(Vu32.f8) + C Intrinsic Prototype: HVX_Vector Q6_V_vabs_V(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vabs_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_f8)(Vu) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vadd(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vadd_VV(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: 
CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vadd_VV(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_f8)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.b=vcvt2(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt2_VhfVhf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vb_vcvt2_VhfVhf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt2_b_hf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vcvt2(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt2_Vb(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vcvt2_Vb(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt2_hf_b)(Vu) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vcvt2(Vu32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt2_Vub(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vcvt2_Vub(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt2_hf_ub)(Vu) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vcvt2(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt2_VhfVhf(HVX_Vector Vu, + HVX_Vector Vv) Instruction 
Type: CVI_VX Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vub_vcvt2_VhfVhf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt2_ub_hf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.f8=vcvt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_V_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector + Vv) Instruction Type: CVI_VX Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vcvt_VhfVhf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_f8_hf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vcvt(Vu32.f8) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_V(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vcvt_V(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_f8)(Vu) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.f8=vfmax(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_Vector Q6_V_vfmax_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vfmax_VV(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_f8)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.f8=vfmin(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_Vector Q6_V_vfmin_VV(HVX_Vector Vu, HVX_Vector Vv) + 
Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vfmin_VV(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_f8)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.f8=vfneg(Vu32.f8) + C Intrinsic Prototype: HVX_Vector Q6_V_vfneg_V(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vfneg_V(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_f8)(Vu) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32=vmerge(Vu32.x,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_V_vmerge_VVw(HVX_Vector Vu, HVX_Vector + Vv) Instruction Type: CVI_VS Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vmerge_VVw(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmerge_qf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vmpy(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vmpy_VV(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vmpy_VV(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_f8)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vxx32.hf+=vmpy(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vmpyacc_WhfVV(HVX_VectorPair + 
Vxx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution + Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vmpyacc_WhfVV(Vxx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_f8_acc)(Vxx, Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vmpy(Vu32.hf,Rt32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfRhf(HVX_Vector Vu, Word32 + Rt) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf16_vmpy_VhfRhf(Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_rt_hf)(Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Rt32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Rhf(HVX_Vector Vu, + Word32 Rt) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf16_vmpy_Vqf16Rhf(Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_rt_qf16)(Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vmpy(Vu32.sf,Rt32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_VsfRsf(HVX_Vector Vu, Word32 + Rt) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf32_vmpy_VsfRsf(Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_rt_sf)(Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + 
Assembly Syntax: Vdd32.hf=vsub(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vsub_VV(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vsub_VV(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_f8)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vabs(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vabs_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vabs_Vhf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_qf16_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vabs(Vu32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vabs_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vabs_Vqf16(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_qf16_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vabs(Vu32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vabs_Vqf32(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vabs_Vqf32(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_qf32_qf32)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + 
Assembly Syntax: Vd32.qf32=vabs(Vu32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vabs_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vabs_Vsf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_qf32_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32=valign4(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_V_valign4_VVR(HVX_Vector Vu, HVX_Vector + Vv, Word32 Rt) Instruction Type: CVI_VA Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_valign4_VVR(Vu, Vv, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valign4)(Vu, Vv, Rt) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.bf=Vuu32.qf32 + C Intrinsic Prototype: HVX_Vector Q6_Vbf_equals_Wqf32(HVX_VectorPair Vuu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vbf_equals_Wqf32(Vuu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_bf_qf32)(Vuu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.f8=Vu32.qf16 + C Intrinsic Prototype: HVX_Vector Q6_V_equals_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_equals_Vqf16(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_f8_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly 
Syntax: Vd32.h=Vu32.hf:rnd + C Intrinsic Prototype: HVX_Vector Q6_Vh_equals_Vhf_rnd(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_equals_Vhf_rnd(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_h_hf_rnd)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vdd32.qf16=Vu32.f8 + C Intrinsic Prototype: HVX_VectorPair Q6_Wqf16_equals_V(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wqf16_equals_V(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_qf16_f8)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=Vu32.hf + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_equals_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_equals_Vhf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_qf16_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=Vu32.qf16 + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_equals_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_equals_Vqf16(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_qf16_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=Vu32.qf32 + C 
Intrinsic Prototype: HVX_Vector Q6_Vqf32_equals_Vqf32(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_equals_Vqf32(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_qf32_qf32)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=Vu32.sf + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_equals_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_equals_Vsf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_qf32_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.eq(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VhfVhf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VA Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eq_VhfVhf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqhf)(Vu, Vv)), -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.eq(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVhfVhf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqand_QVhfVhf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqhf_and)( \ + 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.eq(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVhfVhf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqor_QVhfVhf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqhf_or)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.eq(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVhfVhf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqxacc_QVhfVhf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqhf_xor)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.eq(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VsfVsf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VA Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eq_VsfVsf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + 
(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqsf)(Vu, Vv)), -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.eq(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVsfVsf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqand_QVsfVsf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqsf_and)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.eq(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVsfVsf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqor_QVsfVsf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqsf_or)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.eq(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVsfVsf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqxacc_QVsfVsf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + 
(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqsf_xor)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.w=vilog2(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vilog2_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vilog2_Vhf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vilog2_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.w=vilog2(Vu32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vilog2_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vilog2_Vqf16(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vilog2_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.w=vilog2(Vu32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vilog2_Vqf32(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vilog2_Vqf32(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vilog2_qf32)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.w=vilog2(Vu32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vilog2_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + 
+#define Q6_Vw_vilog2_Vsf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vilog2_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vneg(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vneg_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vneg_Vhf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vneg_qf16_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vneg(Vu32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vneg_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vneg_Vqf16(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vneg_qf16_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vneg(Vu32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vneg_Vqf32(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vneg_Vqf32(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vneg_qf32_qf32)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vneg(Vu32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vneg_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vneg_Vsf(Vu) \ + 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vneg_qf32_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vsub(Vu32.hf,Vv32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVqf16(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VS Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vsub_VhfVqf16(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_mix)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vsub(Vu32.sf,Vv32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVqf32(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VS Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vsub_VsfVqf32(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_mix)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#endif /* __HVX__ */ + +#endif diff --git a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs index 9d4e80f8f27ca..4bd5a35549e7a 100644 --- a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs @@ -72,16 +72,16 @@ impl VectorMode { } } -/// LLVM tag to fetch the header from -const LLVM_TAG: &str = "llvmorg-22.1.0-rc1"; +/// LLVM version the header file is from (for reference) +/// Source: https://github.com/llvm/llvm-project/blob/llvmorg-22.1.0-rc1/clang/lib/Headers/hvx_hexagon_protos.h +const LLVM_VERSION: &str = "22.1.0-rc1"; /// Maximum HVX architecture version supported by rustc /// Check with: rustc --target=hexagon-unknown-linux-musl --print target-features const MAX_SUPPORTED_ARCH: u32 = 79; -/// 
URL template for the HVX header file -const HEADER_URL: &str = - "https://raw.githubusercontent.com/llvm/llvm-project/{tag}/clang/lib/Headers/hvx_hexagon_protos.h"; +/// Local header file path (checked into the repository) +const HEADER_FILE: &str = "hvx_hexagon_protos.h"; /// Intrinsic information parsed from the LLVM header #[derive(Debug, Clone)] @@ -306,18 +306,14 @@ fn collect_builtins_from_expr(expr: &CompoundExpr, builtins: &mut HashSet Result { - let url = HEADER_URL.replace("{tag}", LLVM_TAG); - println!("Downloading HVX header from: {}", url); +/// Read the local HVX header file +fn read_header(crate_dir: &Path) -> Result { + let header_path = crate_dir.join(HEADER_FILE); + println!("Reading HVX header from: {}", header_path.display()); + println!(" (LLVM version: {})", LLVM_VERSION); - let response = ureq::get(&url) - .call() - .map_err(|e| format!("Failed to download header: {}", e))?; - - response - .into_string() - .map_err(|e| format!("Failed to read response: {}", e)) + std::fs::read_to_string(&header_path) + .map_err(|e| format!("Failed to read header file {}: {}", header_path.display(), e)) } /// Parse a C function prototype to extract return type and parameters @@ -1625,10 +1621,15 @@ fn generate_module_file( fn main() -> Result<(), String> { println!("=== Hexagon HVX Code Generator ===\n"); - // Download and parse the LLVM header - println!("Step 1: Downloading LLVM HVX header..."); - let header_content = download_header()?; - println!(" Downloaded {} bytes", header_content.len()); + // Get the crate directory first (needed for both reading header and writing output) + let crate_dir = std::env::var("CARGO_MANIFEST_DIR") + .map(std::path::PathBuf::from) + .unwrap_or_else(|_| std::env::current_dir().unwrap()); + + // Read and parse the local LLVM header + println!("Step 1: Reading LLVM HVX header..."); + let header_content = read_header(&crate_dir)?; + println!(" Read {} bytes", header_content.len()); println!("\nStep 2: Parsing intrinsic 
definitions..."); let all_intrinsics = parse_header(&header_content); @@ -1678,10 +1679,6 @@ fn main() -> Result<(), String> { } // Generate output files - let crate_dir = std::env::var("CARGO_MANIFEST_DIR") - .map(std::path::PathBuf::from) - .unwrap_or_else(|_| std::env::current_dir().unwrap()); - let hexagon_dir = crate_dir.join("../core_arch/src/hexagon"); // Generate v64.rs (64-byte vector mode) From 5868ac677e47b03f7e818a3b40f83ef588b3ee8c Mon Sep 17 00:00:00 2001 From: xonx <119700621+xonx4l@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:49:20 +0000 Subject: [PATCH 53/90] unify and deduplicate floats --- .../0029-sysroot_tests-disable-f16-math.patch | 133 ++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 patches/0029-sysroot_tests-disable-f16-math.patch diff --git a/patches/0029-sysroot_tests-disable-f16-math.patch b/patches/0029-sysroot_tests-disable-f16-math.patch new file mode 100644 index 0000000000000..6a0244cfde3fb --- /dev/null +++ b/patches/0029-sysroot_tests-disable-f16-math.patch @@ -0,0 +1,133 @@ +From 285d5716fcfa6d43a3516d899b73bc85da322c25 Mon Sep 17 00:00:00 2001 +From: xonx <119700621+xonx4l@users.noreply.github.com> +Date: Sun, 15 Feb 2026 14:06:49 +0000 +Subject: [PATCH] Disable f16 math tests for cranelift + +--- + coretests/tests/floats/mod.rs | 26 +++++++++++++------------- + 1 file changed, 13 insertions(+), 13 deletions(-) + +diff --git a/coretests/tests/floats/mod.rs b/coretests/tests/floats/mod.rs +index c61961f8584..d7b4fa20322 100644 +--- a/coretests/tests/floats/mod.rs ++++ b/coretests/tests/floats/mod.rs +@@ -1534,7 +1534,7 @@ fn s_nan() -> Float { + name: powf, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -1557,7 +1557,7 @@ fn s_nan() -> Float { + name: exp, + attrs: { + const: #[cfg(false)], +- f16: 
#[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -1578,7 +1578,7 @@ fn s_nan() -> Float { + name: exp2, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -1598,7 +1598,7 @@ fn s_nan() -> Float { + name: ln, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -1620,7 +1620,7 @@ fn s_nan() -> Float { + name: log, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -1645,7 +1645,7 @@ fn s_nan() -> Float { + name: log2, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -1668,7 +1668,7 @@ fn s_nan() -> Float { + name: log10, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -1692,7 +1692,7 @@ fn s_nan() -> Float { + name: asinh, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), 
target_has_reliable_f128_math))], + }, + test { +@@ -1725,7 +1725,7 @@ fn s_nan() -> Float { + name: acosh, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -1753,7 +1753,7 @@ fn s_nan() -> Float { + name: atanh, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -1779,7 +1779,7 @@ fn s_nan() -> Float { + name: gamma, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -1814,7 +1814,7 @@ fn s_nan() -> Float { + name: ln_gamma, + attrs: { + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +@@ -2027,7 +2027,7 @@ fn s_nan() -> Float { + attrs: { + // FIXME(f16_f128): add math tests when available + const: #[cfg(false)], +- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))], ++ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622) + f128: #[cfg(all(not(miri), target_has_reliable_f128_math))], + }, + test { +-- +2.50.1 + From b28ebc87a8716c561cfbc1913e5c146975971d22 Mon Sep 17 00:00:00 2001 From: Trevor Gross Date: Fri, 30 Jan 2026 21:35:07 -0600 Subject: [PATCH 54/90] clif: Only set has_reliable_f128_math with glibc New float tests in core are failing on clif with issues like the following: Undefined symbols for architecture arm64: "_coshf128", referenced from: 
__RNvMNtCshY0fR2o0hOA_3std4f128C4f1284coshCs5TKtJxXQNGL_9coretests in coretests-e38519c0cc90db54.coretests.44b6247a565e10d1-cgu.10.rcgu.o "_exp2f128", referenced from: __RNvMNtCshY0fR2o0hOA_3std4f128C4f1284exp2Cs5TKtJxXQNGL_9coretests in coretests-e38519c0cc90db54.coretests.44b6247a565e10d1-cgu.10.rcgu.o ... Disable f128 math unless the symbols are known to be available, which for now is only glibc targets. This matches the LLVM backend. --- src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 656e7b0aec5b2..7bab07def63d1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -180,6 +180,10 @@ impl CodegenBackend for CraneliftCodegenBackend { && sess.target.env == Env::Gnu && sess.target.abi != Abi::Llvm); + // FIXME(f128): f128 math operations need f128 math symbols, which currently aren't always + // filled in by compiler-builtins. The only libc that provides these currently is glibc. + let has_reliable_f128_math = has_reliable_f16_f128 && sess.target.env == Env::Gnu; + TargetConfig { target_features, unstable_target_features, @@ -188,7 +192,7 @@ impl CodegenBackend for CraneliftCodegenBackend { has_reliable_f16: has_reliable_f16_f128, has_reliable_f16_math: has_reliable_f16_f128, has_reliable_f128: has_reliable_f16_f128, - has_reliable_f128_math: has_reliable_f16_f128, + has_reliable_f128_math, } } From f5161526e6315d3aeb0f89456de451c66f646017 Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Sun, 15 Feb 2026 12:13:00 -0600 Subject: [PATCH 55/90] core_arch: Add tracking issue to hexagon module declaration Update the unstable attribute for the hexagon module to use the proper tracking issue number (151523) instead of "none". 
--- library/stdarch/crates/core_arch/src/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/stdarch/crates/core_arch/src/mod.rs b/library/stdarch/crates/core_arch/src/mod.rs index f8ea68b35c665..2483d07b230f9 100644 --- a/library/stdarch/crates/core_arch/src/mod.rs +++ b/library/stdarch/crates/core_arch/src/mod.rs @@ -329,7 +329,7 @@ pub mod arch { /// See the [module documentation](../index.html) for more details. #[cfg(any(target_arch = "hexagon", doc))] #[doc(cfg(target_arch = "hexagon"))] - #[unstable(feature = "stdarch_hexagon", issue = "none")] + #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub mod hexagon { pub use crate::core_arch::hexagon::*; } From 76c041c19a6c9e4c0a196c92afda2c82d1dc78be Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Sun, 15 Feb 2026 12:18:19 -0600 Subject: [PATCH 56/90] stdarch-gen-hexagon: Fix formatting --- library/stdarch/crates/stdarch-gen-hexagon/src/main.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs index 4bd5a35549e7a..3cfbabfe0ab28 100644 --- a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs @@ -312,8 +312,13 @@ fn read_header(crate_dir: &Path) -> Result { println!("Reading HVX header from: {}", header_path.display()); println!(" (LLVM version: {})", LLVM_VERSION); - std::fs::read_to_string(&header_path) - .map_err(|e| format!("Failed to read header file {}: {}", header_path.display(), e)) + std::fs::read_to_string(&header_path).map_err(|e| { + format!( + "Failed to read header file {}: {}", + header_path.display(), + e + ) + }) } /// Parse a C function prototype to extract return type and parameters From 73cb0682f5e77b4688d83c441cc8d0bb521120ed Mon Sep 17 00:00:00 2001 From: The rustc-josh-sync Cronjob Bot Date: Mon, 16 Feb 2026 04:48:11 +0000 Subject: [PATCH 57/90] Prepare 
for merging from rust-lang/rust This updates the rust-version file to 139651428df86cf88443295542c12ea617cbb587. --- library/stdarch/rust-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/stdarch/rust-version b/library/stdarch/rust-version index aa3876b14a221..b22c6c3869c62 100644 --- a/library/stdarch/rust-version +++ b/library/stdarch/rust-version @@ -1 +1 @@ -db3e99bbab28c6ca778b13222becdea54533d908 +139651428df86cf88443295542c12ea617cbb587 From 417cc114f526a6be58faf6f90966381e366a7986 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Mon, 16 Feb 2026 09:42:54 +0000 Subject: [PATCH 58/90] Rustup to rustc 1.95.0-nightly (873b4beb0 2026-02-15) --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 510772d71306a..b0a3239b8fdda 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2026-02-13" +channel = "nightly-2026-02-16" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From e028e7848cc752a9e84b082e9ec888cd28982f62 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Mon, 16 Feb 2026 09:53:02 +0000 Subject: [PATCH 59/90] Re-enable fixed rustc test --- scripts/test_rustc_tests.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/test_rustc_tests.sh b/scripts/test_rustc_tests.sh index 6cfbd46a4cd98..2c994276dac35 100755 --- a/scripts/test_rustc_tests.sh +++ b/scripts/test_rustc_tests.sh @@ -59,7 +59,6 @@ rm tests/ui/asm/x86_64/goto.rs # inline asm labels not supported rm tests/ui/asm/label-operand.rs # same rm tests/ui/asm/may_unwind.rs # asm unwinding not supported rm tests/ui/asm/aarch64/may_unwind.rs # same -rm tests/ui/asm/x86_64/global_asm_escape.rs # rust-lang/rust#151955 needs to be applied to non-LLVM codegen backends too # misc unimplemented things rm 
tests/ui/target-feature/missing-plusminus.rs # error not implemented From c259c4048a97329a70a7ec2dcd0e79fe2c36be68 Mon Sep 17 00:00:00 2001 From: Sasha Pourcelot Date: Sat, 14 Feb 2026 19:52:42 +0000 Subject: [PATCH 60/90] compiletest: normalize stderr before SVG rendering Element position is hardcoded in the rendered SVG. This means that any change in element length (for instance, when substituting the path with the `rust` checkout with `$DIR`) would not change the position and result in buggy SVG being generated. Normalizing before SVG rendering allows us to keep a consistent element placement. --- src/tools/compiletest/src/runtest.rs | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs index e6eb1f3bd957f..99c51207f601d 100644 --- a/src/tools/compiletest/src/runtest.rs +++ b/src/tools/compiletest/src/runtest.rs @@ -2379,15 +2379,22 @@ impl<'test> TestCx<'test> { _ => {} }; - let stderr = if self.force_color_svg() { - anstyle_svg::Term::new().render_svg(&proc_res.stderr) - } else if explicit_format { - proc_res.stderr.clone() + let stderr; + let normalized_stderr; + + if self.force_color_svg() { + let normalized = self.normalize_output(&proc_res.stderr, &self.props.normalize_stderr); + stderr = anstyle_svg::Term::new().render_svg(&normalized); + normalized_stderr = stderr.clone(); } else { - json::extract_rendered(&proc_res.stderr) - }; + stderr = if explicit_format { + proc_res.stderr.clone() + } else { + json::extract_rendered(&proc_res.stderr) + }; + normalized_stderr = self.normalize_output(&stderr, &self.props.normalize_stderr); + } - let normalized_stderr = self.normalize_output(&stderr, &self.props.normalize_stderr); let mut errors = 0; match output_kind { TestOutput::Compile => { From db5ed68a8871305f352bc4447832eac9ed748fbf Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 26 Oct 2025 11:04:37 +0100 Subject: [PATCH 61/90] add 
write_box_via_move intrinsic and use it for vec! This allows us to get rid of box_new entirely --- example/mini_core.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/example/mini_core.rs b/example/mini_core.rs index 301547cadaf7c..5293b458d8c4f 100644 --- a/example/mini_core.rs +++ b/example/mini_core.rs @@ -622,11 +622,6 @@ impl Deref for Box { } } -#[lang = "exchange_malloc"] -unsafe fn allocate(size: usize, _align: usize) -> *mut u8 { - unsafe { libc::malloc(size) } -} - #[lang = "drop"] pub trait Drop { fn drop(&mut self); From 66c85df09d669bfd8688bb8fb2ac9d145d21cf4e Mon Sep 17 00:00:00 2001 From: xtqqczze <45661989+xtqqczze@users.noreply.github.com> Date: Mon, 16 Feb 2026 19:19:59 +0000 Subject: [PATCH 62/90] actions: bump rustsec/audit-check to v2 (#1627) --- .github/workflows/audit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index 274b9504beb04..95a4dcd3266df 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -13,6 +13,6 @@ jobs: - uses: actions/checkout@v4 - run: | sed -i 's/components.*/components = []/' rust-toolchain.toml - - uses: rustsec/audit-check@v1.4.1 + - uses: rustsec/audit-check@v2.0.0 with: token: ${{ secrets.GITHUB_TOKEN }} From d54a023c69d095d1185a7303456672656bda25f9 Mon Sep 17 00:00:00 2001 From: okaneco <47607823+okaneco@users.noreply.github.com> Date: Thu, 12 Feb 2026 15:38:13 -0500 Subject: [PATCH 63/90] [cg_clif]: Fix codegen of f128 to i128 casts Correct name for intrinsic that converts f128 to u128 Use `to_signed` instead of `from_signed` to ensure proper intrinsic selected for u128/i128 --- src/codegen_f16_f128.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/codegen_f16_f128.rs b/src/codegen_f16_f128.rs index 86bff32dc623c..d8977657e305d 100644 --- a/src/codegen_f16_f128.rs +++ b/src/codegen_f16_f128.rs @@ -208,7 +208,7 @@ pub(crate) fn codegen_cast( let ret_ty = if to_ty.bits() < 32 { 
types::I32 } else { to_ty }; let name = format!( "__fix{sign}tf{size}i", - sign = if from_signed { "" } else { "un" }, + sign = if to_signed { "" } else { "uns" }, size = match ret_ty { types::I32 => 's', types::I64 => 'd', From 7ce33416af975ace3faca65ab3e83f6bcc61596b Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sat, 14 Feb 2026 15:43:32 +0100 Subject: [PATCH 64/90] use `intrinsics::simd` for `vmull_*` --- .../src/arm_shared/neon/generated.rs | 60 ++----------------- .../spec/neon/arm_shared.spec.yml | 22 +++---- 2 files changed, 14 insertions(+), 68 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index c2e90d41eff02..a578f6c158d71 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -33216,15 +33216,7 @@ pub fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v4i32")] - fn _vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - } - unsafe { _vmull_s16(a, b) } + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] @@ -33245,15 +33237,7 @@ pub fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = 
"llvm.aarch64.neon.smull.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v2i64")] - fn _vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - } - unsafe { _vmull_s32(a, b) } + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] @@ -33274,15 +33258,7 @@ pub fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smull.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v8i16")] - fn _vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - } - unsafe { _vmull_s8(a, b) } + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] @@ -33303,15 +33279,7 @@ pub fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v8i16")] - fn _vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t; - } - unsafe { _vmull_u8(a, b) } + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] @@ -33332,15 +33300,7 @@ pub fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") )] pub fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v4i32")] - fn _vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t; - } - unsafe { _vmull_u16(a, b) } + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] @@ -33361,15 +33321,7 @@ pub fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umull.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v2i64")] - fn _vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t; - } - unsafe { _vmull_u32(a, b) } + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_p8)"] diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index c726d1a028a57..404e67b3c56e0 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -6507,13 +6507,10 @@ intrinsics: - ["s16", int16x4_t, int32x4_t] - ["s32", int32x2_t, int64x2_t] compose: - - LLVMLink: - name: "smull.{neon_type[1]}" - links: - - link: "llvm.aarch64.neon.smull.{neon_type[2]}" - arch: aarch64,arm64ec - - link: "llvm.arm.neon.vmulls.{neon_type[2]}" - arch: arm + - 
FnCall: + - simd_mul + - - FnCall: ['simd_cast', [a]] + - FnCall: ['simd_cast', [b]] - name: "vmull{neon_type[1].no}" doc: "Unsigned multiply long" @@ -6531,13 +6528,10 @@ intrinsics: - ["u16", uint16x4_t, uint32x4_t] - ["u32", uint32x2_t, uint64x2_t] compose: - - LLVMLink: - name: "smull.{neon_type[1]}" - links: - - link: "llvm.aarch64.neon.umull.{neon_type[2]}" - arch: aarch64,arm64ec - - link: "llvm.arm.neon.vmullu.{neon_type[2]}" - arch: arm + - FnCall: + - simd_mul + - - FnCall: ['simd_cast', [a]] + - FnCall: ['simd_cast', [b]] - name: "vmull{neon_type[1].no}" doc: "Polynomial multiply long" From f3ff080c68cbf8eca1f638ec5d16a611984b6446 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Tue, 17 Feb 2026 11:29:05 +0100 Subject: [PATCH 65/90] use `read_unaligned` for f64 `vld` and `vldq` --- .../core_arch/src/aarch64/neon/generated.rs | 78 +++++-------------- .../crates/core_arch/src/aarch64/neon/mod.rs | 8 ++ .../spec/neon/aarch64.spec.yml | 15 ++-- 3 files changed, 34 insertions(+), 67 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index a0647551e4a7a..28db407924502 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -11488,16 +11488,9 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { #[inline(always)] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0" - )] - fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t; - } - _vld1_f64_x2(a) +#[cfg_attr(test, assert_instr(ld))] +pub unsafe fn vld1_f64_x2(ptr: *const f64) -> float64x1x2_t { + 
crate::ptr::read_unaligned(ptr.cast()) } #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"] @@ -11506,16 +11499,9 @@ pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t { #[inline(always)] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0" - )] - fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t; - } - _vld1_f64_x3(a) +#[cfg_attr(test, assert_instr(ld))] +pub unsafe fn vld1_f64_x3(ptr: *const f64) -> float64x1x3_t { + crate::ptr::read_unaligned(ptr.cast()) } #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"] @@ -11524,16 +11510,9 @@ pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t { #[inline(always)] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0" - )] - fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t; - } - _vld1_f64_x4(a) +#[cfg_attr(test, assert_instr(ld))] +pub unsafe fn vld1_f64_x4(ptr: *const f64) -> float64x1x4_t { + crate::ptr::read_unaligned(ptr.cast()) } #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"] @@ -11542,16 
+11521,9 @@ pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t { #[inline(always)] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0" - )] - fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t; - } - _vld1q_f64_x2(a) +#[cfg_attr(test, assert_instr(ld))] +pub unsafe fn vld1q_f64_x2(ptr: *const f64) -> float64x2x2_t { + crate::ptr::read_unaligned(ptr.cast()) } #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"] @@ -11560,16 +11532,9 @@ pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t { #[inline(always)] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0" - )] - fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t; - } - _vld1q_f64_x3(a) +#[cfg_attr(test, assert_instr(ld))] +pub unsafe fn vld1q_f64_x3(ptr: *const f64) -> float64x2x3_t { + crate::ptr::read_unaligned(ptr.cast()) } #[doc = "Load multiple single-element structures to one, two, three, or four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"] @@ -11578,16 +11543,9 @@ pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t { #[inline(always)] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld1))] -pub unsafe fn 
vld1q_f64_x4(a: *const f64) -> float64x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0" - )] - fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t; - } - _vld1q_f64_x4(a) +#[cfg_attr(test, assert_instr(ld))] +pub unsafe fn vld1q_f64_x4(ptr: *const f64) -> float64x2x4_t { + crate::ptr::read_unaligned(ptr.cast()) } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"] diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs index 135d0a156dc3f..c39b3e93af961 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs @@ -1093,6 +1093,14 @@ mod tests { test_vld1q_f32_x3(f32, 12, float32x4x3_t, vst1q_f32_x3, vld1q_f32_x3); test_vld1q_f32_x4(f32, 16, float32x4x4_t, vst1q_f32_x4, vld1q_f32_x4); + test_vld1_f64_x2(f64, 2, float64x1x2_t, vst1_f64_x2, vld1_f64_x2); + test_vld1_f64_x3(f64, 3, float64x1x3_t, vst1_f64_x3, vld1_f64_x3); + test_vld1_f64_x4(f64, 4, float64x1x4_t, vst1_f64_x4, vld1_f64_x4); + + test_vld1q_f64_x2(f64, 4, float64x2x2_t, vst1q_f64_x2, vld1q_f64_x2); + test_vld1q_f64_x3(f64, 6, float64x2x3_t, vst1q_f64_x3, vld1q_f64_x3); + test_vld1q_f64_x4(f64, 8, float64x2x4_t, vst1q_f64_x4, vld1q_f64_x4); + test_vld1_s8_x2(i8, 16, int8x8x2_t, vst1_s8_x2, vld1_s8_x2); test_vld1_s8_x3(i8, 24, int8x8x3_t, vst1_s8_x3, vld1_s8_x3); test_vld1_s8_x4(i8, 32, int8x8x4_t, vst1_s8_x4, vld1_s8_x4); diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index 95f23ebd9a0ff..ec9d49a510ff2 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ 
b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -3479,10 +3479,10 @@ intrinsics: - name: "vld1{neon_type[1].no}" doc: "Load multiple single-element structures to one, two, three, or four registers" - arguments: ["a: {type[0]}"] + arguments: ["ptr: {type[0]}"] return_type: "{neon_type[1]}" attr: [*neon-stable] - assert_instr: [ld1] + assert_instr: [ld] safety: unsafe: [neon] types: @@ -3493,11 +3493,12 @@ intrinsics: - ["*const f64", float64x1x4_t] - ["*const f64", float64x2x4_t] compose: - - LLVMLink: - name: "vld1{neon_type[1].no}" - links: - - link: "llvm.aarch64.neon.ld1x{neon_type[1].tuple}.v{neon_type[1].lane}f{neon_type[1].base}.p0" - arch: aarch64,arm64ec + - FnCall: + - 'crate::ptr::read_unaligned' + - - MethodCall: + - ptr + - cast + - [] - name: "vld2{neon_type[1].lane_nox}" doc: Load multiple 2-element structures to two registers From 7964c1b78d41e211207b7c8a6ea152ae39c42dc1 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Tue, 17 Feb 2026 11:00:57 +0000 Subject: [PATCH 66/90] Rustup to rustc 1.95.0-nightly (3c9faa0d0 2026-02-16) --- ...sure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch | 6 +++--- rust-toolchain.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch b/patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch index 34a95dadf0ef0..2aa93164674f6 100644 --- a/patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch +++ b/patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch @@ -13,9 +13,9 @@ index d0f1553..75129af 100644 --- a/library/core/src/ffi/va_list.rs +++ b/library/core/src/ffi/va_list.rs @@ -217,6 +217,7 @@ impl Clone for VaList<'_> { - } - - impl<'f> Drop for VaList<'f> { + + #[rustc_const_unstable(feature = "const_c_variadic", issue = "151787")] + impl<'f> const Drop for VaList<'f> { + #[inline] fn 
drop(&mut self) { // SAFETY: this variable argument list is being dropped, so won't be read from again. diff --git a/rust-toolchain.toml b/rust-toolchain.toml index b0a3239b8fdda..2aaffdc5d244c 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2026-02-16" +channel = "nightly-2026-02-17" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From 224eef62e84fba78d963129061e61c41bda8be90 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Tue, 17 Feb 2026 11:07:43 +0000 Subject: [PATCH 67/90] Fix rustc test suite --- scripts/test_rustc_tests.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/test_rustc_tests.sh b/scripts/test_rustc_tests.sh index 2c994276dac35..4cad18f2a94ff 100755 --- a/scripts/test_rustc_tests.sh +++ b/scripts/test_rustc_tests.sh @@ -41,6 +41,7 @@ rm -r tests/run-make/naked-symbol-visibility # variadic arguments rm tests/ui/abi/mir/mir_codegen_calls_variadic.rs # requires float varargs rm tests/ui/c-variadic/naked.rs # same +rm tests/ui/consts/const-eval/c-variadic.rs # same rm tests/ui/abi/variadic-ffi.rs # requires callee side vararg support rm -r tests/run-make/c-link-to-rust-va-list-fn # requires callee side vararg support rm tests/ui/c-variadic/valid.rs # same @@ -165,6 +166,7 @@ rm -r tests/run-make/export # same rm -r tests/ui/compiletest-self-test/compile-flags-incremental.rs # needs compiletest compiled with panic=unwind rm -r tests/ui/extern/extern-types-field-offset.rs # expects /rustc/ rather than /rustc/FAKE_PREFIX rm -r tests/ui/process/println-with-broken-pipe.rs # same +rm tests/codegen-units/item-collection/opaque-return-impls.rs # extra mono item. 
possibly due to other configuration # genuine bugs # ============ From 6d0aca46a8db9957bbb56cf72812d085ef94c509 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Tue, 17 Feb 2026 11:07:59 +0000 Subject: [PATCH 68/90] Simplify bootstrap patches --- scripts/setup_rust_fork.sh | 36 +++++++++++------------------------- 1 file changed, 11 insertions(+), 25 deletions(-) diff --git a/scripts/setup_rust_fork.sh b/scripts/setup_rust_fork.sh index 3187bda1304a7..bb9f69b5c9744 100644 --- a/scripts/setup_rust_fork.sh +++ b/scripts/setup_rust_fork.sh @@ -66,42 +66,28 @@ diff --git a/src/bootstrap/src/core/config/config.rs b/src/bootstrap/src/core/co index a656927b1f6..44fc5546fac 100644 --- a/src/bootstrap/src/core/config/config.rs +++ b/src/bootstrap/src/core/config/config.rs -@@ -2249,14 +2249,6 @@ pub fn parse_download_ci_llvm<'a>( - ); +@@ -2249,7 +2249,7 @@ pub fn parse_download_ci_llvm<'a>( } -- #[cfg(not(test))] + #[cfg(not(test))] - if b && dwn_ctx.is_running_on_ci && CiEnv::is_rust_lang_managed_ci_job() { -- // On rust-lang CI, we must always rebuild LLVM if there were any modifications to it -- panic!( -- "\`llvm.download-ci-llvm\` cannot be set to \`true\` on CI. Use \`if-unchanged\` instead." -- ); -- } -- - // If download-ci-llvm=true we also want to check that CI llvm is available - b && llvm::is_ci_llvm_available_for_target(&dwn_ctx.host_target, asserts) - } ++ if false && dwn_ctx.is_running_on_ci && CiEnv::is_rust_lang_managed_ci_job() { + // On rust-lang CI, we must always rebuild LLVM if there were any modifications to it + panic!( + "\`llvm.download-ci-llvm\` cannot be set to \`true\` on CI. Use \`if-unchanged\` instead." 
diff --git a/src/build_helper/src/git.rs b/src/build_helper/src/git.rs index 330fb465de..a4593ed96f 100644 --- a/src/build_helper/src/git.rs +++ b/src/build_helper/src/git.rs -@@ -218,15 +218,7 @@ pub fn get_closest_upstream_commit( +@@ -218,7 +218,7 @@ pub fn get_closest_upstream_commit( config: &GitConfig<'_>, env: CiEnv, ) -> Result, String> { - let base = match env { -- CiEnv::None => "HEAD", -- CiEnv::GitHubActions => { -- // On CI, we should always have a non-upstream merge commit at the tip, -- // and our first parent should be the most recently merged upstream commit. -- // We thus simply return our first parent. -- return resolve_commit_sha(git_dir, "HEAD^1").map(Some); -- } -- }; -+ let base = "HEAD"; - - let mut git = Command::new("git"); - ++ let base = match CiEnv::None { + CiEnv::None => "HEAD", + CiEnv::GitHubActions => { + // On CI, we should always have a non-upstream merge commit at the tip, EOF popd From 72adcde31d33f626a6860b264be88c3d60747a4e Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Tue, 17 Feb 2026 17:22:33 +0100 Subject: [PATCH 69/90] test interleaving load/store roundtrip --- .../crates/core_arch/src/aarch64/neon/mod.rs | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs index 135d0a156dc3f..3777cc7bdf79a 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs @@ -1173,6 +1173,104 @@ mod tests { test_vld1q_p16_x3(p16, 24, poly16x8x3_t, vst1q_p16_x3, vld1q_p16_x3); test_vld1q_p16_x4(p16, 32, poly16x8x4_t, vst1q_p16_x4, vld1q_p16_x4); } + + wide_store_load_roundtrip_neon! 
{ + test_vld2_f32_x2(f32, 4, float32x2x2_t, vst2_f32, vld2_f32); + test_vld2_f32_x3(f32, 6, float32x2x3_t, vst3_f32, vld3_f32); + test_vld2_f32_x4(f32, 8, float32x2x4_t, vst4_f32, vld4_f32); + + test_vld2q_f32_x2(f32, 8, float32x4x2_t, vst2q_f32, vld2q_f32); + test_vld3q_f32_x3(f32, 12, float32x4x3_t, vst3q_f32, vld3q_f32); + test_vld4q_f32_x4(f32, 16, float32x4x4_t, vst4q_f32, vld4q_f32); + + test_vld2_f64_x2(f64, 2, float64x1x2_t, vst2_f64, vld2_f64); + test_vld2_f64_x3(f64, 3, float64x1x3_t, vst3_f64, vld3_f64); + test_vld2_f64_x4(f64, 4, float64x1x4_t, vst4_f64, vld4_f64); + + test_vld2q_f64_x2(f64, 4, float64x2x2_t, vst2q_f64, vld2q_f64); + test_vld3q_f64_x3(f64, 6, float64x2x3_t, vst3q_f64, vld3q_f64); + test_vld4q_f64_x4(f64, 8, float64x2x4_t, vst4q_f64, vld4q_f64); + + test_vld2_s8_x2(i8, 16, int8x8x2_t, vst2_s8, vld2_s8); + test_vld2_s8_x3(i8, 24, int8x8x3_t, vst3_s8, vld3_s8); + test_vld2_s8_x4(i8, 32, int8x8x4_t, vst4_s8, vld4_s8); + + test_vld2q_s8_x2(i8, 32, int8x16x2_t, vst2q_s8, vld2q_s8); + test_vld3q_s8_x3(i8, 48, int8x16x3_t, vst3q_s8, vld3q_s8); + test_vld4q_s8_x4(i8, 64, int8x16x4_t, vst4q_s8, vld4q_s8); + + test_vld2_s16_x2(i16, 8, int16x4x2_t, vst2_s16, vld2_s16); + test_vld2_s16_x3(i16, 12, int16x4x3_t, vst3_s16, vld3_s16); + test_vld2_s16_x4(i16, 16, int16x4x4_t, vst4_s16, vld4_s16); + + test_vld2q_s16_x2(i16, 16, int16x8x2_t, vst2q_s16, vld2q_s16); + test_vld3q_s16_x3(i16, 24, int16x8x3_t, vst3q_s16, vld3q_s16); + test_vld4q_s16_x4(i16, 32, int16x8x4_t, vst4q_s16, vld4q_s16); + + test_vld2_s32_x2(i32, 4, int32x2x2_t, vst2_s32, vld2_s32); + test_vld2_s32_x3(i32, 6, int32x2x3_t, vst3_s32, vld3_s32); + test_vld2_s32_x4(i32, 8, int32x2x4_t, vst4_s32, vld4_s32); + + test_vld2q_s32_x2(i32, 8, int32x4x2_t, vst2q_s32, vld2q_s32); + test_vld3q_s32_x3(i32, 12, int32x4x3_t, vst3q_s32, vld3q_s32); + test_vld4q_s32_x4(i32, 16, int32x4x4_t, vst4q_s32, vld4q_s32); + + test_vld2_s64_x2(i64, 2, int64x1x2_t, vst2_s64, vld2_s64); + test_vld2_s64_x3(i64, 3, 
int64x1x3_t, vst3_s64, vld3_s64); + test_vld2_s64_x4(i64, 4, int64x1x4_t, vst4_s64, vld4_s64); + + test_vld2q_s64_x2(i64, 4, int64x2x2_t, vst2q_s64, vld2q_s64); + test_vld3q_s64_x3(i64, 6, int64x2x3_t, vst3q_s64, vld3q_s64); + test_vld4q_s64_x4(i64, 8, int64x2x4_t, vst4q_s64, vld4q_s64); + + test_vld2_u8_x2(u8, 16, uint8x8x2_t, vst2_u8, vld2_u8); + test_vld2_u8_x3(u8, 24, uint8x8x3_t, vst3_u8, vld3_u8); + test_vld2_u8_x4(u8, 32, uint8x8x4_t, vst4_u8, vld4_u8); + + test_vld2q_u8_x2(u8, 32, uint8x16x2_t, vst2q_u8, vld2q_u8); + test_vld3q_u8_x3(u8, 48, uint8x16x3_t, vst3q_u8, vld3q_u8); + test_vld4q_u8_x4(u8, 64, uint8x16x4_t, vst4q_u8, vld4q_u8); + + test_vld2_u16_x2(u16, 8, uint16x4x2_t, vst2_u16, vld2_u16); + test_vld2_u16_x3(u16, 12, uint16x4x3_t, vst3_u16, vld3_u16); + test_vld2_u16_x4(u16, 16, uint16x4x4_t, vst4_u16, vld4_u16); + + test_vld2q_u16_x2(u16, 16, uint16x8x2_t, vst2q_u16, vld2q_u16); + test_vld3q_u16_x3(u16, 24, uint16x8x3_t, vst3q_u16, vld3q_u16); + test_vld4q_u16_x4(u16, 32, uint16x8x4_t, vst4q_u16, vld4q_u16); + + test_vld2_u32_x2(u32, 4, uint32x2x2_t, vst2_u32, vld2_u32); + test_vld2_u32_x3(u32, 6, uint32x2x3_t, vst3_u32, vld3_u32); + test_vld2_u32_x4(u32, 8, uint32x2x4_t, vst4_u32, vld4_u32); + + test_vld2q_u32_x2(u32, 8, uint32x4x2_t, vst2q_u32, vld2q_u32); + test_vld3q_u32_x3(u32, 12, uint32x4x3_t, vst3q_u32, vld3q_u32); + test_vld4q_u32_x4(u32, 16, uint32x4x4_t, vst4q_u32, vld4q_u32); + + test_vld2_u64_x2(u64, 2, uint64x1x2_t, vst2_u64, vld2_u64); + test_vld2_u64_x3(u64, 3, uint64x1x3_t, vst3_u64, vld3_u64); + test_vld2_u64_x4(u64, 4, uint64x1x4_t, vst4_u64, vld4_u64); + + test_vld2q_u64_x2(u64, 4, uint64x2x2_t, vst2q_u64, vld2q_u64); + test_vld3q_u64_x3(u64, 6, uint64x2x3_t, vst3q_u64, vld3q_u64); + test_vld4q_u64_x4(u64, 8, uint64x2x4_t, vst4q_u64, vld4q_u64); + + test_vld2_p8_x2(p8, 16, poly8x8x2_t, vst2_p8, vld2_p8); + test_vld2_p8_x3(p8, 24, poly8x8x3_t, vst3_p8, vld3_p8); + test_vld2_p8_x4(p8, 32, poly8x8x4_t, vst4_p8, vld4_p8); + + 
test_vld2q_p8_x2(p8, 32, poly8x16x2_t, vst2q_p8, vld2q_p8); + test_vld3q_p8_x3(p8, 48, poly8x16x3_t, vst3q_p8, vld3q_p8); + test_vld4q_p8_x4(p8, 64, poly8x16x4_t, vst4q_p8, vld4q_p8); + + test_vld2_p16_x2(p16, 8, poly16x4x2_t, vst2_p16, vld2_p16); + test_vld2_p16_x3(p16, 12, poly16x4x3_t, vst3_p16, vld3_p16); + test_vld2_p16_x4(p16, 16, poly16x4x4_t, vst4_p16, vld4_p16); + + test_vld2q_p16_x2(p16, 16, poly16x8x2_t, vst2q_p16, vld2q_p16); + test_vld3q_p16_x3(p16, 24, poly16x8x3_t, vst3q_p16, vld3q_p16); + test_vld4q_p16_x4(p16, 32, poly16x8x4_t, vst4q_p16, vld4q_p16); + } } #[cfg(test)] From 061b5225f4c06d3d8055d3517c6094564e2ea4d4 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Tue, 17 Feb 2026 18:18:10 +0100 Subject: [PATCH 70/90] fix interleaving read/write not roundtripping on aarch64_be --- .../core_arch/src/aarch64/neon/generated.rs | 1413 +------------- .../src/arm_shared/neon/generated.rs | 1642 ++++------------- .../spec/neon/aarch64.spec.yml | 24 +- .../spec/neon/arm_shared.spec.yml | 7 + 4 files changed, 381 insertions(+), 2705 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index a0647551e4a7a..9a8a9ad59e13a 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -11962,28 +11962,12 @@ pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { transmute(vld2q_s64(transmute(a))) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] -#[doc = "## 
Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { - let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val -} #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"] #[doc = "## Safety"] @@ -12389,29 +12373,12 @@ pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { transmute(vld3q_s64(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { - let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; - ret_val -} #[doc = "Load single 4-element structure and replicate to all lanes of four 
registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"] #[doc = "## Safety"] @@ -12825,30 +12792,12 @@ pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { transmute(vld4q_s64(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { - let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) }; - ret_val -} #[doc = "Load-acquire RCpc one single-element structure to one lane of one register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_s64)"] #[doc = "## Safety"] @@ -19739,7 +19688,6 @@ pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] @@ -19747,38 +19695,8 @@ pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x16x2_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19786,43 +19704,8 @@ pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { - let mut a: uint8x16x2_t = a; - a.0 = unsafe { - 
simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19830,38 +19713,8 @@ pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x16x2_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19869,40 +19722,6 @@ pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { - let mut a: poly8x16x2_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"] #[inline(always)] #[target_feature(enable = "neon")] @@ -19955,7 +19774,6 @@ pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ 
-19963,46 +19781,8 @@ pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x16x3_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.2 = unsafe { - simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = - transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20010,51 +19790,8 @@ pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub 
fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { - let mut a: uint8x16x3_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.2 = unsafe { - simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = - transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20062,46 +19799,8 @@ pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x16x3_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.2 = unsafe { - simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0] - ) - }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = - transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20109,48 +19808,6 @@ pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { - let mut a: poly8x16x3_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.2 = unsafe { - simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = - transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"] 
#[inline(always)] #[target_feature(enable = "neon")] @@ -20215,7 +19872,6 @@ pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20231,64 +19887,14 @@ pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x16x4_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.2 = unsafe { - simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.3 = unsafe { - simd_shuffle!( - a.3, - a.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { unsafe { - let ret_val: uint8x8_t = transmute(vqtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] -#[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { - unsafe { - transmute(vqtbl4q( + transmute(vqtbl4q( transmute(a.0), transmute(a.1), transmute(a.2), @@ -20298,63 +19904,8 @@ pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { - let mut a: uint8x16x4_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.2 = unsafe { - simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.3 = unsafe { - simd_shuffle!( - a.3, - a.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(vqtbl4q( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = 
"neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20370,58 +19921,8 @@ pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x16x4_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.2 = unsafe { - simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.3 = unsafe { - simd_shuffle!( - a.3, - a.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(vqtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20436,60 +19937,6 @@ pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { )) } } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] 
-#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { - let mut a: poly8x16x4_t = a; - a.0 = unsafe { - simd_shuffle!( - a.0, - a.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.1 = unsafe { - simd_shuffle!( - a.1, - a.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.2 = unsafe { - simd_shuffle!( - a.2, - a.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - a.3 = unsafe { - simd_shuffle!( - a.3, - a.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(vqtbl4q( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - b, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"] #[inline(always)] @@ -20629,7 +20076,6 @@ pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20637,39 +20083,8 @@ pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] 
-#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x16x2_t = b; - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20677,46 +20092,8 @@ pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { - let mut b: uint8x16x2_t = b; - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe 
{ - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = - transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20724,39 +20101,8 @@ pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x16x2_t = b; - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20764,43 +20110,6 @@ pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { - let mut b: poly8x16x2_t = b; - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = - transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"] #[inline(always)] #[target_feature(enable = "neon")] @@ -20860,7 +20169,6 @@ pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { #[doc = "Extended table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20876,52 +20184,8 @@ pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x16x3_t = b; - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.2 = unsafe { - simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(vqtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20937,58 +20201,8 @@ pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { } } #[doc = "Extended table look-up"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { - let mut b: uint8x16x3_t = b; - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.2 = unsafe { - simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(vqtbx3q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21004,52 +20218,8 @@ pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn 
vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x16x3_t = b; - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.2 = unsafe { - simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(vqtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21065,55 +20235,6 @@ pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { - let mut b: poly8x16x3_t = b; - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0] - ) - }; - b.2 = unsafe { - simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(vqtbx3q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - c, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] #[inline(always)] #[target_feature(enable = "neon")] @@ -21183,168 +20304,21 @@ pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { vqtbx4(a, b.0, b.1, b.2, b.3, c) } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { - vqtbx4q(a, b.0, b.1, b.2, b.3, c) -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] -#[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { - unsafe { - transmute(vqtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )) - } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] 
-#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x16x4_t = b; - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.2 = unsafe { - simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.3 = unsafe { - simd_shuffle!( - b.3, - b.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(vqtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] -#[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { - unsafe { - transmute(vqtbx4q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )) - } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx4q_u8(a: uint8x16_t, b: 
uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { - let mut b: uint8x16x4_t = b; - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.2 = unsafe { - simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.3 = unsafe { - simd_shuffle!( - b.3, - b.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(vqtbx4q( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] +#[inline(always)] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { + vqtbx4q(a, b.0, b.1, b.2, b.3, c) } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { +pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { 
unsafe { transmute(vqtbx4( transmute(a), @@ -21357,66 +20331,32 @@ pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x16x4_t = b; - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.2 = unsafe { - simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.3 = unsafe { - simd_shuffle!( - b.3, - b.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { unsafe { - let ret_val: poly8x8_t = transmute(vqtbx4( + transmute(vqtbx4q( transmute(a), transmute(b.0), transmute(b.1), transmute(b.2), transmute(b.3), c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + )) } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, 
assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { +pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { unsafe { - transmute(vqtbx4q( + transmute(vqtbx4( transmute(a), transmute(b.0), transmute(b.1), @@ -21429,58 +20369,19 @@ pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { - let mut b: poly8x16x4_t = b; - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { - simd_shuffle!( - b.0, - b.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.1 = unsafe { - simd_shuffle!( - b.1, - b.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.2 = unsafe { - simd_shuffle!( - b.2, - b.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - b.3 = unsafe { - simd_shuffle!( - b.3, - b.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - let c: uint8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: poly8x16_t = transmute(vqtbx4q( + transmute(vqtbx4q( transmute(a), transmute(b.0), transmute(b.1), transmute(b.2), transmute(b.3), c, - )); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + )) } } #[doc = "Rotate and exclusive OR"] @@ -27421,7 +26322,6 @@ pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { #[doc = "Table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27429,26 +26329,8 @@ pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x8x2_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27456,23 +26338,6 @@ pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since 
= "1.59.0")] -pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x8x2_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[inline(always)] #[target_feature(enable = "neon")] @@ -27488,7 +26353,6 @@ pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27500,31 +26364,8 @@ pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x8x3_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let x = uint8x16x2_t( - vcombine_u8(a.0, a.1), - vcombine_u8(a.2, 
unsafe { crate::mem::zeroed() }), - ); - unsafe { - let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27536,28 +26377,6 @@ pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x8x3_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let x = poly8x16x2_t( - vcombine_p8(a.0, a.1), - vcombine_p8(a.2, unsafe { crate::mem::zeroed() }), - ); - unsafe { - let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[inline(always)] #[target_feature(enable = "neon")] @@ -27570,7 +26389,6 @@ pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { #[doc = "Table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27579,29 +26397,8 @@ pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { - let mut a: uint8x8x4_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)); - unsafe { - let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27609,26 +26406,6 @@ pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)); unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) } } -#[doc = "Table look-up"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { - let mut a: poly8x8x4_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)); - unsafe { - let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] #[inline(always)] @@ -27698,7 +26475,6 @@ pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27706,28 +26482,8 @@ pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, 
assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x8x2_t = b; - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = - transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27735,25 +26491,6 @@ pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x8x2_t = b; - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = - transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, 
b.1)), c)); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[inline(always)] #[target_feature(enable = "neon")] @@ -27780,7 +26517,6 @@ pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27798,36 +26534,8 @@ pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x8x3_t = b; - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let x = uint8x16x2_t( - vcombine_u8(b.0, b.1), - vcombine_u8(b.2, unsafe { crate::mem::zeroed() }), - ); - unsafe { - let ret_val: uint8x8_t = transmute(simd_select( - simd_lt::(transmute(c), transmute(u8x8::splat(24))), - transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), - a, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Extended 
table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27845,33 +26553,6 @@ pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x8x3_t = b; - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let x = poly8x16x2_t( - vcombine_p8(b.0, b.1), - vcombine_p8(b.2, unsafe { crate::mem::zeroed() }), - ); - unsafe { - let ret_val: poly8x8_t = transmute(simd_select( - simd_lt::(transmute(c), transmute(u8x8::splat(24))), - transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)), - a, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[inline(always)] #[target_feature(enable = "neon")] @@ -27890,7 +26571,6 @@ pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { #[doc = "Extended table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27905,34 +26585,8 @@ pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { - let mut b: uint8x8x4_t = b; - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(vqtbx2( - transmute(a), - transmute(vcombine_u8(b.0, b.1)), - transmute(vcombine_u8(b.2, b.3)), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -27946,31 +26600,6 @@ pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { )) } } -#[doc = "Extended table look-up"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbx))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { - let mut b: poly8x8x4_t = b; - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(vqtbx2( - transmute(a), - transmute(vcombine_p8(b.0, b.1)), - transmute(vcombine_p8(b.2, b.3)), - c, - )); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"] #[inline(always)] diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index d05d376402257..06a6381ccd3d7 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -20755,7 +20755,6 @@ pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20775,11 +20774,10 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { transmute(vld2_s8(transmute(a))) 
} #[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20795,18 +20793,14 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { - let mut ret_val: uint8x8x2_t = transmute(vld2_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { + transmute(vld2q_s8(transmute(a))) } #[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20822,15 +20816,14 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { - transmute(vld2q_s8(transmute(a))) +pub unsafe fn vld2_u16(a: *const u16) -> 
uint16x4x2_t { + transmute(vld2_s16(transmute(a))) } #[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20846,30 +20839,14 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { - let mut ret_val: uint8x16x2_t = transmute(vld2q_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val +pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { + transmute(vld2q_s16(transmute(a))) } #[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20885,15 +20862,14 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { - transmute(vld2_s16(transmute(a))) +pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { + transmute(vld2_s32(transmute(a))) } #[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20909,18 +20885,14 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { - let mut ret_val: uint16x4x2_t = transmute(vld2_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { + transmute(vld2q_s32(transmute(a))) } #[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20936,15 +20908,14 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { - transmute(vld2q_s16(transmute(a))) +pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { + transmute(vld2_s8(transmute(a))) } #[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20960,18 +20931,14 @@ pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { - let mut ret_val: uint16x8x2_t = transmute(vld2q_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { + transmute(vld2q_s8(transmute(a))) } #[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20987,15 +20954,14 @@ pub unsafe fn vld2q_u16(a: *const 
u16) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { - transmute(vld2_s32(transmute(a))) +pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { + transmute(vld2_s16(transmute(a))) } #[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -21011,386 +20977,116 @@ pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { - let mut ret_val: uint32x2x2_t = transmute(vld2_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val +pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { + transmute(vld2q_s16(transmute(a))) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { - transmute(vld2q_s32(transmute(a))) +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f16.p0")] + fn _vld3_dup_f16(ptr: *const f16, size: i32) -> float16x4x3_t; + } + _vld3_dup_f16(a as _, 2) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn 
vld2q_u32(a: *const u32) -> uint32x4x2_t { - let mut ret_val: uint32x4x2_t = transmute(vld2q_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8f16.p0")] + fn _vld3q_dup_f16(ptr: *const f16, size: i32) -> float16x8x3_t; + } + _vld3q_dup_f16(a as _, 2) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ld3r) )] -pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { - transmute(vld2_s8(transmute(a))) +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] 
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v4f16.p0" + )] + fn _vld3_dup_f16(ptr: *const f16) -> float16x4x3_t; + } + _vld3_dup_f16(a as _) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ld3r) )] -pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { - let mut ret_val: poly8x8x2_t = transmute(vld2_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( 
+ any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v8f16.p0" + )] + fn _vld3q_dup_f16(ptr: *const f16) -> float16x8x3_t; + } + _vld3q_dup_f16(a as _) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { - transmute(vld2q_s8(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld3r))] +pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3r.v2f32.p0" + )] + fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; + } + _vld3_dup_f32(a as _) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "big")] 
-#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { - let mut ret_val: poly8x16x2_t = transmute(vld2q_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { - transmute(vld2_s16(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { - let mut ret_val: poly16x4x2_t = transmute(vld2_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { - transmute(vld2q_s16(transmute(a))) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "big")] 
-#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { - let mut ret_val: poly16x8x2_t = transmute(vld2q_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val -} -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f16.p0")] - fn _vld3_dup_f16(ptr: *const f16, size: i32) -> float16x4x3_t; - } - _vld3_dup_f16(a as _, 2) -} -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] 
-#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8f16.p0")] - fn _vld3q_dup_f16(ptr: *const f16, size: i32) -> float16x8x3_t; - } - _vld3q_dup_f16(a as _, 2) -} -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) -)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4f16.p0" - )] - fn _vld3_dup_f16(ptr: *const f16) -> float16x4x3_t; - } - _vld3_dup_f16(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(ld3r) -)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8f16.p0" - )] - fn _vld3q_dup_f16(ptr: *const f16) -> float16x8x3_t; - } - _vld3q_dup_f16(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2f32.p0" - )] - fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; - } - _vld3_dup_f32(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] @@ -23396,7 +23092,6 @@ pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23416,11 +23111,10 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { transmute(vld3_s8(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23436,19 +23130,14 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { - let mut ret_val: uint8x8x3_t = transmute(vld3_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { + transmute(vld3q_s8(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ 
-23464,15 +23153,14 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { - transmute(vld3q_s8(transmute(a))) +pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { + transmute(vld3_s16(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23488,37 +23176,14 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { - let mut ret_val: uint8x16x3_t = transmute(vld3q_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val +pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { + transmute(vld3q_s16(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23534,15 +23199,14 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { - transmute(vld3_s16(transmute(a))) +pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { + transmute(vld3_s32(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23558,19 +23222,14 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { - let mut ret_val: uint16x4x3_t = transmute(vld3_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { + transmute(vld3q_s32(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23586,15 +23245,14 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { - transmute(vld3q_s16(transmute(a))) +pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { + transmute(vld3_s8(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23610,19 +23268,14 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { - let mut ret_val: uint16x8x3_t = transmute(vld3q_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +pub unsafe fn 
vld3q_p8(a: *const p8) -> poly8x16x3_t { + transmute(vld3q_s8(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23638,15 +23291,14 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { - transmute(vld3_s32(transmute(a))) +pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { + transmute(vld3_s16(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23662,397 +23314,115 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { - let mut ret_val: uint32x2x3_t = transmute(vld3_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 
0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; - ret_val +pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { + transmute(vld3q_s16(transmute(a))) } #[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { - transmute(vld3q_s32(transmute(a))) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] + fn _vld3q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x3_t; + } + _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] 
+#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { - let mut ret_val: uint32x4x3_t = transmute(vld3q_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f16.p0")] + fn _vld4_dup_f16(ptr: *const f16, size: i32) -> float16x4x4_t; + } + _vld4_dup_f16(a as _, 2) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { - transmute(vld3_s8(transmute(a))) +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8f16.p0")] + fn _vld4q_dup_f16(ptr: *const f16, size: i32) -> float16x8x4_t; + } + _vld4q_dup_f16(a as _, 2) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ld4r) )] -pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { - let mut ret_val: poly8x8x3_t = transmute(vld3_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4f16.p0" + )] + fn _vld4_dup_f16(ptr: *const f16) -> float16x4x4_t; + } + _vld4_dup_f16(a as _) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature 
= "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ld4r) )] -pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { - transmute(vld3q_s8(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { - let mut ret_val: poly8x16x3_t = transmute(vld3q_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { - transmute(vld3_s16(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { - let mut ret_val: poly16x4x3_t = transmute(vld3_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { - transmute(vld3q_s16(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { - let mut ret_val: poly16x8x3_t = transmute(vld3q_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, 
LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] - fn _vld3q_lane_f32( - ptr: *const i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i32, - size: i32, - ) -> float32x4x3_t; - } - _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f16.p0")] - fn _vld4_dup_f16(ptr: *const f16, size: i32) -> float16x4x4_t; - } - _vld4_dup_f16(a as _, 2) -} -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn 
vld4q_dup_f16(a: *const f16) -> float16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8f16.p0")] - fn _vld4q_dup_f16(ptr: *const f16, size: i32) -> float16x8x4_t; - } - _vld4q_dup_f16(a as _, 2) -} -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4f16.p0" - )] - fn _vld4_dup_f16(ptr: *const f16) -> float16x4x4_t; - } - _vld4_dup_f16(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8f16.p0" - )] - fn _vld4q_dup_f16(ptr: *const f16) -> float16x8x4_t; - } - _vld4q_dup_f16(a as _) 
+#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8f16.p0" + )] + fn _vld4q_dup_f16(ptr: *const f16) -> float16x8x4_t; + } + _vld4q_dup_f16(a as _) } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] @@ -25929,354 +25299,18 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld4_lane_s32::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon 
intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4q_lane_s32::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4_lane_s8::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] 
-#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4q_lane_s16::(transmute(a), transmute(b))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg_attr(target_arch = "arm", target_feature(enable 
= "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { - transmute(vld4_s64(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v1i64.p0" - )] - fn _vld4_s64(ptr: *const int64x1_t) -> int64x1x4_t; - } - _vld4_s64(a as _) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0")] - fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; - } - _vld4_s64(a as *const i8, 8) -} -#[doc = "Load multiple 4-element structures to four registers"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { - transmute(vld4_s64(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { - transmute(vld4_s8(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { - let mut ret_val: uint8x8x4_t = transmute(vld4_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { - transmute(vld4q_s8(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic 
unsafe"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { - let mut ret_val: uint8x16x4_t = transmute(vld4q_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.3 = unsafe { - simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(ld4, LANE = 0) )] 
+#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26285,22 +25319,23 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { - transmute(vld4_s16(transmute(a))) +pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld4_lane_s32::(transmute(a), transmute(b))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26309,27 +25344,23 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { - let mut ret_val: uint16x4x4_t = transmute(vld4_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - 
ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4q_lane_s32::(transmute(a), transmute(b))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26338,22 +25369,23 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { - transmute(vld4q_s16(transmute(a))) +pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4_lane_s8::(transmute(a), transmute(b))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26362,27 +25394,23 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { - let mut ret_val: uint16x8x4_t = transmute(vld4q_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4_lane_s16::(transmute(a), transmute(b))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26391,21 +25419,79 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { - transmute(vld4_s32(transmute(a))) +pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline(always)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { + transmute(vld4_s64(transmute(a))) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline(always)] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = 
"neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4.v1i64.p0" + )] + fn _vld4_s64(ptr: *const int64x1_t) -> int64x1x4_t; + } + _vld4_s64(a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline(always)] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0")] + fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; + } + _vld4_s64(a as *const i8, 8) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -26415,20 +25501,14 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { - let mut ret_val: uint32x2x4_t = 
transmute(vld4_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) }; - ret_val +pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { + transmute(vld4_s64(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -26444,15 +25524,14 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { - transmute(vld4q_s32(transmute(a))) +pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { + transmute(vld4_s8(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -26468,20 +25547,14 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { - let mut ret_val: uint32x4x4_t = transmute(vld4q_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { + transmute(vld4q_s8(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -26497,15 +25570,14 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { - transmute(vld4_s8(transmute(a))) +pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { + transmute(vld4_s16(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -26521,20 +25593,14 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { - let mut ret_val: poly8x8x4_t = transmute(vld4_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { + transmute(vld4q_s16(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -26550,15 +25616,14 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { - transmute(vld4q_s8(transmute(a))) +pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { + transmute(vld4_s32(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -26574,44 +25639,14 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { - let mut ret_val: poly8x16x4_t = transmute(vld4q_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.3 = unsafe { - simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val +pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { + transmute(vld4q_s32(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -26627,15 +25662,14 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { - transmute(vld4_s16(transmute(a))) +pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { + transmute(vld4_s8(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -26651,20 +25685,14 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { - let mut ret_val: poly16x4x4_t = transmute(vld4_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; - ret_val +pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { + transmute(vld4q_s8(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -26680,15 +25708,14 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { - transmute(vld4q_s16(transmute(a))) +pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { + transmute(vld4_s16(transmute(a))) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -26705,12 +25732,7 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { - let mut ret_val: poly16x8x4_t = transmute(vld4q_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val + transmute(vld4q_s16(transmute(a))) } #[doc = "Store SIMD&FP register (immediate offset)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldrq_p128)"] diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index 95f23ebd9a0ff..a10403de41252 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ 
b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -3715,6 +3715,7 @@ intrinsics: return_type: "{neon_type[1]}" attr: [*neon-stable] assert_instr: [ld2] + big_endian_inverse: false safety: unsafe: [neon] types: @@ -4070,6 +4071,7 @@ intrinsics: arguments: ["a: {type[0]}"] return_type: "{neon_type[1]}" attr: [*neon-stable] + big_endian_inverse: false safety: unsafe: [neon] assert_instr: [ld3] @@ -4216,6 +4218,7 @@ intrinsics: return_type: "{neon_type[1]}" attr: [*neon-stable] assert_instr: [ld4] + big_endian_inverse: false safety: unsafe: [neon] types: @@ -4323,6 +4326,7 @@ intrinsics: - *neon-stable static_defs: - "const LANE: i32" + big_endian_inverse: false safety: unsafe: [neon] types: @@ -4372,6 +4376,7 @@ intrinsics: - *neon-stable static_defs: - "const LANE: i32" + big_endian_inverse: false safety: unsafe: [neon] types: @@ -12176,6 +12181,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - [uint8x8_t, uint8x8x4_t, uint8x8_t] @@ -12243,6 +12249,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - [uint8x8x2_t, 'uint8x8_t', 'uint8x8_t'] @@ -12296,7 +12303,7 @@ intrinsics: types: - [uint8x8x3_t, 'uint8x8_t', 'uint8x16x2', 'uint8x8_t'] - [poly8x8x3_t, 'uint8x8_t', 'poly8x16x2', 'poly8x8_t'] - big_endian_inverse: true + big_endian_inverse: false compose: - Let: - x @@ -12348,7 +12355,7 @@ intrinsics: types: - [uint8x8x4_t, 'uint8x8_t', 'uint8x16x2', 'uint8x8_t'] - [poly8x8x4_t, 'uint8x8_t', 'poly8x16x2', 'poly8x8_t'] - big_endian_inverse: true + big_endian_inverse: false compose: - Let: - x @@ -12457,6 +12464,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - FnCall: [stable, ['feature = 
"neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - [uint8x8_t, 'uint8x8x2_t', uint8x8_t] @@ -12518,7 +12526,7 @@ intrinsics: types: - [uint8x8_t, 'uint8x8x3_t', 'uint8x16x2', 'u8x8::splat(24)', 'uint8x8'] - [poly8x8_t, 'poly8x8x3_t', 'poly8x16x2', 'u8x8::splat(24)', 'poly8x8'] - big_endian_inverse: true + big_endian_inverse: false compose: - Let: - x @@ -12601,6 +12609,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - ['uint8x16x2_t', uint8x8_t, 'vqtbl2', 'uint8x8_t'] @@ -12637,6 +12646,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - [uint8x8_t, 'uint8x16x2_t', uint8x8_t, 'vqtbx2'] @@ -12660,6 +12670,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - ['int8x8_t', 'int8x16x3_t', uint8x8_t, 'vqtbl3'] @@ -12674,6 +12685,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - ['uint8x8_t', 'uint8x16x3_t', uint8x8_t, 'vqtbl3'] @@ -12711,6 +12723,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - [uint8x8_t, 'uint8x16x3_t', uint8x8_t, 'vqtbx3'] @@ -12735,6 +12748,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - 
['int8x16x4_t', uint8x8_t, 'vqtbl4', 'int8x8_t'] @@ -12749,6 +12763,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - ['uint8x16x4_t', uint8x8_t, 'vqtbl4', 'uint8x8_t'] @@ -12787,6 +12802,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - [uint8x8_t, 'uint8x16x4_t', uint8x8_t, 'vqtbx4'] @@ -12851,6 +12867,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - ["vqtbl3", int8x16_t, uint8x8_t, int8x8_t] @@ -12870,6 +12887,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] + big_endian_inverse: false safety: safe types: - ["vqtbl4", int8x16_t, uint8x8_t, int8x8_t] diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 2dd2fb0d3f1d0..76718dcecae66 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -2976,6 +2976,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld2]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable + big_endian_inverse: false safety: unsafe: [neon] types: @@ -3006,6 +3007,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable + big_endian_inverse: false safety: unsafe: [neon] types: @@ -3102,6 +3104,7 @@ intrinsics: - *neon-cfg-arm-unstable 
static_defs: - "const LANE: i32" + big_endian_inverse: false safety: unsafe: [neon] types: @@ -4106,6 +4109,7 @@ intrinsics: - *neon-not-arm-stable - *neon-cfg-arm-unstable static_defs: ['const LANE: i32'] + big_endian_inverse: false safety: unsafe: [neon] types: @@ -4136,6 +4140,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld3]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable + big_endian_inverse: false safety: unsafe: [neon] types: @@ -4508,6 +4513,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld4]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable + big_endian_inverse: false safety: unsafe: [neon] types: @@ -4629,6 +4635,7 @@ intrinsics: - *neon-not-arm-stable - *neon-cfg-arm-unstable static_defs: ["const LANE: i32"] + big_endian_inverse: false safety: unsafe: [neon] types: From 4deff630049a22b79ad9bc624ff9408564cc1a4b Mon Sep 17 00:00:00 2001 From: Daniel Scherzer Date: Tue, 17 Feb 2026 11:19:03 -0800 Subject: [PATCH 71/90] std::r#try! - avoid link to nightly docs Use a relative link to the current version of rust-by-example rather than sending people to the nightly version. --- library/core/src/macros/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs index d900b4a21b36d..cdbb8c300455d 100644 --- a/library/core/src/macros/mod.rs +++ b/library/core/src/macros/mod.rs @@ -445,7 +445,7 @@ macro_rules! matches { /// [raw-identifier syntax][ris]: `r#try`. /// /// [propagating-errors]: https://doc.rust-lang.org/book/ch09-02-recoverable-errors-with-result.html#a-shortcut-for-propagating-errors-the--operator -/// [ris]: https://doc.rust-lang.org/nightly/rust-by-example/compatibility/raw_identifiers.html +/// [ris]: ../rust-by-example/compatibility/raw_identifiers.html /// /// `try!` matches the given [`Result`]. 
In case of the `Ok` variant, the /// expression has the value of the wrapped value. From 1f8a48a1a33753a804b926f839cef6f33a46eebf Mon Sep 17 00:00:00 2001 From: Cameron Steffen Date: Tue, 17 Feb 2026 20:44:39 -0600 Subject: [PATCH 72/90] Remove some clones in deriving --- .../src/deriving/default.rs | 8 ++--- .../src/deriving/generic/mod.rs | 31 ++++++++++--------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/compiler/rustc_builtin_macros/src/deriving/default.rs b/compiler/rustc_builtin_macros/src/deriving/default.rs index 86d53b23e1f1b..263ba2968eab4 100644 --- a/compiler/rustc_builtin_macros/src/deriving/default.rs +++ b/compiler/rustc_builtin_macros/src/deriving/default.rs @@ -67,7 +67,7 @@ fn default_struct_substructure( cx: &ExtCtxt<'_>, trait_span: Span, substr: &Substructure<'_>, - summary: &StaticFields, + summary: &StaticFields<'_>, ) -> BlockOrExpr { let expr = match summary { Unnamed(_, IsTuple::No) => cx.expr_ident(trait_span, substr.type_ident), @@ -78,16 +78,16 @@ fn default_struct_substructure( Named(fields) => { let default_fields = fields .iter() - .map(|(ident, span, default_val)| { + .map(|&(ident, span, default_val)| { let value = match default_val { // We use `Default::default()`. - None => default_call(cx, *span), + None => default_call(cx, span), // We use the field default const expression. 
Some(val) => { cx.expr(val.value.span, ast::ExprKind::ConstBlock(val.clone())) } }; - cx.field_imm(*span, *ident, value) + cx.field_imm(span, ident, value) }) .collect(); cx.expr_struct_ident(trait_span, substr.type_ident, default_fields) diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs index 5362bcde1aad8..b2fff77e3f162 100644 --- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs +++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs @@ -303,11 +303,11 @@ pub(crate) enum IsTuple { } /// Fields for a static method -pub(crate) enum StaticFields { +pub(crate) enum StaticFields<'a> { /// Tuple and unit structs/enum variants like this. Unnamed(Vec, IsTuple), /// Normal structs/struct variants. - Named(Vec<(Ident, Span, Option)>), + Named(Vec<(Ident, Span, Option<&'a AnonConst>)>), } /// A summary of the possible sets of fields. @@ -331,7 +331,7 @@ pub(crate) enum SubstructureFields<'a> { EnumDiscr(FieldInfo, Option>), /// A static method where `Self` is a struct. - StaticStruct(&'a ast::VariantData, StaticFields), + StaticStruct(&'a ast::VariantData, StaticFields<'a>), /// A static method where `Self` is an enum. 
StaticEnum(&'a ast::EnumDef), @@ -596,7 +596,7 @@ impl<'a> TraitDef<'a> { cx: &ExtCtxt<'_>, type_ident: Ident, generics: &Generics, - field_tys: Vec>, + field_tys: Vec<&ast::Ty>, methods: Vec>, is_packed: bool, ) -> Box { @@ -870,8 +870,7 @@ impl<'a> TraitDef<'a> { from_scratch: bool, is_packed: bool, ) -> Box { - let field_tys: Vec> = - struct_def.fields().iter().map(|field| field.ty.clone()).collect(); + let field_tys = Vec::from_iter(struct_def.fields().iter().map(|field| &*field.ty)); let methods = self .methods @@ -923,11 +922,13 @@ impl<'a> TraitDef<'a> { generics: &Generics, from_scratch: bool, ) -> Box { - let mut field_tys = Vec::new(); - - for variant in &enum_def.variants { - field_tys.extend(variant.data.fields().iter().map(|field| field.ty.clone())); - } + let field_tys = Vec::from_iter( + enum_def + .variants + .iter() + .flat_map(|variant| variant.data.fields()) + .map(|field| &*field.ty), + ); let methods = self .methods @@ -1160,8 +1161,8 @@ impl<'a> MethodDef<'a> { fn expand_static_struct_method_body( &self, cx: &ExtCtxt<'_>, - trait_: &TraitDef<'_>, - struct_def: &VariantData, + trait_: &TraitDef<'a>, + struct_def: &'a VariantData, type_ident: Ident, nonselflike_args: &[Box], ) -> BlockOrExpr { @@ -1480,13 +1481,13 @@ impl<'a> MethodDef<'a> { // general helper methods. 
impl<'a> TraitDef<'a> { - fn summarise_struct(&self, cx: &ExtCtxt<'_>, struct_def: &VariantData) -> StaticFields { + fn summarise_struct(&self, cx: &ExtCtxt<'_>, struct_def: &'a VariantData) -> StaticFields<'a> { let mut named_idents = Vec::new(); let mut just_spans = Vec::new(); for field in struct_def.fields() { let sp = field.span.with_ctxt(self.span.ctxt()); match field.ident { - Some(ident) => named_idents.push((ident, sp, field.default.clone())), + Some(ident) => named_idents.push((ident, sp, field.default.as_ref())), _ => just_spans.push(sp), } } From 7585786ecb24e7cc3eaad08a6706ae3fb8e63ea8 Mon Sep 17 00:00:00 2001 From: Scott McMurray Date: Sat, 14 Feb 2026 17:58:44 -0800 Subject: [PATCH 73/90] Add a mir-opt test for alignment check generation --- tests/mir-opt/alignment_checks.rs | 19 +++++++++++++ ....sized_ptr.CheckAlignment.panic-abort.diff | 27 +++++++++++++++++++ ...sized_ptr.CheckAlignment.panic-unwind.diff | 27 +++++++++++++++++++ 3 files changed, 73 insertions(+) create mode 100644 tests/mir-opt/alignment_checks.rs create mode 100644 tests/mir-opt/alignment_checks.sized_ptr.CheckAlignment.panic-abort.diff create mode 100644 tests/mir-opt/alignment_checks.sized_ptr.CheckAlignment.panic-unwind.diff diff --git a/tests/mir-opt/alignment_checks.rs b/tests/mir-opt/alignment_checks.rs new file mode 100644 index 0000000000000..6f1329cb4e645 --- /dev/null +++ b/tests/mir-opt/alignment_checks.rs @@ -0,0 +1,19 @@ +//@ compile-flags: -Copt-level=1 -Zmir-opt-level=2 -Zub-checks +//@ only-64bit +// EMIT_MIR_FOR_EACH_PANIC_STRATEGY + +#![crate_type = "lib"] + +// The diff shows exactly what is generated by the pass; +// then we check the final `-O1` output for people who want to run them +// without the codegen being too terrible. 
+ +// EMIT_MIR alignment_checks.sized_ptr.CheckAlignment.diff +pub unsafe fn sized_ptr(ptr: *const u32) -> u32 { + // CHECK-LABEL: fn sized_ptr(_1: *const u32) + // CHECK: _2 = copy _1 as usize (Transmute); + // CHECK: _3 = BitAnd(copy _2, const 3_usize); + // CHECK: _4 = Eq(copy _3, const 0_usize); + // CHECK: assert(copy _4, + *ptr +} diff --git a/tests/mir-opt/alignment_checks.sized_ptr.CheckAlignment.panic-abort.diff b/tests/mir-opt/alignment_checks.sized_ptr.CheckAlignment.panic-abort.diff new file mode 100644 index 0000000000000..c383975d9c0fa --- /dev/null +++ b/tests/mir-opt/alignment_checks.sized_ptr.CheckAlignment.panic-abort.diff @@ -0,0 +1,27 @@ +- // MIR for `sized_ptr` before CheckAlignment ++ // MIR for `sized_ptr` after CheckAlignment + + fn sized_ptr(_1: *const u32) -> u32 { + debug ptr => _1; + let mut _0: u32; ++ let mut _2: *const (); ++ let mut _3: usize; ++ let mut _4: usize; ++ let mut _5: usize; ++ let mut _6: bool; + + bb0: { ++ _2 = copy _1 as *const () (PtrToPtr); ++ _3 = copy _2 as usize (Transmute); ++ _4 = Sub(const ::ALIGN, const 1_usize); ++ _5 = BitAnd(copy _3, copy _4); ++ _6 = Eq(copy _5, const 0_usize); ++ assert(copy _6, "misaligned pointer dereference: address must be a multiple of {} but is {}", const ::ALIGN, copy _3) -> [success: bb1, unwind unreachable]; ++ } ++ ++ bb1: { + _0 = copy (*_1); + return; + } + } + diff --git a/tests/mir-opt/alignment_checks.sized_ptr.CheckAlignment.panic-unwind.diff b/tests/mir-opt/alignment_checks.sized_ptr.CheckAlignment.panic-unwind.diff new file mode 100644 index 0000000000000..c383975d9c0fa --- /dev/null +++ b/tests/mir-opt/alignment_checks.sized_ptr.CheckAlignment.panic-unwind.diff @@ -0,0 +1,27 @@ +- // MIR for `sized_ptr` before CheckAlignment ++ // MIR for `sized_ptr` after CheckAlignment + + fn sized_ptr(_1: *const u32) -> u32 { + debug ptr => _1; + let mut _0: u32; ++ let mut _2: *const (); ++ let mut _3: usize; ++ let mut _4: usize; ++ let mut _5: usize; ++ let mut _6: bool; + + 
bb0: { ++ _2 = copy _1 as *const () (PtrToPtr); ++ _3 = copy _2 as usize (Transmute); ++ _4 = Sub(const ::ALIGN, const 1_usize); ++ _5 = BitAnd(copy _3, copy _4); ++ _6 = Eq(copy _5, const 0_usize); ++ assert(copy _6, "misaligned pointer dereference: address must be a multiple of {} but is {}", const ::ALIGN, copy _3) -> [success: bb1, unwind unreachable]; ++ } ++ ++ bb1: { + _0 = copy (*_1); + return; + } + } + From dba288e0bd7b482a7d9876e0beb2e141f0fbb62d Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sun, 15 Feb 2026 00:22:53 +0100 Subject: [PATCH 74/90] lock stdout when printing a intrinsic test failure --- .../intrinsic-test/src/common/compare.rs | 21 +++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/library/stdarch/crates/intrinsic-test/src/common/compare.rs b/library/stdarch/crates/intrinsic-test/src/common/compare.rs index 5214349171591..c22d7fd4ec0aa 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/compare.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/compare.rs @@ -109,13 +109,26 @@ pub fn compare_outputs( } }) .inspect(|(intrinsic, diffs)| { - println!("Difference for intrinsic: {intrinsic}"); + use std::io::Write; + + let stdout = std::io::stdout(); + let mut out = stdout.lock(); + + writeln!(out, "Difference for intrinsic: {intrinsic}").unwrap(); diffs.into_iter().for_each(|diff| match diff { - diff::Result::Left(c) => println!("C: {c}"), - diff::Result::Right(rust) => println!("Rust: {rust}"), + diff::Result::Left(c) => { + writeln!(out, "C: {c}").unwrap(); + } + diff::Result::Right(rust) => { + writeln!(out, "Rust: {rust}").unwrap(); + } _ => (), }); - println!("****************************************************************"); + writeln!( + out, + "****************************************************************" + ) + .unwrap(); }) .count(); From 0168f07955801ec267bc92b7a6010aa8f796f139 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sat, 14 Feb 2026 22:11:00 +0100 Subject: 
[PATCH 75/90] use `intrinsics::simd` for aarch64 deinterleaving loads --- .../src/arm_shared/neon/generated.rs | 72 +++---------------- .../stdarch/crates/core_arch/src/macros.rs | 69 ++++++++++++++++++ .../spec/neon/arm_shared.spec.yml | 26 +++---- 3 files changed, 87 insertions(+), 80 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index b6951907eb56a..7b4f69a375037 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -22079,14 +22079,7 @@ pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2f32.p0" - )] - fn _vld3_f32(ptr: *const float32x2_t) -> float32x2x3_t; - } - _vld3_f32(a as _) + crate::core_arch::macros::deinterleaving_load!(f32, 2, 3, a) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] @@ -22098,14 +22091,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4f32.p0" - )] - fn _vld3q_f32(ptr: *const float32x4_t) -> float32x4x3_t; - } - _vld3q_f32(a as _) + crate::core_arch::macros::deinterleaving_load!(f32, 4, 3, a) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] @@ -22117,14 +22103,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i8.p0" - )] - fn _vld3_s8(ptr: *const int8x8_t) -> int8x8x3_t; - } - _vld3_s8(a as _) + crate::core_arch::macros::deinterleaving_load!(i8, 8, 3, a) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] @@ -22136,14 +22115,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v16i8.p0" - )] - fn _vld3q_s8(ptr: *const int8x16_t) -> int8x16x3_t; - } - _vld3q_s8(a as _) + crate::core_arch::macros::deinterleaving_load!(i8, 16, 3, a) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] @@ -22155,14 +22127,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i16.p0" - )] - fn _vld3_s16(ptr: *const int16x4_t) -> int16x4x3_t; - } - _vld3_s16(a as _) + crate::core_arch::macros::deinterleaving_load!(i16, 4, 3, a) } #[doc = "Load multiple 3-element structures to three 
registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] @@ -22174,14 +22139,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v8i16.p0" - )] - fn _vld3q_s16(ptr: *const int16x8_t) -> int16x8x3_t; - } - _vld3q_s16(a as _) + crate::core_arch::macros::deinterleaving_load!(i16, 8, 3, a) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] @@ -22193,14 +22151,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v2i32.p0" - )] - fn _vld3_s32(ptr: *const int32x2_t) -> int32x2x3_t; - } - _vld3_s32(a as _) + crate::core_arch::macros::deinterleaving_load!(i32, 2, 3, a) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] @@ -22212,14 +22163,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4i32.p0" - )] - fn _vld3q_s32(ptr: *const int32x4_t) -> int32x4x3_t; - } - _vld3q_s32(a as _) + crate::core_arch::macros::deinterleaving_load!(i32, 4, 3, a) } #[doc = 
"Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs index 353829633f018..d40ce51c746c4 100644 --- a/library/stdarch/crates/core_arch/src/macros.rs +++ b/library/stdarch/crates/core_arch/src/macros.rs @@ -186,3 +186,72 @@ macro_rules! simd_masked_store { $crate::intrinsics::simd::simd_masked_store::<_, _, _, { $align }>($mask, $ptr, $default) }; } + +pub(crate) const fn deinterleave_mask() +-> [u32; LANES] { + // Produces: [K, K+N, K+2N, ...] + let mut out = [0u32; LANES]; + let mut i = 0usize; + while i < LANES { + out[i] = (i * N + K) as u32; + i += 1; + } + out +} + +#[allow(unused)] +macro_rules! deinterleaving_load { + ($elem:ty, $lanes:literal, 2, $ptr:expr) => {{ + use $crate::core_arch::macros::deinterleave_mask; + use $crate::core_arch::simd::Simd; + use $crate::{mem::transmute, ptr}; + + type V = Simd<$elem, $lanes>; + type W = Simd<$elem, { $lanes * 2 }>; + + let w: W = ptr::read_unaligned($ptr as *const W); + + let v0: V = simd_shuffle!(w, w, deinterleave_mask::<$lanes, 2, 0>()); + let v1: V = simd_shuffle!(w, w, deinterleave_mask::<$lanes, 2, 1>()); + + transmute((v0, v1)) + }}; + + ($elem:ty, $lanes:literal, 3, $ptr:expr) => {{ + use $crate::core_arch::macros::deinterleave_mask; + use $crate::core_arch::simd::Simd; + use $crate::{mem::transmute, ptr}; + + type V = Simd<$elem, $lanes>; + type W = Simd<$elem, { $lanes * 3 }>; + + let w: W = ptr::read_unaligned($ptr as *const W); + + let v0: V = simd_shuffle!(w, w, deinterleave_mask::<$lanes, 3, 0>()); + let v1: V = simd_shuffle!(w, w, deinterleave_mask::<$lanes, 3, 1>()); + let v2: V = simd_shuffle!(w, w, deinterleave_mask::<$lanes, 3, 2>()); + + transmute((v0, v1, v2)) + }}; + + ($elem:ty, $lanes:literal, 4, $ptr:expr) => {{ + use $crate::core_arch::macros::deinterleave_mask; + use 
$crate::core_arch::simd::Simd; + use $crate::{mem::transmute, ptr}; + + type V = Simd<$elem, $lanes>; + type W = Simd<$elem, { $lanes * 4 }>; + + let w: W = ptr::read_unaligned($ptr as *const W); + + let v0: V = simd_shuffle!(w, w, deinterleave_mask::<$lanes, 4, 0>()); + let v1: V = simd_shuffle!(w, w, deinterleave_mask::<$lanes, 4, 1>()); + let v2: V = simd_shuffle!(w, w, deinterleave_mask::<$lanes, 4, 2>()); + let v3: V = simd_shuffle!(w, w, deinterleave_mask::<$lanes, 4, 3>()); + + transmute((v0, v1, v2, v3)) + }}; +} + +#[allow(unused)] +pub(crate) use deinterleaving_load; diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 3f7adbc2785a4..3b2e9f25aea54 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -3875,23 +3875,17 @@ intrinsics: safety: unsafe: [neon] types: - - ['*const i8', int8x8x3_t, '*const int8x8_t', i8] - - ['*const i16', int16x4x3_t, '*const int16x4_t', i16] - - ['*const i32', int32x2x3_t, '*const int32x2_t', i32] - - ['*const i8', int8x16x3_t, '*const int8x16_t', i8] - - ['*const i16', int16x8x3_t, '*const int16x8_t', i16] - - ['*const i32', int32x4x3_t, '*const int32x4_t', i32] - - ['*const f32', float32x2x3_t, '*const float32x2_t', f32] - - ['*const f32', float32x4x3_t, '*const float32x4_t', f32] + - ['*const i8', int8x8x3_t, i8, "8"] + - ['*const i16', int16x4x3_t, i16, "4"] + - ['*const i32', int32x2x3_t, i32, "2"] + - ['*const i8', int8x16x3_t, i8, "16"] + - ['*const i16', int16x8x3_t, i16, "8"] + - ['*const i32', int32x4x3_t, i32, "4"] + - ['*const f32', float32x2x3_t, f32, "2"] + - ['*const f32', float32x4x3_t, f32, "4"] compose: - - LLVMLink: - name: 'vld3{neon_type[1].nox}' - arguments: - - 'ptr: {type[2]}' - links: - - link: 'llvm.aarch64.neon.ld3.v{neon_type[1].lane}{type[3]}.p0' - arch: aarch64,arm64ec - - FnCall: 
['_vld3{neon_type[1].nox}', ['a as _']] + - FnCall: ["crate::core_arch::macros::deinterleaving_load!", [{ Type: "{type[2]}" }, "{type[3]}", "3", a], [], true] + - name: "vld3{neon_type[1].nox}" doc: Load multiple 3-element structures to three registers From 15e68a188ca8f007deac77a18f28973a44f72a0d Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sat, 14 Feb 2026 22:46:20 +0100 Subject: [PATCH 76/90] neon `ld3` --- .../src/arm_shared/neon/generated.rs | 27 ++--------------- .../spec/neon/arm_shared.spec.yml | 30 +++++++------------ 2 files changed, 13 insertions(+), 44 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index 7b4f69a375037..33213e58ffce5 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -22036,14 +22036,7 @@ pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v4f16.p0" - )] - fn _vld3_f16(ptr: *const f16) -> float16x4x3_t; - } - _vld3_f16(a as _) + crate::core_arch::macros::deinterleaving_load!(f16, 4, 3, a) } #[doc = "Load single 3-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f16)"] @@ -22060,14 +22053,7 @@ pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - 
link_name = "llvm.aarch64.neon.ld3.v8f16.p0" - )] - fn _vld3q_f16(ptr: *const f16) -> float16x8x3_t; - } - _vld3q_f16(a as _) + crate::core_arch::macros::deinterleaving_load!(f16, 8, 3, a) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] @@ -22983,14 +22969,7 @@ pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v1i64.p0" - )] - fn _vld3_s64(ptr: *const int64x1_t) -> int64x1x3_t; - } - _vld3_s64(a as _) + crate::ptr::read_unaligned(a.cast()) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 3b2e9f25aea54..968d5f99de84a 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -3669,19 +3669,11 @@ intrinsics: safety: unsafe: [neon] types: - - ["*const f16", float16x4x3_t, f16] - - ["*const f16", float16x8x3_t, f16] + - ["*const f16", float16x4x3_t, f16, "4"] + - ["*const f16", float16x8x3_t, f16, "8"] compose: - - LLVMLink: - name: "vld3.{neon_type[1]}" - arguments: - - "ptr: {type[0]}" - links: - - link: "llvm.aarch64.neon.ld3.v{neon_type[1].lane}{type[2]}.p0" - arch: aarch64,arm64ec - - FnCall: - - "_vld3{neon_type[1].nox}" - - - "a as _" + - FnCall: ["crate::core_arch::macros::deinterleaving_load!", [{ Type: "{type[2]}" }, "{type[3]}", "3", a], [], true] + - name: 
"vld3{neon_type[1].dup_nox}" doc: Load single 3-element structure and replicate to all lanes of two registers @@ -3900,14 +3892,12 @@ intrinsics: types: - ['*const i64', int64x1x3_t, '*const int64x1_t', i64] compose: - - LLVMLink: - name: "vld3{neon_type[1].nox}" - arguments: - - 'ptr: {type[2]}' - links: - - link: 'llvm.aarch64.neon.ld3.v{neon_type[1].lane}{type[3]}.p0' - arch: aarch64,arm64ec - - FnCall: ['_vld3{neon_type[1].nox}', ['a as _']] + - FnCall: + - 'crate::ptr::read_unaligned' + - - MethodCall: + - a + - cast + - [] - name: "vld3{neon_type[1].nox}" doc: Load multiple 3-element structures to three registers From caff33290293254c36465d6e8ddfbad491128cdc Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sat, 14 Feb 2026 23:03:39 +0100 Subject: [PATCH 77/90] neon `ld4` --- .../src/arm_shared/neon/generated.rs | 99 +++---------------- .../spec/neon/arm_shared.spec.yml | 54 ++++------ 2 files changed, 29 insertions(+), 124 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index 33213e58ffce5..45c83b880e907 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -24336,14 +24336,7 @@ pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4f16.p0" - )] - fn _vld4_f16(ptr: *const f16) -> float16x4x4_t; - } - _vld4_f16(a as _) + crate::core_arch::macros::deinterleaving_load!(f16, 4, 4, a) } #[doc = "Load single 4-element structure and replicate to all lanes of two registers"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"] @@ -24359,14 +24352,7 @@ pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8f16.p0" - )] - fn _vld4q_f16(ptr: *const f16) -> float16x8x4_t; - } - _vld4q_f16(a as _) + crate::core_arch::macros::deinterleaving_load!(f16, 8, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] @@ -24378,14 +24364,7 @@ pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2f32.p0" - )] - fn _vld4_f32(ptr: *const float32x2_t) -> float32x2x4_t; - } - _vld4_f32(a as _) + crate::core_arch::macros::deinterleaving_load!(f32, 2, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] @@ -24397,14 +24376,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4f32.p0" - )] - fn _vld4q_f32(ptr: *const float32x4_t) -> float32x4x4_t; - } - _vld4q_f32(a as _) + 
crate::core_arch::macros::deinterleaving_load!(f32, 4, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] @@ -24416,14 +24388,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i8.p0" - )] - fn _vld4_s8(ptr: *const int8x8_t) -> int8x8x4_t; - } - _vld4_s8(a as _) + crate::core_arch::macros::deinterleaving_load!(i8, 8, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] @@ -24435,14 +24400,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v16i8.p0" - )] - fn _vld4q_s8(ptr: *const int8x16_t) -> int8x16x4_t; - } - _vld4q_s8(a as _) + crate::core_arch::macros::deinterleaving_load!(i8, 16, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] @@ -24454,14 +24412,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = 
"llvm.aarch64.neon.ld4.v4i16.p0" - )] - fn _vld4_s16(ptr: *const int16x4_t) -> int16x4x4_t; - } - _vld4_s16(a as _) + crate::core_arch::macros::deinterleaving_load!(i16, 4, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] @@ -24473,14 +24424,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v8i16.p0" - )] - fn _vld4q_s16(ptr: *const int16x8_t) -> int16x8x4_t; - } - _vld4q_s16(a as _) + crate::core_arch::macros::deinterleaving_load!(i16, 8, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] @@ -24492,14 +24436,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v2i32.p0" - )] - fn _vld4_s32(ptr: *const int32x2_t) -> int32x2x4_t; - } - _vld4_s32(a as _) + crate::core_arch::macros::deinterleaving_load!(i32, 2, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] @@ -24511,14 +24448,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - 
unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v4i32.p0" - )] - fn _vld4q_s32(ptr: *const int32x4_t) -> int32x4x4_t; - } - _vld4q_s32(a as _) + crate::core_arch::macros::deinterleaving_load!(i32, 4, 4, a) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] @@ -25379,14 +25309,7 @@ pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v1i64.p0" - )] - fn _vld4_s64(ptr: *const int64x1_t) -> int64x1x4_t; - } - _vld4_s64(a as _) + crate::ptr::read_unaligned(a.cast()) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 968d5f99de84a..8e10fff984ac7 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -4357,23 +4357,16 @@ intrinsics: safety: unsafe: [neon] types: - - ['*const i8', int8x8x4_t, i8, '*const int8x8_t'] - - ['*const i32', int32x4x4_t, i32, '*const int32x4_t'] - - ['*const i16', int16x4x4_t, i16, '*const int16x4_t'] - - ['*const i32', int32x2x4_t, i32, '*const int32x2_t'] - - ['*const i8', int8x16x4_t, i8, '*const int8x16_t'] - - ['*const i16', int16x8x4_t, i16, '*const int16x8_t'] - - ['*const f32', float32x2x4_t, f32, '*const float32x2_t'] - - ['*const f32', float32x4x4_t, f32, 
'*const float32x4_t'] + - ['*const i8', int8x8x4_t, i8, "8"] + - ['*const i32', int32x4x4_t, i32, "4"] + - ['*const i16', int16x4x4_t, i16, "4"] + - ['*const i32', int32x2x4_t, i32, "2"] + - ['*const i8', int8x16x4_t, i8, "16"] + - ['*const i16', int16x8x4_t, i16, "8"] + - ['*const f32', float32x2x4_t, f32, "2"] + - ['*const f32', float32x4x4_t, f32, "4"] compose: - - LLVMLink: - name: 'vld4{neon_type[1].nox}' - arguments: - - 'ptr: {type[3]}' - links: - - link: 'llvm.aarch64.neon.ld4.v{neon_type[1].lane}{type[2]}.p0' - arch: aarch64,arm64ec - - FnCall: ['_vld4{neon_type[1].nox}', ['a as _']] + - FnCall: ["crate::core_arch::macros::deinterleaving_load!", [{ Type: "{type[2]}" }, "{type[3]}", "4", a], [], true] - name: "vld4{neon_type[1].nox}" doc: Load multiple 4-element structures to four registers @@ -4386,14 +4379,12 @@ intrinsics: types: - ['*const i64', int64x1x4_t, i64, '*const int64x1_t'] compose: - - LLVMLink: - name: 'vld4{neon_type[1].nox}' - arguments: - - 'ptr: {type[3]}' - links: - - link: 'llvm.aarch64.neon.ld4.v{neon_type[1].lane}{type[2]}.p0' - arch: aarch64,arm64ec - - FnCall: ['_vld4{neon_type[1].nox}', ['a as _']] + - FnCall: + - 'crate::ptr::read_unaligned' + - - MethodCall: + - a + - cast + - [] - name: "vld4{neon_type[1].lane_nox}" doc: Load multiple 4-element structures to four registers @@ -12418,19 +12409,10 @@ intrinsics: safety: unsafe: [neon] types: - - ["*const f16", float16x4x4_t, f16] - - ["*const f16", float16x8x4_t, f16] + - ["*const f16", float16x4x4_t, f16, "4"] + - ["*const f16", float16x8x4_t, f16, "8"] compose: - - LLVMLink: - name: "vld4.{neon_type[1]}" - arguments: - - "ptr: {type[0]}" - links: - - link: "llvm.aarch64.neon.ld4.v{neon_type[1].lane}{type[2]}.p0" - arch: aarch64,arm64ec - - FnCall: - - "_vld4{neon_type[1].nox}" - - - "a as _" + - FnCall: ["crate::core_arch::macros::deinterleaving_load!", [{ Type: "{type[2]}" }, "{type[3]}", "4", a], [], true] - name: "vld4{neon_type[1].dup_nox}" doc: Load single 4-element 
structure and replicate to all lanes of two registers From 57a197439899099d84f74d8121365a00be344548 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sat, 14 Feb 2026 23:09:35 +0100 Subject: [PATCH 78/90] neon `ld1` --- .../core_arch/src/aarch64/neon/generated.rs | 27 ++---------- .../spec/neon/aarch64.spec.yml | 42 ++++++++----------- 2 files changed, 20 insertions(+), 49 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index 9a8a9ad59e13a..119f903de715c 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -11652,14 +11652,7 @@ pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v1f64.p0" - )] - fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t; - } - _vld2_f64(a as _) + crate::ptr::read_unaligned(a.cast()) } #[doc = "Load multiple 2-element structures to two registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"] @@ -12031,14 +12024,7 @@ pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3.v1f64.p0" - )] - fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t; - } - _vld3_f64(a as _) + crate::ptr::read_unaligned(a.cast()) } #[doc = "Load multiple 3-element structures to three registers"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"] @@ -12442,14 +12428,7 @@ pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4.v1f64.p0" - )] - fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t; - } - _vld4_f64(a as _) + crate::ptr::read_unaligned(a.cast()) } #[doc = "Load multiple 4-element structures to four registers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"] diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index a10403de41252..b81f04ebc0ebb 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -3698,16 +3698,12 @@ intrinsics: types: - ["*const f64", float64x1x2_t, f64, float64x1_t] compose: - - LLVMLink: - name: "vld2.{neon_type[1]}" - arguments: - - "ptr: *const {neon_type[3]}" - links: - - link: "llvm.aarch64.neon.ld2.v{neon_type[1].lane}{type[2]}.p0" - arch: aarch64,arm64ec - FnCall: - - "_vld2{neon_type[1].nox}" - - - "a as _" + - 'crate::ptr::read_unaligned' + - - MethodCall: + - a + - cast + - [] - name: "vld2{neon_type[1].nox}" doc: Load multiple 2-element structures to two registers @@ -4057,14 +4053,12 @@ intrinsics: types: - ['*const f64', float64x1x3_t, '*const float64x1_t', f64] compose: - - LLVMLink: - name: 'vld3{neon_type[1].nox}' - arguments: - - 'ptr: {type[2]}' - links: - - link: 'llvm.aarch64.neon.ld3.v{neon_type[1].lane}{type[3]}.p0' - arch: aarch64,arm64ec - - FnCall: ['_vld3{neon_type[1].nox}', ['a as _']] + - FnCall: + - 
'crate::ptr::read_unaligned' + - - MethodCall: + - a + - cast + - [] - name: "vld3{neon_type[1].nox}" doc: Load multiple 3-element structures to three registers @@ -4203,14 +4197,12 @@ intrinsics: types: - ['*const f64', float64x1x4_t, f64, '*const float64x1_t'] compose: - - LLVMLink: - name: 'vld4{neon_type[1].nox}' - arguments: - - 'ptr: {type[3]}' - links: - - link: 'llvm.aarch64.neon.ld4.v{neon_type[1].lane}{type[2]}.p0' - arch: aarch64,arm64ec - - FnCall: ['_vld4{neon_type[1].nox}', ['a as _']] + - FnCall: + - 'crate::ptr::read_unaligned' + - - MethodCall: + - a + - cast + - [] - name: "vld4{neon_type[1].nox}" doc: Load multiple 4-element structures to four registers From 39edacad9cc0efa63f0ba798d233fbc478a6f454 Mon Sep 17 00:00:00 2001 From: Friedrich Date: Wed, 18 Feb 2026 10:14:28 +0100 Subject: [PATCH 79/90] Fix incorrect target The target was `-musl`, but should be `-gnu`. --- src/doc/rustc/src/platform-support/aarch64-unknown-linux-gnu.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/doc/rustc/src/platform-support/aarch64-unknown-linux-gnu.md b/src/doc/rustc/src/platform-support/aarch64-unknown-linux-gnu.md index 2003a3cb9eaa2..baa46135d534b 100644 --- a/src/doc/rustc/src/platform-support/aarch64-unknown-linux-gnu.md +++ b/src/doc/rustc/src/platform-support/aarch64-unknown-linux-gnu.md @@ -29,7 +29,7 @@ If cross-compiling, make sure your C compiler is included in `$PATH`, then add i `bootstrap.toml`: ```toml -[target.aarch64-unknown-linux-musl] +[target.aarch64-unknown-linux-gnu] cc = "aarch64-linux-gnu-gcc" cxx = "aarch64-linux-gnu-g++" ar = "aarch64-linux-gnu-ar" From fb94e5883ed1b648ae00fd3bc85e3dc002068a78 Mon Sep 17 00:00:00 2001 From: Shoyu Vanilla Date: Wed, 18 Feb 2026 19:12:34 +0900 Subject: [PATCH 80/90] Fix an ICE while checking param env shadowing on an erroneous trait impl --- .../src/error_reporting/infer/mod.rs | 6 ++++ ...uggestion-no-ice-on-missing-assoc-value.rs | 18 +++++++++++ 
...stion-no-ice-on-missing-assoc-value.stderr | 32 +++++++++++++++++++ 3 files changed, 56 insertions(+) create mode 100644 tests/ui/associated-types/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.rs create mode 100644 tests/ui/associated-types/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.stderr diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs index 6003461f35e8b..2e1657a8dc2c3 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs @@ -299,6 +299,12 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let trait_def_id = alias.trait_def_id(tcx); let rebased_args = alias.args.rebase_onto(tcx, trait_def_id, impl_substs); + // The impl is erroneous missing a definition for the associated type. + // Skipping it since calling `TyCtxt::type_of` on its assoc ty will trigger an ICE. + if !leaf_def.item.defaultness(tcx).has_value() { + return false; + } + let impl_item_def_id = leaf_def.item.def_id; let impl_assoc_ty = tcx.type_of(impl_item_def_id).instantiate(tcx, rebased_args); diff --git a/tests/ui/associated-types/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.rs b/tests/ui/associated-types/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.rs new file mode 100644 index 0000000000000..6f0cd74d2ffe7 --- /dev/null +++ b/tests/ui/associated-types/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.rs @@ -0,0 +1,18 @@ +// A regression test for https://github.com/rust-lang/rust/issues/152663 +// Previously triggered an ICE when checking whether the param-env +// shadows a global impl. The crash occurred due to calling +// `TyCtxt::type_of` on an erroneous associated type in a trait impl +// that had no corresponding value. 
+ +trait Iterable { + type Iter; +} + +impl Iterable for [T] { + //~^ ERROR: not all trait items implemented + fn iter() -> Self::Iter {} + //~^ ERROR: method `iter` is not a member of trait `Iterable` + //~| ERROR: mismatched types +} + +fn main() {} diff --git a/tests/ui/associated-types/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.stderr b/tests/ui/associated-types/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.stderr new file mode 100644 index 0000000000000..9d4f7c58bd7a5 --- /dev/null +++ b/tests/ui/associated-types/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.stderr @@ -0,0 +1,32 @@ +error[E0407]: method `iter` is not a member of trait `Iterable` + --> $DIR/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.rs:13:5 + | +LL | fn iter() -> Self::Iter {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ not a member of trait `Iterable` + +error[E0046]: not all trait items implemented, missing: `Iter` + --> $DIR/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.rs:11:1 + | +LL | type Iter; + | --------- `Iter` from trait +... +LL | impl Iterable for [T] { + | ^^^^^^^^^^^^^^^^^^^^^^^^ missing `Iter` in implementation + +error[E0308]: mismatched types + --> $DIR/param-env-shadowing-suggestion-no-ice-on-missing-assoc-value.rs:13:18 + | +LL | fn iter() -> Self::Iter {} + | ---- ^^^^^^^^^^ expected associated type, found `()` + | | + | implicitly returns `()` as its body has no tail or `return` expression + | + = note: expected associated type `<[T] as Iterable>::Iter` + found unit type `()` + = help: consider constraining the associated type `<[T] as Iterable>::Iter` to `()` or calling a method that returns `<[T] as Iterable>::Iter` + = note: for more information, visit https://doc.rust-lang.org/book/ch19-03-advanced-traits.html + +error: aborting due to 3 previous errors + +Some errors have detailed explanations: E0046, E0308, E0407. +For more information about an error, try `rustc --explain E0046`. 
From 92e60a324f0e99377c9528f2e4c7b44e45d648e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Miku=C5=82a?= Date: Wed, 18 Feb 2026 11:24:10 +0100 Subject: [PATCH 81/90] Do no add -no-pie on Windows MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Windows binaries are always position independent and Clang warns when trying to enable or disable that: ``` ❯ clang hello.c -pie clang: warning: argument unused during compilation: '-pie' [-Wunused-command-line-argument] ❯ clang hello.c -no-pie clang: warning: argument unused during compilation: '-no-pie' [-Wunused-command-line-argument] ``` --- compiler/rustc_codegen_ssa/src/back/linker.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs index 10c4eedb58e8e..3ace1a8c266cf 100644 --- a/compiler/rustc_codegen_ssa/src/back/linker.rs +++ b/compiler/rustc_codegen_ssa/src/back/linker.rs @@ -527,7 +527,8 @@ impl<'a> Linker for GccLinker<'a> { ) { match output_kind { LinkOutputKind::DynamicNoPicExe => { - if !self.is_ld && self.is_gnu { + // noop on windows w/ gcc, warning w/ clang + if !self.is_ld && self.is_gnu && !self.sess.target.is_like_windows { self.cc_arg("-no-pie"); } } From 8ea3542a9abb60b20e706cf61e292497f3217039 Mon Sep 17 00:00:00 2001 From: Yuki Okushi Date: Wed, 4 Feb 2026 20:26:54 +0900 Subject: [PATCH 82/90] Suggest local variables for captured format args on note --- compiler/rustc_parse_format/src/lib.rs | 12 ++++++-- ...rmat-args-non-identifier-diagnostics.fixed | 5 ++-- .../format-args-non-identifier-diagnostics.rs | 5 ++-- ...mat-args-non-identifier-diagnostics.stderr | 3 +- .../struct-field-as-captured-argument.fixed | 1 + .../fmt/struct-field-as-captured-argument.rs | 1 + .../struct-field-as-captured-argument.stderr | 30 +++++++++++++++---- 7 files changed, 45 insertions(+), 12 deletions(-) diff --git a/compiler/rustc_parse_format/src/lib.rs 
b/compiler/rustc_parse_format/src/lib.rs index 2338268a874f0..c7ffc1e5f3681 100644 --- a/compiler/rustc_parse_format/src/lib.rs +++ b/compiler/rustc_parse_format/src/lib.rs @@ -934,7 +934,11 @@ impl<'input> Parser<'input> { 0, ParseError { description: "field access isn't supported".to_string(), - note: None, + note: Some( + "consider moving this expression to a local variable and then \ + using the local here instead" + .to_owned(), + ), label: "not supported".to_string(), span: arg.position_span.start..field.position_span.end, secondary_label: None, @@ -947,7 +951,11 @@ impl<'input> Parser<'input> { 0, ParseError { description: "tuple index access isn't supported".to_string(), - note: None, + note: Some( + "consider moving this expression to a local variable and then \ + using the local here instead" + .to_owned(), + ), label: "not supported".to_string(), span: arg.position_span.start..field.position_span.end, secondary_label: None, diff --git a/tests/ui/fmt/format-args-non-identifier-diagnostics.fixed b/tests/ui/fmt/format-args-non-identifier-diagnostics.fixed index bd4db9480674c..a9a10f0e7eb9c 100644 --- a/tests/ui/fmt/format-args-non-identifier-diagnostics.fixed +++ b/tests/ui/fmt/format-args-non-identifier-diagnostics.fixed @@ -1,6 +1,7 @@ // Checks that there is a suggestion for simple tuple index access expression (used where an -// identifier is expected in a format arg) to use positional arg instead. -// Issue: . +// identifier is expected in a format arg) to use positional arg instead, with a note to move +// the expression into a local variable. +// Issue: . 
//@ run-rustfix fn main() { diff --git a/tests/ui/fmt/format-args-non-identifier-diagnostics.rs b/tests/ui/fmt/format-args-non-identifier-diagnostics.rs index aab705341f71d..f0f714a9af913 100644 --- a/tests/ui/fmt/format-args-non-identifier-diagnostics.rs +++ b/tests/ui/fmt/format-args-non-identifier-diagnostics.rs @@ -1,6 +1,7 @@ // Checks that there is a suggestion for simple tuple index access expression (used where an -// identifier is expected in a format arg) to use positional arg instead. -// Issue: . +// identifier is expected in a format arg) to use positional arg instead, with a note to move +// the expression into a local variable. +// Issue: . //@ run-rustfix fn main() { diff --git a/tests/ui/fmt/format-args-non-identifier-diagnostics.stderr b/tests/ui/fmt/format-args-non-identifier-diagnostics.stderr index af6bb58071fff..4555ce7c8b705 100644 --- a/tests/ui/fmt/format-args-non-identifier-diagnostics.stderr +++ b/tests/ui/fmt/format-args-non-identifier-diagnostics.stderr @@ -1,9 +1,10 @@ error: invalid format string: tuple index access isn't supported - --> $DIR/format-args-non-identifier-diagnostics.rs:8:16 + --> $DIR/format-args-non-identifier-diagnostics.rs:9:16 | LL | println!("{x.0}"); | ^^^ not supported in format string | + = note: consider moving this expression to a local variable and then using the local here instead help: consider using a positional formatting argument instead | LL - println!("{x.0}"); diff --git a/tests/ui/fmt/struct-field-as-captured-argument.fixed b/tests/ui/fmt/struct-field-as-captured-argument.fixed index 0da40737354f7..65e4dd2d58c56 100644 --- a/tests/ui/fmt/struct-field-as-captured-argument.fixed +++ b/tests/ui/fmt/struct-field-as-captured-argument.fixed @@ -10,6 +10,7 @@ fn main() { let bar = 3; let _ = format!("{0}", foo.field); //~ ERROR invalid format string: field access isn't supported let _ = format!("{1} {} {bar}", "aa", foo.field); //~ ERROR invalid format string: field access isn't supported + let _ = 
format!("{0:value$} {bar}", foo.field, value = 1); //~ ERROR invalid format string: field access isn't supported let _ = format!("{2} {} {1} {bar}", "aa", "bb", foo.field); //~ ERROR invalid format string: field access isn't supported let _ = format!("{1} {} {baz}", "aa", foo.field, baz = 3); //~ ERROR invalid format string: field access isn't supported let _ = format!("{1:?} {} {baz}", "aa", foo.field, baz = 3); //~ ERROR invalid format string: field access isn't supported diff --git a/tests/ui/fmt/struct-field-as-captured-argument.rs b/tests/ui/fmt/struct-field-as-captured-argument.rs index 325b4e3a21878..13087cceb671b 100644 --- a/tests/ui/fmt/struct-field-as-captured-argument.rs +++ b/tests/ui/fmt/struct-field-as-captured-argument.rs @@ -10,6 +10,7 @@ fn main() { let bar = 3; let _ = format!("{foo.field}"); //~ ERROR invalid format string: field access isn't supported let _ = format!("{foo.field} {} {bar}", "aa"); //~ ERROR invalid format string: field access isn't supported + let _ = format!("{foo.field:value$} {bar}", value = 1); //~ ERROR invalid format string: field access isn't supported let _ = format!("{foo.field} {} {1} {bar}", "aa", "bb"); //~ ERROR invalid format string: field access isn't supported let _ = format!("{foo.field} {} {baz}", "aa", baz = 3); //~ ERROR invalid format string: field access isn't supported let _ = format!("{foo.field:?} {} {baz}", "aa", baz = 3); //~ ERROR invalid format string: field access isn't supported diff --git a/tests/ui/fmt/struct-field-as-captured-argument.stderr b/tests/ui/fmt/struct-field-as-captured-argument.stderr index 388c14f932bbc..23576d6a462ca 100644 --- a/tests/ui/fmt/struct-field-as-captured-argument.stderr +++ b/tests/ui/fmt/struct-field-as-captured-argument.stderr @@ -4,6 +4,7 @@ error: invalid format string: field access isn't supported LL | let _ = format!("{foo.field}"); | ^^^^^^^^^ not supported in format string | + = note: consider moving this expression to a local variable and then using the local 
here instead help: consider using a positional formatting argument instead | LL - let _ = format!("{foo.field}"); @@ -16,6 +17,7 @@ error: invalid format string: field access isn't supported LL | let _ = format!("{foo.field} {} {bar}", "aa"); | ^^^^^^^^^ not supported in format string | + = note: consider moving this expression to a local variable and then using the local here instead help: consider using a positional formatting argument instead | LL - let _ = format!("{foo.field} {} {bar}", "aa"); @@ -25,9 +27,23 @@ LL + let _ = format!("{1} {} {bar}", "aa", foo.field); error: invalid format string: field access isn't supported --> $DIR/struct-field-as-captured-argument.rs:13:23 | +LL | let _ = format!("{foo.field:value$} {bar}", value = 1); + | ^^^^^^^^^ not supported in format string + | + = note: consider moving this expression to a local variable and then using the local here instead +help: consider using a positional formatting argument instead + | +LL - let _ = format!("{foo.field:value$} {bar}", value = 1); +LL + let _ = format!("{0:value$} {bar}", foo.field, value = 1); + | + +error: invalid format string: field access isn't supported + --> $DIR/struct-field-as-captured-argument.rs:14:23 + | LL | let _ = format!("{foo.field} {} {1} {bar}", "aa", "bb"); | ^^^^^^^^^ not supported in format string | + = note: consider moving this expression to a local variable and then using the local here instead help: consider using a positional formatting argument instead | LL - let _ = format!("{foo.field} {} {1} {bar}", "aa", "bb"); @@ -35,11 +51,12 @@ LL + let _ = format!("{2} {} {1} {bar}", "aa", "bb", foo.field); | error: invalid format string: field access isn't supported - --> $DIR/struct-field-as-captured-argument.rs:14:23 + --> $DIR/struct-field-as-captured-argument.rs:15:23 | LL | let _ = format!("{foo.field} {} {baz}", "aa", baz = 3); | ^^^^^^^^^ not supported in format string | + = note: consider moving this expression to a local variable and then using the 
local here instead help: consider using a positional formatting argument instead | LL - let _ = format!("{foo.field} {} {baz}", "aa", baz = 3); @@ -47,11 +64,12 @@ LL + let _ = format!("{1} {} {baz}", "aa", foo.field, baz = 3); | error: invalid format string: field access isn't supported - --> $DIR/struct-field-as-captured-argument.rs:15:23 + --> $DIR/struct-field-as-captured-argument.rs:16:23 | LL | let _ = format!("{foo.field:?} {} {baz}", "aa", baz = 3); | ^^^^^^^^^ not supported in format string | + = note: consider moving this expression to a local variable and then using the local here instead help: consider using a positional formatting argument instead | LL - let _ = format!("{foo.field:?} {} {baz}", "aa", baz = 3); @@ -59,11 +77,12 @@ LL + let _ = format!("{1:?} {} {baz}", "aa", foo.field, baz = 3); | error: invalid format string: field access isn't supported - --> $DIR/struct-field-as-captured-argument.rs:16:23 + --> $DIR/struct-field-as-captured-argument.rs:17:23 | LL | let _ = format!("{foo.field:#?} {} {baz}", "aa", baz = 3); | ^^^^^^^^^ not supported in format string | + = note: consider moving this expression to a local variable and then using the local here instead help: consider using a positional formatting argument instead | LL - let _ = format!("{foo.field:#?} {} {baz}", "aa", baz = 3); @@ -71,16 +90,17 @@ LL + let _ = format!("{1:#?} {} {baz}", "aa", foo.field, baz = 3); | error: invalid format string: field access isn't supported - --> $DIR/struct-field-as-captured-argument.rs:17:23 + --> $DIR/struct-field-as-captured-argument.rs:18:23 | LL | let _ = format!("{foo.field:.3} {} {baz}", "aa", baz = 3); | ^^^^^^^^^ not supported in format string | + = note: consider moving this expression to a local variable and then using the local here instead help: consider using a positional formatting argument instead | LL - let _ = format!("{foo.field:.3} {} {baz}", "aa", baz = 3); LL + let _ = format!("{1:.3} {} {baz}", "aa", foo.field, baz = 3); | -error: 
aborting due to 7 previous errors +error: aborting due to 8 previous errors From d5574c578aaa956086f52557bf67521bb598a2f1 Mon Sep 17 00:00:00 2001 From: Shunpoco Date: Tue, 17 Feb 2026 18:28:53 +0000 Subject: [PATCH 83/90] modify around --ci flag in bootstrap --ci flag in bootstrap is not respected in some cases. This commit modifies such inconsistencies in the tool. --- src/bootstrap/src/bin/main.rs | 2 +- src/bootstrap/src/core/build_steps/dist.rs | 2 +- src/bootstrap/src/core/build_steps/format.rs | 4 +-- src/bootstrap/src/core/build_steps/test.rs | 4 +-- src/bootstrap/src/core/builder/cargo.rs | 4 +-- src/bootstrap/src/core/config/config.rs | 31 ++++++++++++-------- src/bootstrap/src/core/config/tests.rs | 9 +++--- src/bootstrap/src/core/download.rs | 13 ++++++-- src/bootstrap/src/utils/cc_detect.rs | 2 +- src/bootstrap/src/utils/metrics.rs | 2 +- src/bootstrap/src/utils/render_tests.rs | 2 +- 11 files changed, 44 insertions(+), 31 deletions(-) diff --git a/src/bootstrap/src/bin/main.rs b/src/bootstrap/src/bin/main.rs index 93c7faf4f0159..44eab9b87783b 100644 --- a/src/bootstrap/src/bin/main.rs +++ b/src/bootstrap/src/bin/main.rs @@ -71,7 +71,7 @@ fn main() { // check_version warnings are not printed during setup, or during CI let changelog_suggestion = if matches!(config.cmd, Subcommand::Setup { .. 
}) - || config.is_running_on_ci + || config.is_running_on_ci() || config.dry_run() { None diff --git a/src/bootstrap/src/core/build_steps/dist.rs b/src/bootstrap/src/core/build_steps/dist.rs index eee960027a9f9..ed1d96bb821a3 100644 --- a/src/bootstrap/src/core/build_steps/dist.rs +++ b/src/bootstrap/src/core/build_steps/dist.rs @@ -3084,7 +3084,7 @@ impl Step for Gcc { return None; } - if builder.config.is_running_on_ci { + if builder.config.is_running_on_ci() { assert_eq!( builder.config.gcc_ci_mode, GccCiMode::BuildLocally, diff --git a/src/bootstrap/src/core/build_steps/format.rs b/src/bootstrap/src/core/build_steps/format.rs index d487995e98a09..53cb03a41fc4d 100644 --- a/src/bootstrap/src/core/build_steps/format.rs +++ b/src/bootstrap/src/core/build_steps/format.rs @@ -92,7 +92,7 @@ fn update_rustfmt_version(build: &Builder<'_>) { fn get_modified_rs_files(build: &Builder<'_>) -> Result>, String> { // In CI `get_git_modified_files` returns something different to normal environment. // This shouldn't be called in CI anyway. - assert!(!build.config.is_running_on_ci); + assert!(!build.config.is_running_on_ci()); if !verify_rustfmt_version(build) { return Ok(None); @@ -142,7 +142,7 @@ pub fn format(build: &Builder<'_>, check: bool, all: bool, paths: &[PathBuf]) { // `--all` is specified or we are in CI. We check all files in CI to avoid bugs in // `get_modified_rs_files` letting regressions slip through; we also care about CI time less // since this is still very fast compared to building the compiler. 
- let all = all || build.config.is_running_on_ci; + let all = all || build.config.is_running_on_ci(); let mut builder = ignore::types::TypesBuilder::new(); builder.add_defaults(); diff --git a/src/bootstrap/src/core/build_steps/test.rs b/src/bootstrap/src/core/build_steps/test.rs index fda9f3bbba3a0..f0fe1c03e7e14 100644 --- a/src/bootstrap/src/core/build_steps/test.rs +++ b/src/bootstrap/src/core/build_steps/test.rs @@ -3500,7 +3500,7 @@ impl Step for BootstrapPy { // Bootstrap tests might not be perfectly self-contained and can depend // on the environment, so only run them by default in CI, not locally. // See `test::Bootstrap::should_run`. - builder.config.is_running_on_ci + builder.config.is_running_on_ci() } fn make_run(run: RunConfig<'_>) { @@ -3539,7 +3539,7 @@ impl Step for Bootstrap { // Bootstrap tests might not be perfectly self-contained and can depend on the external // environment, submodules that are checked out, etc. // Therefore we only run them by default on CI. - builder.config.is_running_on_ci + builder.config.is_running_on_ci() } /// Tests the build system itself. diff --git a/src/bootstrap/src/core/builder/cargo.rs b/src/bootstrap/src/core/builder/cargo.rs index 7f4e23881e455..2e3ddd22db60c 100644 --- a/src/bootstrap/src/core/builder/cargo.rs +++ b/src/bootstrap/src/core/builder/cargo.rs @@ -2,8 +2,6 @@ use std::env; use std::ffi::{OsStr, OsString}; use std::path::{Path, PathBuf}; -use build_helper::ci::CiEnv; - use super::{Builder, Kind}; use crate::core::build_steps::test; use crate::core::build_steps::tool::SourceType; @@ -1326,7 +1324,7 @@ impl Builder<'_> { // Try to use a sysroot-relative bindir, in case it was configured absolutely. cargo.env("RUSTC_INSTALL_BINDIR", self.config.bindir_relative()); - if CiEnv::is_ci() { + if self.config.is_running_on_ci() { // Tell cargo to use colored output for nicer logs in CI, even // though CI isn't printing to a terminal. 
// Also set an explicit `TERM=xterm` so that cargo doesn't warn diff --git a/src/bootstrap/src/core/config/config.rs b/src/bootstrap/src/core/config/config.rs index 61eef3c015928..bc68bfe396425 100644 --- a/src/bootstrap/src/core/config/config.rs +++ b/src/bootstrap/src/core/config/config.rs @@ -88,7 +88,7 @@ pub const RUSTC_IF_UNCHANGED_ALLOWED_PATHS: &[&str] = &[ /// filled out from the decoded forms of the structs below. For documentation /// on each field, see the corresponding fields in /// `bootstrap.example.toml`. -#[derive(Default, Clone)] +#[derive(Clone)] pub struct Config { pub change_id: Option, pub bypass_bootstrap_lock: bool, @@ -318,7 +318,7 @@ pub struct Config { /// Default value for `--extra-checks` pub tidy_extra_checks: Option, - pub is_running_on_ci: bool, + pub ci_env: CiEnv, /// Cache for determining path modifications pub path_modification_cache: Arc, PathFreshness>>>, @@ -728,7 +728,11 @@ impl Config { ); } - let is_running_on_ci = flags_ci.unwrap_or(CiEnv::is_ci()); + let ci_env = match flags_ci { + Some(true) => CiEnv::GitHubActions, + Some(false) => CiEnv::None, + None => CiEnv::current(), + }; let dwn_ctx = DownloadContext { path_modification_cache: path_modification_cache.clone(), src: &src, @@ -739,7 +743,7 @@ impl Config { stage0_metadata: &stage0_metadata, llvm_assertions, bootstrap_cache_path: &build_bootstrap_cache_path, - is_running_on_ci, + ci_env, }; let initial_rustc = build_rustc.unwrap_or_else(|| { @@ -1168,7 +1172,7 @@ impl Config { // CI should always run stage 2 builds, unless it specifically states otherwise #[cfg(not(test))] - if flags_stage.is_none() && is_running_on_ci { + if flags_stage.is_none() && ci_env.is_running_in_ci() { match flags_cmd { Subcommand::Test { .. } | Subcommand::Miri { .. 
} @@ -1295,6 +1299,7 @@ impl Config { ccache, change_id: toml.change_id.inner, channel, + ci_env, clippy_info, cmd: flags_cmd, codegen_tests: rust_codegen_tests.unwrap_or(true), @@ -1345,7 +1350,6 @@ impl Config { initial_rustc, initial_rustfmt, initial_sysroot, - is_running_on_ci, jemalloc: rust_jemalloc.unwrap_or(false), jobs: Some(threads_from_config(flags_jobs.or(build_jobs).unwrap_or(0))), json_output: flags_json_output, @@ -1500,6 +1504,10 @@ impl Config { self.exec_ctx.dry_run() } + pub fn is_running_on_ci(&self) -> bool { + self.ci_env.is_running_in_ci() + } + pub fn is_explicit_stage(&self) -> bool { self.explicit_stage_from_cli || self.explicit_stage_from_config } @@ -1666,7 +1674,7 @@ impl Config { if !self.llvm_from_ci { // This happens when LLVM submodule is updated in CI, we should disable ci-rustc without an error // to not break CI. For non-CI environments, we should return an error. - if self.is_running_on_ci { + if self.is_running_on_ci() { println!("WARNING: LLVM submodule has changes, `download-rustc` will be disabled."); return None; } else { @@ -1788,8 +1796,7 @@ impl Config { .unwrap() .entry(paths.to_vec()) .or_insert_with(|| { - check_path_modifications(&self.src, &self.git_config(), paths, CiEnv::current()) - .unwrap() + check_path_modifications(&self.src, &self.git_config(), paths, self.ci_env).unwrap() }) .clone() } @@ -2223,7 +2230,7 @@ pub fn download_ci_rustc_commit<'a>( return None; } - if dwn_ctx.is_running_on_ci { + if dwn_ctx.is_running_on_ci() { eprintln!("CI rustc commit matches with HEAD and we are in CI."); eprintln!( "`rustc.download-ci` functionality will be skipped as artifacts are not available." 
@@ -2267,7 +2274,7 @@ pub fn check_path_modifications_<'a>( dwn_ctx.src, &git_config(dwn_ctx.stage0_metadata), paths, - CiEnv::current(), + dwn_ctx.ci_env, ) .unwrap() }) @@ -2322,7 +2329,7 @@ pub fn parse_download_ci_llvm<'a>( } #[cfg(not(test))] - if b && dwn_ctx.is_running_on_ci && CiEnv::is_rust_lang_managed_ci_job() { + if b && dwn_ctx.is_running_on_ci() && CiEnv::is_rust_lang_managed_ci_job() { // On rust-lang CI, we must always rebuild LLVM if there were any modifications to it panic!( "`llvm.download-ci-llvm` cannot be set to `true` on CI. Use `if-unchanged` instead." diff --git a/src/bootstrap/src/core/config/tests.rs b/src/bootstrap/src/core/config/tests.rs index e19604d4ab12f..277ede8d77456 100644 --- a/src/bootstrap/src/core/config/tests.rs +++ b/src/bootstrap/src/core/config/tests.rs @@ -42,7 +42,7 @@ fn download_ci_llvm() { .config("check") .with_default_toml_config("llvm.download-ci-llvm = \"if-unchanged\"") .create_config(); - if if_unchanged_config.llvm_from_ci && if_unchanged_config.is_running_on_ci { + if if_unchanged_config.llvm_from_ci && if_unchanged_config.is_running_on_ci() { let has_changes = if_unchanged_config.has_changes_from_upstream(LLVM_INVALIDATION_PATHS); assert!( @@ -491,13 +491,14 @@ fn test_exclude() { #[test] fn test_ci_flag() { let config = TestCtx::new().config("check").arg("--ci").arg("false").create_config(); - assert!(!config.is_running_on_ci); + assert!(!config.is_running_on_ci()); let config = TestCtx::new().config("check").arg("--ci").arg("true").create_config(); - assert!(config.is_running_on_ci); + assert!(config.is_running_on_ci()); + // If --ci flag is not added, is_running_on_ci() relies on if it is run on actual CI or not. 
let config = TestCtx::new().config("check").create_config(); - assert_eq!(config.is_running_on_ci, CiEnv::is_ci()); + assert_eq!(config.is_running_on_ci(), CiEnv::is_ci()); } #[test] diff --git a/src/bootstrap/src/core/download.rs b/src/bootstrap/src/core/download.rs index bf8d0cf4d534a..6177233bc04e5 100644 --- a/src/bootstrap/src/core/download.rs +++ b/src/bootstrap/src/core/download.rs @@ -6,6 +6,7 @@ use std::io::{BufRead, BufReader, BufWriter, ErrorKind, Write}; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex, OnceLock}; +use build_helper::ci::CiEnv; use build_helper::git::PathFreshness; use xz2::bufread::XzDecoder; @@ -411,7 +412,13 @@ pub(crate) struct DownloadContext<'a> { pub stage0_metadata: &'a build_helper::stage0_parser::Stage0, pub llvm_assertions: bool, pub bootstrap_cache_path: &'a Option, - pub is_running_on_ci: bool, + pub ci_env: CiEnv, +} + +impl<'a> DownloadContext<'a> { + pub fn is_running_on_ci(&self) -> bool { + self.ci_env.is_running_in_ci() + } } impl<'a> AsRef> for DownloadContext<'a> { @@ -432,7 +439,7 @@ impl<'a> From<&'a Config> for DownloadContext<'a> { stage0_metadata: &value.stage0_metadata, llvm_assertions: value.llvm_assertions, bootstrap_cache_path: &value.bootstrap_cache_path, - is_running_on_ci: value.is_running_on_ci, + ci_env: value.ci_env, } } } @@ -981,7 +988,7 @@ fn download_file<'a>( match url.split_once("://").map(|(proto, _)| proto) { Some("http") | Some("https") => download_http_with_retries( dwn_ctx.host_target, - dwn_ctx.is_running_on_ci, + dwn_ctx.is_running_on_ci(), dwn_ctx.exec_ctx, &tempfile, url, diff --git a/src/bootstrap/src/utils/cc_detect.rs b/src/bootstrap/src/utils/cc_detect.rs index 0662ae304ac06..d010226f0dfdb 100644 --- a/src/bootstrap/src/utils/cc_detect.rs +++ b/src/bootstrap/src/utils/cc_detect.rs @@ -223,7 +223,7 @@ fn default_compiler( let root = if let Some(path) = build.wasi_sdk_path.as_ref() { path } else { - if build.config.is_running_on_ci { + if build.config.is_running_on_ci() { 
panic!("ERROR: WASI_SDK_PATH must be configured for a -wasi target on CI"); } println!("WARNING: WASI_SDK_PATH not set, using default cc/cxx compiler"); diff --git a/src/bootstrap/src/utils/metrics.rs b/src/bootstrap/src/utils/metrics.rs index 9b1ccc32cb616..e685c64733c66 100644 --- a/src/bootstrap/src/utils/metrics.rs +++ b/src/bootstrap/src/utils/metrics.rs @@ -222,7 +222,7 @@ impl BuildMetrics { format_version: CURRENT_FORMAT_VERSION, system_stats, invocations, - ci_metadata: get_ci_metadata(CiEnv::current()), + ci_metadata: get_ci_metadata(build.config.ci_env), }; t!(std::fs::create_dir_all(dest.parent().unwrap())); diff --git a/src/bootstrap/src/utils/render_tests.rs b/src/bootstrap/src/utils/render_tests.rs index 55eba6c696c5f..1d133a9c9e2f3 100644 --- a/src/bootstrap/src/utils/render_tests.rs +++ b/src/bootstrap/src/utils/render_tests.rs @@ -179,7 +179,7 @@ impl<'a> Renderer<'a> { if self.builder.config.verbose_tests { self.render_test_outcome_verbose(outcome, test); - } else if self.builder.config.is_running_on_ci { + } else if self.builder.config.is_running_on_ci() { self.render_test_outcome_ci(outcome, test); } else { self.render_test_outcome_terse(outcome, test); From abdb98ad4b47117ee3be17b1e43fab34f18f5805 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Wed, 18 Feb 2026 14:52:59 +0000 Subject: [PATCH 84/90] Rustup to rustc 1.95.0-nightly (838709580 2026-02-17) --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 2aaffdc5d244c..fe967c84352c8 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2026-02-17" +channel = "nightly-2026-02-18" components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"] profile = "minimal" From fb635505491c0e5dbdb8d4349a395c14664638b8 Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Wed, 18 Feb 2026 15:10:37 
+0000 Subject: [PATCH 85/90] Fix broken merge --- compiler/rustc_codegen_cranelift/src/abi/mod.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs index 133e7e26c0dea..97a19b8976d3a 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs @@ -58,9 +58,6 @@ pub(crate) fn conv_to_call_conv( // Cranelift doesn't currently have anything for this. CanonAbi::RustPreserveNone => default_call_conv, - // Cranelift doesn't currently have anything for this. - CanonAbi::RustPreserveNone => default_call_conv, - // Functions with this calling convention can only be called from assembly, but it is // possible to declare an `extern "custom"` block, so the backend still needs a calling // convention for declaring foreign functions. From 14b7c8216eb76acc389edefea237e6f1c0578c1f Mon Sep 17 00:00:00 2001 From: bjorn3 <17426603+bjorn3@users.noreply.github.com> Date: Wed, 18 Feb 2026 15:22:36 +0000 Subject: [PATCH 86/90] Format jit-helper.py --- compiler/rustc_codegen_cranelift/scripts/jit-helpers.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/compiler/rustc_codegen_cranelift/scripts/jit-helpers.py b/compiler/rustc_codegen_cranelift/scripts/jit-helpers.py index 1128521c0dfb3..4542aef7cb523 100644 --- a/compiler/rustc_codegen_cranelift/scripts/jit-helpers.py +++ b/compiler/rustc_codegen_cranelift/scripts/jit-helpers.py @@ -1,5 +1,6 @@ import gdb + def jitmap_raw(): pid = gdb.selected_inferior().pid jitmap_file = open("/tmp/perf-%d.map" % (pid,), "r") @@ -7,6 +8,7 @@ def jitmap_raw(): jitmap_file.close() return jitmap + def jit_functions(): jitmap = jitmap_raw() @@ -17,6 +19,7 @@ def jit_functions(): return functions + class JitDecorator(gdb.FrameDecorator.FrameDecorator): def __init__(self, fobj, name): super(JitDecorator, self).__init__(fobj) @@ -25,13 +28,14 @@ def __init__(self, fobj, name): def 
function(self): return self.name + class JitFilter: """ A backtrace filter which reads perf map files produced by cranelift-jit. """ def __init__(self): - self.name = 'JitFilter' + self.name = "JitFilter" self.enabled = True self.priority = 0 @@ -42,11 +46,12 @@ def __init__(self): def filter(self, frame_iter): for frame in frame_iter: frame_addr = frame.inferior_frame().pc() - for (addr, size, name) in jit_functions(): + for addr, size, name in jit_functions(): if frame_addr >= addr and frame_addr < addr + size: yield JitDecorator(frame, name) break else: yield frame + JitFilter() From 6671f95770a467ee502028db23c6626eed9fef4a Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sun, 15 Feb 2026 13:22:01 +0100 Subject: [PATCH 87/90] use `intrinsics::simd` for vpadd --- .../core_arch/src/aarch64/neon/generated.rs | 155 +++++------------- .../stdarch/crates/core_arch/src/macros.rs | 24 ++- .../spec/neon/aarch64.spec.yml | 91 +++++----- 3 files changed, 102 insertions(+), 168 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index 119f903de715c..c0e46c30efc5b 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -16067,14 +16067,11 @@ pub fn vpaddd_u64(a: uint64x2_t) -> u64 { #[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(faddp))] pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v8f16" - )] - fn _vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + unsafe { + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>()); + let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>()); + simd_add(even, odd) } - unsafe { _vpaddq_f16(a, b) } } #[doc = "Floating-point add 
pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"] @@ -16083,14 +16080,11 @@ pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v4f32" - )] - fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + unsafe { + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>()); + let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>()); + simd_add(even, odd) } - unsafe { _vpaddq_f32(a, b) } } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"] @@ -16099,14 +16093,11 @@ pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v2f64" - )] - fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + unsafe { + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>()); + let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>()); + simd_add(even, odd) } - unsafe { _vpaddq_f64(a, b) } } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"] @@ -16115,14 +16106,11 @@ pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vpaddq_s8(a: 
int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v16i8" - )] - fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + unsafe { + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>()); + let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>()); + simd_add(even, odd) } - unsafe { _vpaddq_s8(a, b) } } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"] @@ -16131,14 +16119,11 @@ pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v8i16" - )] - fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + unsafe { + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>()); + let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>()); + simd_add(even, odd) } - unsafe { _vpaddq_s16(a, b) } } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"] @@ -16147,14 +16132,11 @@ pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v4i32" - )] - fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + unsafe { + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>()); + let odd = simd_shuffle!(a, b, 
crate::core_arch::macros::odd::<4>()); + simd_add(even, odd) } - unsafe { _vpaddq_s32(a, b) } } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"] @@ -16163,119 +16145,62 @@ pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v2i64" - )] - fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + unsafe { + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>()); + let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>()); + simd_add(even, odd) } - unsafe { _vpaddq_s64(a, b) } } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { transmute(vpaddq_s8(transmute(a), transmute(b))) } -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint8x16_t = 
transmute(vpaddq_s8(transmute(a), transmute(b))); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>()); + let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>()); + simd_add(even, odd) } } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] #[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { transmute(vpaddq_s16(transmute(a), transmute(b))) } -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] -#[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { - let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>()); + let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>()); + simd_add(even, odd) } } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] #[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub fn vpaddq_u32(a: uint32x4_t, b: 
uint32x4_t) -> uint32x4_t { - unsafe { transmute(vpaddq_s32(transmute(a), transmute(b))) } -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] -#[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) }; unsafe { - let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>()); + let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>()); + simd_add(even, odd) } } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] #[inline(always)] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { transmute(vpaddq_s64(transmute(a), transmute(b))) } -} -#[doc = "Add Pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] -#[inline(always)] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) }; unsafe { - let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b))); - simd_shuffle!(ret_val, 
ret_val, [1, 0]) + let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>()); + let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>()); + simd_add(even, odd) } } #[doc = "Floating-point add pairwise"] diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs index d40ce51c746c4..9f6922efeeb7d 100644 --- a/library/stdarch/crates/core_arch/src/macros.rs +++ b/library/stdarch/crates/core_arch/src/macros.rs @@ -187,9 +187,31 @@ macro_rules! simd_masked_store { }; } +/// The first N even indices `[0, 2, 4, ...]`. +pub(crate) const fn even() -> [u32; N] { + let mut out = [0u32; N]; + let mut i = 0usize; + while i < N { + out[i] = (2 * i) as u32; + i += 1; + } + out +} + +/// The first N odd indices `[1, 3, 5, ...]`. +pub(crate) const fn odd() -> [u32; N] { + let mut out = [0u32; N]; + let mut i = 0usize; + while i < N { + out[i] = (2 * i + 1) as u32; + i += 1; + } + out +} + +/// Multiples of N offset by K `[K, K+N, K+2N, ...]`. pub(crate) const fn deinterleave_mask() -> [u32; LANES] { - // Produces: [K, K+N, K+2N, ...] 
let mut out = [0u32; LANES]; let mut i = 0usize; while i < LANES { diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index b81f04ebc0ebb..7ab68ff5f22a9 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -6961,28 +6961,29 @@ intrinsics: - FnCall: [simd_shuffle!, [a, a, "{type[3]}"]] - FnCall: ["vmovl{neon_type[0].noq}", [a]] - - name: "vpadd{neon_type.no}" - doc: Floating-point add pairwise - arguments: ["a: {neon_type}", "b: {neon_type}"] - return_type: "{type}" + - name: "vpadd{neon_type[0].no}" + doc: "Floating-point add pairwise" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" attr: [*neon-stable] assert_instr: [faddp] safety: safe types: - - float32x4_t - - float64x2_t + - [float32x4_t, "4"] + - [float64x2_t, "2"] compose: - - LLVMLink: - name: "faddp.{neon_type}" - links: - - link: "llvm.aarch64.neon.faddp.{neon_type}" - arch: aarch64,arm64ec - + - Let: + - even + - FnCall: ["simd_shuffle!", [a, b, "crate::core_arch::macros::even::<{type[1]}>()"]] + - Let: + - odd + - FnCall: ["simd_shuffle!", [a, b, "crate::core_arch::macros::odd::<{type[1]}>()"]] + - FnCall: [simd_add, [even, odd]] - - name: "vpadd{neon_type.no}" + - name: "vpadd{neon_type[0].no}" doc: Floating-point add pairwise - arguments: ["a: {neon_type}", "b: {neon_type}"] - return_type: "{type}" + arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] + return_type: "{neon_type[0]}" attr: - *neon-fp16 - *neon-stable-fp16 @@ -6990,14 +6991,15 @@ intrinsics: assert_instr: [faddp] safety: safe types: - - float16x8_t + - [float16x8_t, "8"] compose: - - LLVMLink: - name: "faddp.{neon_type}" - links: - - link: "llvm.aarch64.neon.faddp.{neon_type}" - arch: aarch64,arm64ec - + - Let: + - even + - FnCall: ["simd_shuffle!", [a, b, 
"crate::core_arch::macros::even::<{type[1]}>()"]] + - Let: + - odd + - FnCall: ["simd_shuffle!", [a, b, "crate::core_arch::macros::odd::<{type[1]}>()"]] + - FnCall: [simd_add, [even, odd]] - name: "vpmax{neon_type.no}" doc: Floating-point add pairwise @@ -13235,26 +13237,6 @@ intrinsics: - link: "llvm.aarch64.neon.usqadd.{neon_type[1]}" arch: aarch64,arm64ec - - name: "vpadd{neon_type.no}" - doc: "Add Pairwise" - arguments: ["a: {neon_type}", "b: {neon_type}"] - return_type: "{neon_type}" - attr: - - *neon-stable - assert_instr: [addp] - safety: safe - types: - - int8x16_t - - int16x8_t - - int32x4_t - - int64x2_t - compose: - - LLVMLink: - name: "vpadd{neon_type.no}" - links: - - link: "llvm.aarch64.neon.addp.{neon_type}" - arch: aarch64,arm64ec - - name: "vpadd{neon_type[0].no}" doc: "Add Pairwise" arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] @@ -13264,17 +13246,22 @@ intrinsics: assert_instr: [addp] safety: safe types: - - [uint8x16_t, int8x16_t] - - [uint16x8_t, int16x8_t] - - [uint32x4_t, int32x4_t] - - [uint64x2_t, int64x2_t] + - [int8x16_t, "16"] + - [int16x8_t, "8"] + - [int32x4_t, "4"] + - [int64x2_t, "2"] + - [uint8x16_t, "16"] + - [uint16x8_t, "8"] + - [uint32x4_t, "4"] + - [uint64x2_t, "2"] compose: - - FnCall: - - transmute - - - FnCall: - - 'vpadd{neon_type[1].no}' - - - FnCall: [transmute, [a]] - - FnCall: [transmute, [b]] + - Let: + - even + - FnCall: ["simd_shuffle!", [a, b, "crate::core_arch::macros::even::<{type[1]}>()"]] + - Let: + - odd + - FnCall: ["simd_shuffle!", [a, b, "crate::core_arch::macros::odd::<{type[1]}>()"]] + - FnCall: [simd_add, [even, odd]] - name: "vpaddd_s64" doc: "Add pairwise" From 7287be900699468bb8a2d9b4705f3545aa28c451 Mon Sep 17 00:00:00 2001 From: jasper3108 Date: Wed, 18 Feb 2026 17:18:16 +0100 Subject: [PATCH 88/90] Implement reflection support for function pointer types and add tests - Implement handling of FnPtr TypeKind in const-eval, including: - Unsafety flag (safe vs unsafe fn) - ABI variants (Rust, 
Named(C), Named(custom)) - Input and output types - Variadic function pointers - Add const-eval tests covering: - Basic Rust fn() pointers - Unsafe fn() pointers - Extern C and custom ABI pointers - Functions with multiple inputs and output types - Variadic functions - Use const TypeId checks to verify correctness of inputs, outputs, and payloads --- .../src/const_eval/type_info.rs | 76 +++++++- compiler/rustc_span/src/symbol.rs | 8 + library/core/src/mem/type_info.rs | 38 ++++ library/coretests/tests/mem.rs | 1 + library/coretests/tests/mem/fn_ptr.rs | 169 ++++++++++++++++++ 5 files changed, 289 insertions(+), 3 deletions(-) create mode 100644 library/coretests/tests/mem/fn_ptr.rs diff --git a/compiler/rustc_const_eval/src/const_eval/type_info.rs b/compiler/rustc_const_eval/src/const_eval/type_info.rs index 0fd70d784d4fb..0ed04a5ab20b4 100644 --- a/compiler/rustc_const_eval/src/const_eval/type_info.rs +++ b/compiler/rustc_const_eval/src/const_eval/type_info.rs @@ -2,12 +2,12 @@ mod adt; use std::borrow::Cow; -use rustc_abi::{FieldIdx, VariantIdx}; +use rustc_abi::{ExternAbi, FieldIdx, VariantIdx}; use rustc_ast::Mutability; use rustc_hir::LangItem; use rustc_middle::span_bug; use rustc_middle::ty::layout::TyAndLayout; -use rustc_middle::ty::{self, Const, ScalarInt, Ty}; +use rustc_middle::ty::{self, Const, FnHeader, FnSigTys, ScalarInt, Ty, TyCtxt}; use rustc_span::{Symbol, sym}; use crate::const_eval::CompileTimeMachine; @@ -188,10 +188,21 @@ impl<'tcx> InterpCx<'tcx, CompileTimeMachine<'tcx>> { self.write_dyn_trait_type_info(dyn_place, *predicates, *region)?; variant } + ty::FnPtr(sig, fn_header) => { + let (variant, variant_place) = + self.downcast(&field_dest, sym::FnPtr)?; + let fn_ptr_place = + self.project_field(&variant_place, FieldIdx::ZERO)?; + + // FIXME: handle lifetime bounds + let sig = sig.skip_binder(); + + self.write_fn_ptr_type_info(fn_ptr_place, &sig, fn_header)?; + variant + } ty::Foreign(_) | ty::Pat(_, _) | ty::FnDef(..) - | ty::FnPtr(..) 
| ty::UnsafeBinder(..) | ty::Closure(..) | ty::CoroutineClosure(..) @@ -402,6 +413,65 @@ impl<'tcx> InterpCx<'tcx, CompileTimeMachine<'tcx>> { interp_ok(()) } + pub(crate) fn write_fn_ptr_type_info( + &mut self, + place: impl Writeable<'tcx, CtfeProvenance>, + sig: &FnSigTys>, + fn_header: &FnHeader>, + ) -> InterpResult<'tcx> { + let FnHeader { safety, c_variadic, abi } = fn_header; + + for (field_idx, field) in + place.layout().ty.ty_adt_def().unwrap().non_enum_variant().fields.iter_enumerated() + { + let field_place = self.project_field(&place, field_idx)?; + + match field.name { + sym::unsafety => { + self.write_scalar(Scalar::from_bool(safety.is_unsafe()), &field_place)?; + } + sym::abi => match abi { + ExternAbi::C { .. } => { + let (rust_variant, _rust_place) = + self.downcast(&field_place, sym::ExternC)?; + self.write_discriminant(rust_variant, &field_place)?; + } + ExternAbi::Rust => { + let (rust_variant, _rust_place) = + self.downcast(&field_place, sym::ExternRust)?; + self.write_discriminant(rust_variant, &field_place)?; + } + other_abi => { + let (variant, variant_place) = self.downcast(&field_place, sym::Named)?; + let str_place = self.allocate_str_dedup(other_abi.as_str())?; + let str_ref = self.mplace_to_ref(&str_place)?; + let payload = self.project_field(&variant_place, FieldIdx::ZERO)?; + self.write_immediate(*str_ref, &payload)?; + self.write_discriminant(variant, &field_place)?; + } + }, + sym::inputs => { + let inputs = sig.inputs(); + self.allocate_fill_and_write_slice_ptr( + field_place, + inputs.len() as _, + |this, i, place| this.write_type_id(inputs[i as usize], &place), + )?; + } + sym::output => { + let output = sig.output(); + self.write_type_id(output, &field_place)?; + } + sym::variadic => { + self.write_scalar(Scalar::from_bool(*c_variadic), &field_place)?; + } + other => span_bug!(self.tcx.def_span(field.did), "unimplemented field {other}"), + } + } + + interp_ok(()) + } + pub(crate) fn write_pointer_type_info( &mut self, place: 
impl Writeable<'tcx, CtfeProvenance>, diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index bf3d28a654403..e9b600a98ba9f 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -242,6 +242,8 @@ symbols! { Equal, Err, Error, + ExternC, + ExternRust, File, FileType, Float, @@ -250,6 +252,7 @@ symbols! { Fn, FnMut, FnOnce, + FnPtr, Formatter, Forward, From, @@ -303,6 +306,7 @@ symbols! { Mutex, MutexGuard, N, + Named, NonNull, NonZero, None, @@ -1290,6 +1294,7 @@ symbols! { inline_const, inline_const_pat, inout, + inputs, instant_now, instruction_set, integer_: "integer", // underscore to avoid clashing with the function `sym::integer` below @@ -1660,6 +1665,7 @@ symbols! { os_string_as_os_str, other, out, + output, overflow_checks, overlapping_marker_traits, owned_box, @@ -2440,6 +2446,7 @@ symbols! { unsafe_no_drop_flag, unsafe_pinned, unsafe_unpin, + unsafety, unsize, unsized_const_param_ty, unsized_const_params, @@ -2484,6 +2491,7 @@ symbols! { value, values, var, + variadic, variant_count, variants, vec, diff --git a/library/core/src/mem/type_info.rs b/library/core/src/mem/type_info.rs index f8c2a259ba7ef..18612565aeef2 100644 --- a/library/core/src/mem/type_info.rs +++ b/library/core/src/mem/type_info.rs @@ -75,6 +75,8 @@ pub enum TypeKind { Reference(Reference), /// Pointers. Pointer(Pointer), + /// Function pointers. + FnPtr(FnPtr), /// FIXME(#146922): add all the common types Other, } @@ -305,3 +307,39 @@ pub struct Pointer { /// Whether this pointer is mutable or not. pub mutable: bool, } + +#[derive(Debug)] +#[unstable(feature = "type_info", issue = "146922")] +/// Function pointer, e.g. fn(u8), +pub struct FnPtr { + /// Unsafety, true is unsafe + pub unsafety: bool, + + /// Abi, e.g. extern "C" + pub abi: Abi, + + /// Function inputs + pub inputs: &'static [TypeId], + + /// Function return type, default is TypeId::of::<()> + pub output: TypeId, + + /// Vardiadic function, e.g. 
extern "C" fn add(n: usize, mut args: ...); + pub variadic: bool, +} + +#[derive(Debug, Default)] +#[non_exhaustive] +#[unstable(feature = "type_info", issue = "146922")] +/// Abi of [FnPtr] +pub enum Abi { + /// Named abi, e.g. extern "custom", "stdcall" etc. + Named(&'static str), + + /// Default + #[default] + ExternRust, + + /// C-calling convention + ExternC, +} diff --git a/library/coretests/tests/mem.rs b/library/coretests/tests/mem.rs index 193d5416b06a7..236c02d2a243a 100644 --- a/library/coretests/tests/mem.rs +++ b/library/coretests/tests/mem.rs @@ -1,3 +1,4 @@ +mod fn_ptr; mod type_info; use core::mem::*; diff --git a/library/coretests/tests/mem/fn_ptr.rs b/library/coretests/tests/mem/fn_ptr.rs new file mode 100644 index 0000000000000..1d50a2552a193 --- /dev/null +++ b/library/coretests/tests/mem/fn_ptr.rs @@ -0,0 +1,169 @@ +use std::any::TypeId; +use std::mem::type_info::{Abi, FnPtr, Type, TypeKind}; + +const STRING_TY: TypeId = const { TypeId::of::() }; +const U8_TY: TypeId = const { TypeId::of::() }; +const _U8_REF_TY: TypeId = const { TypeId::of::<&u8>() }; +const UNIT_TY: TypeId = const { TypeId::of::<()>() }; + +#[test] +fn test_fn_ptrs() { + let TypeKind::FnPtr(FnPtr { + unsafety: false, + abi: Abi::ExternRust, + inputs: &[], + output, + variadic: false, + }) = (const { Type::of::().kind }) + else { + panic!(); + }; + assert_eq!(output, UNIT_TY); +} +#[test] +fn test_ref() { + const { + // references are tricky because the lifetimes give the references different type ids + // so we check the pointees instead + let TypeKind::FnPtr(FnPtr { + unsafety: false, + abi: Abi::ExternRust, + inputs: &[ty1, ty2], + output, + variadic: false, + }) = (const { Type::of::().kind }) + else { + panic!(); + }; + if output != UNIT_TY { + panic!(); + } + let TypeKind::Reference(reference) = ty1.info().kind else { + panic!(); + }; + if reference.pointee != U8_TY { + panic!(); + } + let TypeKind::Reference(reference) = ty2.info().kind else { + panic!(); + }; + if 
reference.pointee != U8_TY { + panic!(); + } + } +} + +#[test] +fn test_unsafe() { + let TypeKind::FnPtr(FnPtr { + unsafety: true, + abi: Abi::ExternRust, + inputs: &[], + output, + variadic: false, + }) = (const { Type::of::().kind }) + else { + panic!(); + }; + assert_eq!(output, UNIT_TY); +} +#[test] +fn test_abi() { + let TypeKind::FnPtr(FnPtr { + unsafety: false, + abi: Abi::ExternRust, + inputs: &[], + output, + variadic: false, + }) = (const { Type::of::().kind }) + else { + panic!(); + }; + assert_eq!(output, UNIT_TY); + + let TypeKind::FnPtr(FnPtr { + unsafety: false, + abi: Abi::ExternC, + inputs: &[], + output, + variadic: false, + }) = (const { Type::of::().kind }) + else { + panic!(); + }; + assert_eq!(output, UNIT_TY); + + let TypeKind::FnPtr(FnPtr { + unsafety: true, + abi: Abi::Named("system"), + inputs: &[], + output, + variadic: false, + }) = (const { Type::of::().kind }) + else { + panic!(); + }; + assert_eq!(output, UNIT_TY); +} + +#[test] +fn test_inputs() { + let TypeKind::FnPtr(FnPtr { + unsafety: false, + abi: Abi::ExternRust, + inputs: &[ty1, ty2], + output, + variadic: false, + }) = (const { Type::of::().kind }) + else { + panic!(); + }; + assert_eq!(output, UNIT_TY); + assert_eq!(ty1, STRING_TY); + assert_eq!(ty2, U8_TY); + + let TypeKind::FnPtr(FnPtr { + unsafety: false, + abi: Abi::ExternRust, + inputs: &[ty1, ty2], + output, + variadic: false, + }) = (const { Type::of::().kind }) + else { + panic!(); + }; + assert_eq!(output, UNIT_TY); + assert_eq!(ty1, STRING_TY); + assert_eq!(ty2, U8_TY); +} + +#[test] +fn test_output() { + let TypeKind::FnPtr(FnPtr { + unsafety: false, + abi: Abi::ExternRust, + inputs: &[], + output, + variadic: false, + }) = (const { Type::of:: u8>().kind }) + else { + panic!(); + }; + assert_eq!(output, U8_TY); +} + +#[test] +fn test_variadic() { + let TypeKind::FnPtr(FnPtr { + unsafety: false, + abi: Abi::ExternC, + inputs: [ty1], + output, + variadic: true, + }) = &(const { Type::of::().kind }) + else { + 
panic!(); + }; + assert_eq!(output, &UNIT_TY); + assert_eq!(*ty1, U8_TY); +} From 61c9af20c7d36fc4f33e0a90fbb86ce5b519a468 Mon Sep 17 00:00:00 2001 From: Takayuki Maeda Date: Thu, 19 Feb 2026 01:08:35 +0900 Subject: [PATCH 89/90] avoid delayed-bug ICE for malformed diagnostic attrs remove -Znext-solver from ui tests --- compiler/rustc_attr_parsing/src/parser.rs | 7 +++++++ .../malformed-diagnostic-attributes-if-expression.rs | 10 ++++++++++ ...alformed-diagnostic-attributes-if-expression.stderr | 10 ++++++++++ 3 files changed, 27 insertions(+) create mode 100644 tests/ui/diagnostic_namespace/do_not_recommend/malformed-diagnostic-attributes-if-expression.rs create mode 100644 tests/ui/diagnostic_namespace/do_not_recommend/malformed-diagnostic-attributes-if-expression.stderr diff --git a/compiler/rustc_attr_parsing/src/parser.rs b/compiler/rustc_attr_parsing/src/parser.rs index 973635f432e8c..354fbab9cfcf0 100644 --- a/compiler/rustc_attr_parsing/src/parser.rs +++ b/compiler/rustc_attr_parsing/src/parser.rs @@ -522,6 +522,13 @@ impl<'a, 'sess> MetaItemListParserContext<'a, 'sess> { return self.parser.dcx().create_err(err); } + if let ShouldEmit::ErrorsAndLints { recovery: Recovery::Forbidden } = self.should_emit { + // Do not attempt to suggest anything in `Recovery::Forbidden` mode. + // Malformed diagnostic-attr arguments that start with an `if` expression can lead to + // an ICE (https://github.com/rust-lang/rust/issues/152744), because callers may cancel the `InvalidMetaItem` error. + return self.parser.dcx().create_err(err); + } + // Suggest quoting idents, e.g. in `#[cfg(key = value)]`. We don't use `Token::ident` and // don't `uninterpolate` the token to avoid suggesting anything butchered or questionable // when macro metavariables are involved. 
diff --git a/tests/ui/diagnostic_namespace/do_not_recommend/malformed-diagnostic-attributes-if-expression.rs b/tests/ui/diagnostic_namespace/do_not_recommend/malformed-diagnostic-attributes-if-expression.rs new file mode 100644 index 0000000000000..3e00e42f9d9d2 --- /dev/null +++ b/tests/ui/diagnostic_namespace/do_not_recommend/malformed-diagnostic-attributes-if-expression.rs @@ -0,0 +1,10 @@ +//@ check-pass +//@ reference: attributes.diagnostic.do_not_recommend.syntax + +trait Foo {} + +#[diagnostic::do_not_recommend(if not_accepted)] +//~^ WARNING `#[diagnostic::do_not_recommend]` does not expect any arguments +impl Foo for () {} + +fn main() {} diff --git a/tests/ui/diagnostic_namespace/do_not_recommend/malformed-diagnostic-attributes-if-expression.stderr b/tests/ui/diagnostic_namespace/do_not_recommend/malformed-diagnostic-attributes-if-expression.stderr new file mode 100644 index 0000000000000..d93a9f868a74c --- /dev/null +++ b/tests/ui/diagnostic_namespace/do_not_recommend/malformed-diagnostic-attributes-if-expression.stderr @@ -0,0 +1,10 @@ +warning: `#[diagnostic::do_not_recommend]` does not expect any arguments + --> $DIR/malformed-diagnostic-attributes-if-expression.rs:6:1 + | +LL | #[diagnostic::do_not_recommend(if not_accepted)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `#[warn(malformed_diagnostic_attributes)]` (part of `#[warn(unknown_or_malformed_diagnostic_attributes)]`) on by default + +warning: 1 warning emitted + From 3cb093af1fae3eb5370c702a32ca5e334e84dbde Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Wed, 18 Feb 2026 18:22:26 +0100 Subject: [PATCH 90/90] interpret: fix comment typo --- compiler/rustc_const_eval/src/interpret/validity.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 6fc8d5ef8f965..2cf490350e907 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ 
b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -647,8 +647,9 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { } } else { // This is not CTFE, so it's Miri with recursive checking. - // FIXME: should we also `UnsafeCell` behind shared references? Currently that is not - // needed since validation reads bypass Stacked Borrows and data race checks. + // FIXME: should we skip `UnsafeCell` behind shared references? Currently that is + // not needed since validation reads bypass Stacked Borrows and data race checks, + // but is that really coherent? } let path = &self.path; ref_tracking.track(place, || {