mark gfni, vaes, vpclmulqdq intrinsics as safe
diff --git a/crates/core_arch/src/x86/gfni.rs b/crates/core_arch/src/x86/gfni.rs
index 4ba1e62..dbb1191 100644
--- a/crates/core_arch/src/x86/gfni.rs
+++ b/crates/core_arch/src/x86/gfni.rs
@@ -62,8 +62,8 @@
#[target_feature(enable = "gfni,avx512f")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
-pub unsafe fn _mm512_gf2p8mul_epi8(a: __m512i, b: __m512i) -> __m512i {
- transmute(vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64()))
+pub fn _mm512_gf2p8mul_epi8(a: __m512i, b: __m512i) -> __m512i {
+ unsafe { transmute(vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64())) }
}
/// Performs a multiplication in GF(2^8) on the packed bytes.
@@ -78,17 +78,14 @@
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
-pub unsafe fn _mm512_mask_gf2p8mul_epi8(
- src: __m512i,
- k: __mmask64,
- a: __m512i,
- b: __m512i,
-) -> __m512i {
- transmute(simd_select_bitmask(
- k,
- vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64()),
- src.as_i8x64(),
- ))
+pub fn _mm512_mask_gf2p8mul_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i {
+ unsafe {
+ transmute(simd_select_bitmask(
+ k,
+ vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64()),
+ src.as_i8x64(),
+ ))
+ }
}
/// Performs a multiplication in GF(2^8) on the packed bytes.
@@ -103,13 +100,15 @@
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
-pub unsafe fn _mm512_maskz_gf2p8mul_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i {
+pub fn _mm512_maskz_gf2p8mul_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i {
let zero = i8x64::ZERO;
- transmute(simd_select_bitmask(
- k,
- vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64()),
- zero,
- ))
+ unsafe {
+ transmute(simd_select_bitmask(
+ k,
+ vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64()),
+ zero,
+ ))
+ }
}
/// Performs a multiplication in GF(2^8) on the packed bytes.
@@ -121,8 +120,8 @@
#[target_feature(enable = "gfni,avx")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
-pub unsafe fn _mm256_gf2p8mul_epi8(a: __m256i, b: __m256i) -> __m256i {
- transmute(vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32()))
+pub fn _mm256_gf2p8mul_epi8(a: __m256i, b: __m256i) -> __m256i {
+ unsafe { transmute(vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32())) }
}
/// Performs a multiplication in GF(2^8) on the packed bytes.
@@ -137,17 +136,14 @@
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
-pub unsafe fn _mm256_mask_gf2p8mul_epi8(
- src: __m256i,
- k: __mmask32,
- a: __m256i,
- b: __m256i,
-) -> __m256i {
- transmute(simd_select_bitmask(
- k,
- vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32()),
- src.as_i8x32(),
- ))
+pub fn _mm256_mask_gf2p8mul_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i {
+ unsafe {
+ transmute(simd_select_bitmask(
+ k,
+ vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32()),
+ src.as_i8x32(),
+ ))
+ }
}
/// Performs a multiplication in GF(2^8) on the packed bytes.
@@ -162,13 +158,15 @@
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
-pub unsafe fn _mm256_maskz_gf2p8mul_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i {
+pub fn _mm256_maskz_gf2p8mul_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i {
let zero = i8x32::ZERO;
- transmute(simd_select_bitmask(
- k,
- vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32()),
- zero,
- ))
+ unsafe {
+ transmute(simd_select_bitmask(
+ k,
+ vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32()),
+ zero,
+ ))
+ }
}
/// Performs a multiplication in GF(2^8) on the packed bytes.
@@ -180,8 +178,8 @@
#[target_feature(enable = "gfni")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(gf2p8mulb))]
-pub unsafe fn _mm_gf2p8mul_epi8(a: __m128i, b: __m128i) -> __m128i {
- transmute(vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16()))
+pub fn _mm_gf2p8mul_epi8(a: __m128i, b: __m128i) -> __m128i {
+ unsafe { transmute(vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16())) }
}
/// Performs a multiplication in GF(2^8) on the packed bytes.
@@ -196,17 +194,14 @@
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
-pub unsafe fn _mm_mask_gf2p8mul_epi8(
- src: __m128i,
- k: __mmask16,
- a: __m128i,
- b: __m128i,
-) -> __m128i {
- transmute(simd_select_bitmask(
- k,
- vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16()),
- src.as_i8x16(),
- ))
+pub fn _mm_mask_gf2p8mul_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i {
+ unsafe {
+ transmute(simd_select_bitmask(
+ k,
+ vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16()),
+ src.as_i8x16(),
+ ))
+ }
}
/// Performs a multiplication in GF(2^8) on the packed bytes.
@@ -221,13 +216,15 @@
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
-pub unsafe fn _mm_maskz_gf2p8mul_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i {
-    let zero = i8x16::ZERO;
-    transmute(simd_select_bitmask(
-        k,
-        vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16()),
-        zero,
-    ))
+pub fn _mm_maskz_gf2p8mul_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i {
+    let zero = i8x16::ZERO;
+    unsafe {
+        transmute(simd_select_bitmask(
+            k,
+            vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16()),
+            zero,
+        ))
+    }
}
/// Performs an affine transformation on the packed bytes in x.
@@ -241,13 +238,15 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(2)]
-pub unsafe fn _mm512_gf2p8affine_epi64_epi8<const B: i32>(x: __m512i, a: __m512i) -> __m512i {
+pub fn _mm512_gf2p8affine_epi64_epi8<const B: i32>(x: __m512i, a: __m512i) -> __m512i {
static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x64();
let a = a.as_i8x64();
- let r = vgf2p8affineqb_512(x, a, b);
- transmute(r)
+ unsafe {
+ let r = vgf2p8affineqb_512(x, a, b);
+ transmute(r)
+ }
}
/// Performs an affine transformation on the packed bytes in x.
@@ -264,7 +263,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(3)]
-pub unsafe fn _mm512_maskz_gf2p8affine_epi64_epi8<const B: i32>(
+pub fn _mm512_maskz_gf2p8affine_epi64_epi8<const B: i32>(
k: __mmask64,
x: __m512i,
a: __m512i,
@@ -274,8 +273,10 @@
let zero = i8x64::ZERO;
let x = x.as_i8x64();
let a = a.as_i8x64();
- let r = vgf2p8affineqb_512(x, a, b);
- transmute(simd_select_bitmask(k, r, zero))
+ unsafe {
+ let r = vgf2p8affineqb_512(x, a, b);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Performs an affine transformation on the packed bytes in x.
@@ -292,7 +293,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(4)]
-pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8<const B: i32>(
+pub fn _mm512_mask_gf2p8affine_epi64_epi8<const B: i32>(
src: __m512i,
k: __mmask64,
x: __m512i,
@@ -302,8 +303,10 @@
let b = B as u8;
let x = x.as_i8x64();
let a = a.as_i8x64();
- let r = vgf2p8affineqb_512(x, a, b);
- transmute(simd_select_bitmask(k, r, src.as_i8x64()))
+ unsafe {
+ let r = vgf2p8affineqb_512(x, a, b);
+ transmute(simd_select_bitmask(k, r, src.as_i8x64()))
+ }
}
/// Performs an affine transformation on the packed bytes in x.
@@ -317,13 +320,15 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(2)]
-pub unsafe fn _mm256_gf2p8affine_epi64_epi8<const B: i32>(x: __m256i, a: __m256i) -> __m256i {
+pub fn _mm256_gf2p8affine_epi64_epi8<const B: i32>(x: __m256i, a: __m256i) -> __m256i {
static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x32();
let a = a.as_i8x32();
- let r = vgf2p8affineqb_256(x, a, b);
- transmute(r)
+ unsafe {
+ let r = vgf2p8affineqb_256(x, a, b);
+ transmute(r)
+ }
}
/// Performs an affine transformation on the packed bytes in x.
@@ -340,7 +345,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(3)]
-pub unsafe fn _mm256_maskz_gf2p8affine_epi64_epi8<const B: i32>(
+pub fn _mm256_maskz_gf2p8affine_epi64_epi8<const B: i32>(
k: __mmask32,
x: __m256i,
a: __m256i,
@@ -350,8 +355,10 @@
let zero = i8x32::ZERO;
let x = x.as_i8x32();
let a = a.as_i8x32();
- let r = vgf2p8affineqb_256(x, a, b);
- transmute(simd_select_bitmask(k, r, zero))
+ unsafe {
+ let r = vgf2p8affineqb_256(x, a, b);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Performs an affine transformation on the packed bytes in x.
@@ -368,7 +375,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(4)]
-pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8<const B: i32>(
+pub fn _mm256_mask_gf2p8affine_epi64_epi8<const B: i32>(
src: __m256i,
k: __mmask32,
x: __m256i,
@@ -378,8 +385,10 @@
let b = B as u8;
let x = x.as_i8x32();
let a = a.as_i8x32();
- let r = vgf2p8affineqb_256(x, a, b);
- transmute(simd_select_bitmask(k, r, src.as_i8x32()))
+ unsafe {
+ let r = vgf2p8affineqb_256(x, a, b);
+ transmute(simd_select_bitmask(k, r, src.as_i8x32()))
+ }
}
/// Performs an affine transformation on the packed bytes in x.
@@ -393,13 +402,15 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(gf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(2)]
-pub unsafe fn _mm_gf2p8affine_epi64_epi8<const B: i32>(x: __m128i, a: __m128i) -> __m128i {
+pub fn _mm_gf2p8affine_epi64_epi8<const B: i32>(x: __m128i, a: __m128i) -> __m128i {
static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x16();
let a = a.as_i8x16();
- let r = vgf2p8affineqb_128(x, a, b);
- transmute(r)
+ unsafe {
+ let r = vgf2p8affineqb_128(x, a, b);
+ transmute(r)
+ }
}
/// Performs an affine transformation on the packed bytes in x.
@@ -416,7 +427,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(3)]
-pub unsafe fn _mm_maskz_gf2p8affine_epi64_epi8<const B: i32>(
+pub fn _mm_maskz_gf2p8affine_epi64_epi8<const B: i32>(
k: __mmask16,
x: __m128i,
a: __m128i,
@@ -426,8 +437,10 @@
let zero = i8x16::ZERO;
let x = x.as_i8x16();
let a = a.as_i8x16();
- let r = vgf2p8affineqb_128(x, a, b);
- transmute(simd_select_bitmask(k, r, zero))
+ unsafe {
+ let r = vgf2p8affineqb_128(x, a, b);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Performs an affine transformation on the packed bytes in x.
@@ -444,7 +457,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(4)]
-pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8<const B: i32>(
+pub fn _mm_mask_gf2p8affine_epi64_epi8<const B: i32>(
src: __m128i,
k: __mmask16,
x: __m128i,
@@ -454,8 +467,10 @@
let b = B as u8;
let x = x.as_i8x16();
let a = a.as_i8x16();
- let r = vgf2p8affineqb_128(x, a, b);
- transmute(simd_select_bitmask(k, r, src.as_i8x16()))
+ unsafe {
+ let r = vgf2p8affineqb_128(x, a, b);
+ transmute(simd_select_bitmask(k, r, src.as_i8x16()))
+ }
}
/// Performs an affine transformation on the inverted packed bytes in x.
@@ -471,13 +486,15 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(2)]
-pub unsafe fn _mm512_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m512i, a: __m512i) -> __m512i {
+pub fn _mm512_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m512i, a: __m512i) -> __m512i {
static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x64();
let a = a.as_i8x64();
- let r = vgf2p8affineinvqb_512(x, a, b);
- transmute(r)
+ unsafe {
+ let r = vgf2p8affineinvqb_512(x, a, b);
+ transmute(r)
+ }
}
/// Performs an affine transformation on the inverted packed bytes in x.
@@ -496,7 +513,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(3)]
-pub unsafe fn _mm512_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
+pub fn _mm512_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
k: __mmask64,
x: __m512i,
a: __m512i,
@@ -506,8 +523,10 @@
let zero = i8x64::ZERO;
let x = x.as_i8x64();
let a = a.as_i8x64();
- let r = vgf2p8affineinvqb_512(x, a, b);
- transmute(simd_select_bitmask(k, r, zero))
+ unsafe {
+ let r = vgf2p8affineinvqb_512(x, a, b);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Performs an affine transformation on the inverted packed bytes in x.
@@ -526,7 +545,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(4)]
-pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
+pub fn _mm512_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
src: __m512i,
k: __mmask64,
x: __m512i,
@@ -536,8 +555,10 @@
let b = B as u8;
let x = x.as_i8x64();
let a = a.as_i8x64();
- let r = vgf2p8affineinvqb_512(x, a, b);
- transmute(simd_select_bitmask(k, r, src.as_i8x64()))
+ unsafe {
+ let r = vgf2p8affineinvqb_512(x, a, b);
+ transmute(simd_select_bitmask(k, r, src.as_i8x64()))
+ }
}
/// Performs an affine transformation on the inverted packed bytes in x.
@@ -553,13 +574,15 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(2)]
-pub unsafe fn _mm256_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m256i, a: __m256i) -> __m256i {
+pub fn _mm256_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m256i, a: __m256i) -> __m256i {
static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x32();
let a = a.as_i8x32();
- let r = vgf2p8affineinvqb_256(x, a, b);
- transmute(r)
+ unsafe {
+ let r = vgf2p8affineinvqb_256(x, a, b);
+ transmute(r)
+ }
}
/// Performs an affine transformation on the inverted packed bytes in x.
@@ -578,7 +601,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(3)]
-pub unsafe fn _mm256_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
+pub fn _mm256_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
k: __mmask32,
x: __m256i,
a: __m256i,
@@ -588,8 +611,10 @@
let zero = i8x32::ZERO;
let x = x.as_i8x32();
let a = a.as_i8x32();
- let r = vgf2p8affineinvqb_256(x, a, b);
- transmute(simd_select_bitmask(k, r, zero))
+ unsafe {
+ let r = vgf2p8affineinvqb_256(x, a, b);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Performs an affine transformation on the inverted packed bytes in x.
@@ -608,7 +633,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(4)]
-pub unsafe fn _mm256_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
+pub fn _mm256_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
src: __m256i,
k: __mmask32,
x: __m256i,
@@ -618,8 +643,10 @@
let b = B as u8;
let x = x.as_i8x32();
let a = a.as_i8x32();
- let r = vgf2p8affineinvqb_256(x, a, b);
- transmute(simd_select_bitmask(k, r, src.as_i8x32()))
+ unsafe {
+ let r = vgf2p8affineinvqb_256(x, a, b);
+ transmute(simd_select_bitmask(k, r, src.as_i8x32()))
+ }
}
/// Performs an affine transformation on the inverted packed bytes in x.
@@ -635,13 +662,15 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(gf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(2)]
-pub unsafe fn _mm_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m128i, a: __m128i) -> __m128i {
+pub fn _mm_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m128i, a: __m128i) -> __m128i {
static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x16();
let a = a.as_i8x16();
- let r = vgf2p8affineinvqb_128(x, a, b);
- transmute(r)
+ unsafe {
+ let r = vgf2p8affineinvqb_128(x, a, b);
+ transmute(r)
+ }
}
/// Performs an affine transformation on the inverted packed bytes in x.
@@ -660,7 +689,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(3)]
-pub unsafe fn _mm_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
+pub fn _mm_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
k: __mmask16,
x: __m128i,
a: __m128i,
@@ -670,8 +699,10 @@
let zero = i8x16::ZERO;
let x = x.as_i8x16();
let a = a.as_i8x16();
- let r = vgf2p8affineinvqb_128(x, a, b);
- transmute(simd_select_bitmask(k, r, zero))
+ unsafe {
+ let r = vgf2p8affineinvqb_128(x, a, b);
+ transmute(simd_select_bitmask(k, r, zero))
+ }
}
/// Performs an affine transformation on the inverted packed bytes in x.
@@ -690,7 +721,7 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(4)]
-pub unsafe fn _mm_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
+pub fn _mm_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
src: __m128i,
k: __mmask16,
x: __m128i,
@@ -700,8 +731,10 @@
let b = B as u8;
let x = x.as_i8x16();
let a = a.as_i8x16();
- let r = vgf2p8affineinvqb_128(x, a, b);
- transmute(simd_select_bitmask(k, r, src.as_i8x16()))
+ unsafe {
+ let r = vgf2p8affineinvqb_128(x, a, b);
+ transmute(simd_select_bitmask(k, r, src.as_i8x16()))
+ }
}
#[cfg(test)]
diff --git a/crates/core_arch/src/x86/vaes.rs b/crates/core_arch/src/x86/vaes.rs
index 0a7abbe..99ee0f2 100644
--- a/crates/core_arch/src/x86/vaes.rs
+++ b/crates/core_arch/src/x86/vaes.rs
@@ -41,8 +41,8 @@
#[target_feature(enable = "vaes")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vaesenc))]
-pub unsafe fn _mm256_aesenc_epi128(a: __m256i, round_key: __m256i) -> __m256i {
- aesenc_256(a, round_key)
+pub fn _mm256_aesenc_epi128(a: __m256i, round_key: __m256i) -> __m256i {
+ unsafe { aesenc_256(a, round_key) }
}
/// Performs the last round of an AES encryption flow on each 128-bit word (state) in `a` using
@@ -53,8 +53,8 @@
#[target_feature(enable = "vaes")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vaesenclast))]
-pub unsafe fn _mm256_aesenclast_epi128(a: __m256i, round_key: __m256i) -> __m256i {
- aesenclast_256(a, round_key)
+pub fn _mm256_aesenclast_epi128(a: __m256i, round_key: __m256i) -> __m256i {
+ unsafe { aesenclast_256(a, round_key) }
}
/// Performs one round of an AES decryption flow on each 128-bit word (state) in `a` using
@@ -65,8 +65,8 @@
#[target_feature(enable = "vaes")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vaesdec))]
-pub unsafe fn _mm256_aesdec_epi128(a: __m256i, round_key: __m256i) -> __m256i {
- aesdec_256(a, round_key)
+pub fn _mm256_aesdec_epi128(a: __m256i, round_key: __m256i) -> __m256i {
+ unsafe { aesdec_256(a, round_key) }
}
/// Performs the last round of an AES decryption flow on each 128-bit word (state) in `a` using
@@ -77,8 +77,8 @@
#[target_feature(enable = "vaes")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vaesdeclast))]
-pub unsafe fn _mm256_aesdeclast_epi128(a: __m256i, round_key: __m256i) -> __m256i {
- aesdeclast_256(a, round_key)
+pub fn _mm256_aesdeclast_epi128(a: __m256i, round_key: __m256i) -> __m256i {
+ unsafe { aesdeclast_256(a, round_key) }
}
/// Performs one round of an AES encryption flow on each 128-bit word (state) in `a` using
@@ -89,8 +89,8 @@
#[target_feature(enable = "vaes,avx512f")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vaesenc))]
-pub unsafe fn _mm512_aesenc_epi128(a: __m512i, round_key: __m512i) -> __m512i {
- aesenc_512(a, round_key)
+pub fn _mm512_aesenc_epi128(a: __m512i, round_key: __m512i) -> __m512i {
+ unsafe { aesenc_512(a, round_key) }
}
/// Performs the last round of an AES encryption flow on each 128-bit word (state) in `a` using
@@ -101,8 +101,8 @@
#[target_feature(enable = "vaes,avx512f")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vaesenclast))]
-pub unsafe fn _mm512_aesenclast_epi128(a: __m512i, round_key: __m512i) -> __m512i {
- aesenclast_512(a, round_key)
+pub fn _mm512_aesenclast_epi128(a: __m512i, round_key: __m512i) -> __m512i {
+ unsafe { aesenclast_512(a, round_key) }
}
/// Performs one round of an AES decryption flow on each 128-bit word (state) in `a` using
@@ -113,8 +113,8 @@
#[target_feature(enable = "vaes,avx512f")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vaesdec))]
-pub unsafe fn _mm512_aesdec_epi128(a: __m512i, round_key: __m512i) -> __m512i {
- aesdec_512(a, round_key)
+pub fn _mm512_aesdec_epi128(a: __m512i, round_key: __m512i) -> __m512i {
+ unsafe { aesdec_512(a, round_key) }
}
/// Performs the last round of an AES decryption flow on each 128-bit word (state) in `a` using
@@ -125,8 +125,8 @@
#[target_feature(enable = "vaes,avx512f")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vaesdeclast))]
-pub unsafe fn _mm512_aesdeclast_epi128(a: __m512i, round_key: __m512i) -> __m512i {
- aesdeclast_512(a, round_key)
+pub fn _mm512_aesdeclast_epi128(a: __m512i, round_key: __m512i) -> __m512i {
+ unsafe { aesdeclast_512(a, round_key) }
}
#[cfg(test)]
diff --git a/crates/core_arch/src/x86/vpclmulqdq.rs b/crates/core_arch/src/x86/vpclmulqdq.rs
index 2afc4a0..16af2a9 100644
--- a/crates/core_arch/src/x86/vpclmulqdq.rs
+++ b/crates/core_arch/src/x86/vpclmulqdq.rs
@@ -37,9 +37,9 @@
// technically according to Intel's documentation we don't need avx512f here, however LLVM gets confused otherwise
#[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
-pub unsafe fn _mm512_clmulepi64_epi128<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
+pub fn _mm512_clmulepi64_epi128<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
static_assert_uimm_bits!(IMM8, 8);
- pclmulqdq_512(a, b, IMM8 as u8)
+ unsafe { pclmulqdq_512(a, b, IMM8 as u8) }
}
/// Performs a carry-less multiplication of two 64-bit polynomials over the
@@ -55,9 +55,9 @@
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
-pub unsafe fn _mm256_clmulepi64_epi128<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
+pub fn _mm256_clmulepi64_epi128<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
static_assert_uimm_bits!(IMM8, 8);
- pclmulqdq_256(a, b, IMM8 as u8)
+ unsafe { pclmulqdq_256(a, b, IMM8 as u8) }
}
#[cfg(test)]