Move `const fn mask` outside the `unsafe` block in `_mm512_alignr_epi8`
diff --git a/crates/core_arch/src/x86/avx512bw.rs b/crates/core_arch/src/x86/avx512bw.rs
index e8807c4..7abc545 100644
--- a/crates/core_arch/src/x86/avx512bw.rs
+++ b/crates/core_arch/src/x86/avx512bw.rs
@@ -11303,7 +11303,16 @@
#[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- unsafe {
+ const fn mask(shift: u32, i: u32) -> u32 {
+ let shift = shift % 16;
+        let mod_i = i % 16;
+ if mod_i < (16 - shift) {
+ i + shift
+ } else {
+ i + 48 + shift
+ }
+ }
+
// If palignr is shifting the pair of vectors more than the size of two
// lanes, emit zero.
if IMM8 >= 32 {
@@ -11316,18 +11325,11 @@
} else {
(a, b)
};
+ unsafe {
if IMM8 == 16 {
return transmute(a);
}
- const fn mask(shift: u32, i: u32) -> u32 {
- let shift = shift % 16;
- let mod_i = i%16;
- if mod_i < (16 - shift) {
- i + shift
- } else {
- i + 48 + shift
- }
- }
+
let r: i8x64 = simd_shuffle!(
b.as_i8x64(),
@@ -11491,7 +11493,7 @@
a: __m128i,
b: __m128i,
) -> __m128i {
     unsafe {
static_assert_uimm_bits!(IMM8, 8);
let r = _mm_alignr_epi8::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i8x16(), src.as_i8x16()))