searchIndex["core_arch"]={"doc":"SIMD and vendor intrinsics module.","i":[[0,"x86","core_arch","Platform-specific intrinsics for the `x86` platform.",N,N],[3,"__m64","core_arch::x86","64-bit wide integer vector type, x86-specific",N,N],[3,R[32],E,"128-bit wide integer vector type, x86-specific",N,N],[3,R[4],E,"128-bit wide set of four `f32` types, x86-specific",N,N],[3,R[44],E,"128-bit wide set of two `f64` types, x86-specific",N,N],[3,R[112],E,"256-bit wide integer vector type, x86-specific",N,N],[3,"__m256",E,"256-bit wide set of eight `f32` types, x86-specific",N,N],[3,R[100],E,"256-bit wide set of four `f64` types, x86-specific",N,N],[3,R[217],E,"512-bit wide integer vector type, x86-specific",N,N],[3,"__m512",E,"512-bit wide set of sixteen `f32` types, x86-specific",N,N],[3,"__m512d",E,"512-bit wide set of eight `f64` types, x86-specific",N,N],[3,R[229],E,"Result of the `cpuid` instruction.",N,N],[12,"eax",E,"EAX register.",0,N],[12,"ebx",E,"EBX register.",0,N],[12,"ecx",E,"ECX register.",0,N],[12,"edx",E,"EDX register.",0,N],[5,"_fxsave",E,"Saves the `x87` FPU, `MMX` technology, `XMM`, and `MXCSR`…",N,[[]]],[5,"_fxrstor",E,"Restores the `XMM`, `MMX`, `MXCSR`, and `x87` FPU…",N,[[]]],[5,"_bswap",E,"Returns an integer with the reversed byte order of x",N,[[["i32"]],["i32"]]],[5,"_rdtsc",E,R[0],N,[[],["u64"]]],[5,"__rdtscp",E,R[0],N,[[],["u64"]]],[5,"__cpuid_count",E,"Returns the result of the `cpuid` instruction for a given…",N,[[["u32"]],[R[1]]]],[5,"__cpuid",E,"See `__cpuid_count`.",N,[[["u32"]],[R[1]]]],[5,"has_cpuid",E,"Does the host support the `cpuid` instruction?",N,[[],["bool"]]],[5,"__get_cpuid_max",E,"Returns the highest-supported `leaf` (`EAX`) and sub-leaf…",N,[[["u32"]]]],[5,"_xsave",E,R[2],N,[[["u64"]]]],[5,"_xrstor",E,R[3],N,[[["u64"]]]],[5,"_xsetbv",E,"Copies 64-bits from `val` to the extended control register…",N,[[["u32"],["u64"]]]],[5,"_xgetbv",E,"Reads the contents of the extended control register `XCR`…",N,[[["u32"]],["u64"]]],[5,"_xsaveopt",E,R[2],N,[[["u64"]]]],[5,"_xsavec",E,R[2],N,[[["u64"]]]],[5,"_xsaves",E,R[2],N,[[["u64"]]]],[5,"_xrstors",E,R[3],N,[[["u64"]]]],[5,"_mm_add_ss",E,"Adds the first component of `a` and `b`, the other…",N,[[[R[4]]],[R[4]]]],[5,"_mm_add_ps",E,"Adds __m128 vectors.",N,[[[R[4]]],[R[4]]]],[5,"_mm_sub_ss",E,"Subtracts the first component of `b` from `a`, the other…",N,[[[R[4]]],[R[4]]]],[5,"_mm_sub_ps",E,"Subtracts __m128 vectors.",N,[[[R[4]]],[R[4]]]],[5,"_mm_mul_ss",E,"Multiplies the first component of `a` and `b`, the other…",N,[[[R[4]]],[R[4]]]],[5,"_mm_mul_ps",E,"Multiplies __m128 vectors.",N,[[[R[4]]],[R[4]]]],[5,"_mm_div_ss",E,"Divides the first component of `a` by `b`, the other…",N,[[[R[4]]],[R[4]]]],[5,"_mm_div_ps",E,"Divides __m128 vectors.",N,[[[R[4]]],[R[4]]]],[5,"_mm_sqrt_ss",E,"Returns the square root of the first single-precision…",N,[[[R[4]]],[R[4]]]],[5,"_mm_sqrt_ps",E,R[105],N,[[[R[4]]],[R[4]]]],[5,"_mm_rcp_ss",E,"Returns the approximate reciprocal of the first…",N,[[[R[4]]],[R[4]]]],[5,"_mm_rcp_ps",E,"Returns the approximate reciprocal of packed…",N,[[[R[4]]],[R[4]]]],[5,"_mm_rsqrt_ss",E,"Returns the approximate reciprocal square root of the first…",N,[[[R[4]]],[R[4]]]],[5,"_mm_rsqrt_ps",E,"Returns the approximate reciprocal square root of packed…",N,[[[R[4]]],[R[4]]]],[5,"_mm_min_ss",E,R[5],N,[[[R[4]]],[R[4]]]],[5,"_mm_min_ps",E,R[6],N,[[[R[4]]],[R[4]]]],[5,"_mm_max_ss",E,R[5],N,[[[R[4]]],[R[4]]]],[5,"_mm_max_ps",E,R[6],N,[[[R[4]]],[R[4]]]],[5,"_mm_and_ps",E,"Bitwise AND of packed single-precision 
(32-bit)…",N,[[[R[4]]],[R[4]]]],[5,"_mm_andnot_ps",E,"Bitwise AND-NOT of packed single-precision (32-bit)…",N,[[[R[4]]],[R[4]]]],[5,"_mm_or_ps",E,"Bitwise OR of packed single-precision (32-bit)…",N,[[[R[4]]],[R[4]]]],[5,"_mm_xor_ps",E,"Bitwise exclusive OR of packed single-precision (32-bit)…",N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpeq_ss",E,"Compares the lowest `f32` of both inputs for equality. The…",N,[[[R[4]]],[R[4]]]],[5,"_mm_cmplt_ss",E,"Compares the lowest `f32` of both inputs for less than.…",N,[[[R[4]]],[R[4]]]],[5,"_mm_cmple_ss",E,"Compares the lowest `f32` of both inputs for less than or…",N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpgt_ss",E,"Compares the lowest `f32` of both inputs for greater than.…",N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpge_ss",E,"Compares the lowest `f32` of both inputs for greater than…",N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpneq_ss",E,"Compares the lowest `f32` of both inputs for inequality.…",N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpnlt_ss",E,R[7],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpnle_ss",E,R[7],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpngt_ss",E,R[7],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpnge_ss",E,R[7],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpord_ss",E,"Checks if the lowest `f32` of both inputs are ordered. The…",N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpunord_ss",E,"Checks if the lowest `f32` of both inputs are unordered.…",N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpeq_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmplt_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmple_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpgt_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpge_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpneq_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpnlt_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpnle_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpngt_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpnge_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpord_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_cmpunord_ps",E,R[8],N,[[[R[4]]],[R[4]]]],[5,"_mm_comieq_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_comilt_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_comile_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_comigt_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_comige_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_comineq_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_ucomieq_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_ucomilt_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_ucomile_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_ucomigt_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_ucomige_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_ucomineq_ss",E,R[9],N,[[[R[4]]],["i32"]]],[5,"_mm_cvtss_si32",E,R[10],N,[[[R[4]]],["i32"]]],[5,"_mm_cvt_ss2si",E,"Alias for `_mm_cvtss_si32`.",N,[[[R[4]]],["i32"]]],[5,"_mm_cvttss_si32",E,R[10],N,[[[R[4]]],["i32"]]],[5,"_mm_cvtt_ss2si",E,"Alias for `_mm_cvttss_si32`.",N,[[[R[4]]],["i32"]]],[5,"_mm_cvtss_f32",E,"Extracts the lowest 32 bit float from the input vector.",N,[[[R[4]]],["f32"]]],[5,"_mm_cvtsi32_ss",E,"Converts a 32 bit integer to a 32 bit float. 
The result…",N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm_cvt_si2ss",E,"Alias for `_mm_cvtsi32_ss`.",N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm_set_ss",E,"Construct a `__m128` with the lowest element set to `a`…",N,[[["f32"]],[R[4]]]],[5,"_mm_set1_ps",E,"Construct a `__m128` with all elements set to `a`.",N,[[["f32"]],[R[4]]]],[5,"_mm_set_ps1",E,"Alias for `_mm_set1_ps`",N,[[["f32"]],[R[4]]]],[5,"_mm_set_ps",E,R[11],N,[[["f32"]],[R[4]]]],[5,"_mm_setr_ps",E,R[11],N,[[["f32"]],[R[4]]]],[5,"_mm_setzero_ps",E,"Construct a `__m128` with all elements initialized to zero.",N,[[],[R[4]]]],[5,"_MM_SHUFFLE",E,"A utility function for creating masks to use with Intel…",N,[[["u32"]],["i32"]]],[5,"_mm_shuffle_ps",E,"Shuffles packed single-precision (32-bit) floating-point…",N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm_unpackhi_ps",E,R[12],N,[[[R[4]]],[R[4]]]],[5,"_mm_unpacklo_ps",E,R[12],N,[[[R[4]]],[R[4]]]],[5,"_mm_movehl_ps",E,"Combine higher half of `a` and `b`. The higher half of `b`…",N,[[[R[4]]],[R[4]]]],[5,"_mm_movelh_ps",E,"Combine lower half of `a` and `b`. The lower half of `b`…",N,[[[R[4]]],[R[4]]]],[5,"_mm_movemask_ps",E,R[48],N,[[[R[4]]],["i32"]]],[5,"_mm_loadh_pi",E,"Sets the upper two single-precision floating-point values…",N,[[[R[4]]],[R[4]]]],[5,"_mm_loadl_pi",E,"Loads two floats from `p` into the lower half of a…",N,[[[R[4]]],[R[4]]]],[5,"_mm_load_ss",E,"Construct a `__m128` with the lowest element read from `p`…",N,[[],[R[4]]]],[5,"_mm_load1_ps",E,"Construct a `__m128` by duplicating the value read from…",N,[[],[R[4]]]],[5,"_mm_load_ps1",E,"Alias for `_mm_load1_ps`",N,[[],[R[4]]]],[5,"_mm_load_ps",E,R[13],N,[[],[R[4]]]],[5,"_mm_loadu_ps",E,"Loads four `f32` values from memory into a `__m128`. There…",N,[[],[R[4]]]],[5,"_mm_loadr_ps",E,R[13],N,[[],[R[4]]]],[5,"_mm_storeh_pi",E,"Stores the upper half of `a` (64 bits) into memory.",N,[[[R[4]]]]],[5,"_mm_storel_pi",E,"Stores the lower half of `a` (64 bits) into memory.",N,[[[R[4]]]]],[5,"_mm_store_ss",E,"Stores the lowest 32 bit float of `a` into memory.",N,[[[R[4]]]]],[5,"_mm_store1_ps",E,"Stores the lowest 32 bit float of `a` repeated four times…",N,[[[R[4]]]]],[5,"_mm_store_ps1",E,"Alias for `_mm_store1_ps`",N,[[[R[4]]]]],[5,"_mm_store_ps",E,"Stores four 32-bit floats into aligned memory.",N,[[[R[4]]]]],[5,"_mm_storeu_ps",E,"Stores four 32-bit floats into memory. 
There are no…",N,[[[R[4]]]]],[5,"_mm_storer_ps",E,"Stores four 32-bit floats into aligned memory in reverse…",N,[[[R[4]]]]],[5,"_mm_move_ss",E,"Returns a `__m128` with the first component from `b` and…",N,[[[R[4]]],[R[4]]]],[5,"_mm_sfence",E,"Performs a serializing operation on all store-to-memory…",N,[[]]],[5,"_mm_getcsr",E,"Gets the unsigned 32-bit value of the MXCSR control and…",N,[[],["u32"]]],[5,"_mm_setcsr",E,"Sets the MXCSR register with the 32-bit unsigned integer…",N,[[["u32"]]]],[5,"_MM_GET_EXCEPTION_MASK",E,R[14],N,[[],["u32"]]],[5,"_MM_GET_EXCEPTION_STATE",E,R[14],N,[[],["u32"]]],[5,"_MM_GET_FLUSH_ZERO_MODE",E,R[14],N,[[],["u32"]]],[5,"_MM_GET_ROUNDING_MODE",E,R[14],N,[[],["u32"]]],[5,"_MM_SET_EXCEPTION_MASK",E,R[14],N,[[["u32"]]]],[5,"_MM_SET_EXCEPTION_STATE",E,R[14],N,[[["u32"]]]],[5,"_MM_SET_FLUSH_ZERO_MODE",E,R[14],N,[[["u32"]]]],[5,"_MM_SET_ROUNDING_MODE",E,R[14],N,[[["u32"]]]],[5,"_mm_prefetch",E,"Fetch the cache line that contains address `p` using the…",N,[[["i32"]]]],[5,"_mm_undefined_ps",E,"Returns vector of type __m128 with undefined elements.",N,[[],[R[4]]]],[5,"_MM_TRANSPOSE4_PS",E,"Transpose the 4x4 matrix formed by 4 rows of __m128 in…",N,[[[R[4]]]]],[5,"_mm_stream_ps",E,"Stores `a` into the memory at `mem_addr` using a…",N,[[[R[4]]]]],[5,"_mm_stream_pi",E,"Stores 64-bits of integer data from a into memory using a…",N,[[["__m64"]]]],[5,"_mm_max_pi16",E,R[15],N,[[["__m64"]],["__m64"]]],[5,"_m_pmaxsw",E,R[15],N,[[["__m64"]],["__m64"]]],[5,"_mm_max_pu8",E,R[16],N,[[["__m64"]],["__m64"]]],[5,"_m_pmaxub",E,R[16],N,[[["__m64"]],["__m64"]]],[5,"_mm_min_pi16",E,R[15],N,[[["__m64"]],["__m64"]]],[5,"_m_pminsw",E,R[15],N,[[["__m64"]],["__m64"]]],[5,"_mm_min_pu8",E,R[16],N,[[["__m64"]],["__m64"]]],[5,"_m_pminub",E,R[16],N,[[["__m64"]],["__m64"]]],[5,"_mm_mulhi_pu16",E,R[17],N,[[["__m64"]],["__m64"]]],[5,"_mm_mullo_pi16",E,"Multiplies packed 16-bit integer values and writes the…",N,[[["__m64"]],["__m64"]]],[5,"_m_pmulhuw",E,R[17],N,[[["__m64"]],["__m64"]]],[5,"_mm_avg_pu8",E,R[18],N,[[["__m64"]],["__m64"]]],[5,"_m_pavgb",E,R[18],N,[[["__m64"]],["__m64"]]],[5,"_mm_avg_pu16",E,R[19],N,[[["__m64"]],["__m64"]]],[5,"_m_pavgw",E,R[19],N,[[["__m64"]],["__m64"]]],[5,"_mm_sad_pu8",E,R[20],N,[[["__m64"]],["__m64"]]],[5,"_m_psadbw",E,R[20],N,[[["__m64"]],["__m64"]]],[5,"_mm_cvtpi32_ps",E,R[21],N,[[["__m64"],[R[4]]],[R[4]]]],[5,"_mm_cvt_pi2ps",E,R[21],N,[[["__m64"],[R[4]]],[R[4]]]],[5,"_mm_cvtpi8_ps",E,R[22],N,[[["__m64"]],[R[4]]]],[5,"_mm_cvtpu8_ps",E,R[22],N,[[["__m64"]],[R[4]]]],[5,"_mm_cvtpi16_ps",E,R[23],N,[[["__m64"]],[R[4]]]],[5,"_mm_cvtpu16_ps",E,R[23],N,[[["__m64"]],[R[4]]]],[5,"_mm_cvtpi32x2_ps",E,"Converts the two 32-bit signed integer values from 
each…",N,[[["__m64"]],[R[4]]]],[5,"_mm_maskmove_si64",E,R[24],N,[[["__m64"]]]],[5,"_m_maskmovq",E,R[24],N,[[["__m64"]]]],[5,"_mm_extract_pi16",E,R[25],N,[[["__m64"],["i32"]],["i32"]]],[5,"_m_pextrw",E,R[25],N,[[["__m64"],["i32"]],["i32"]]],[5,"_mm_insert_pi16",E,R[26],N,[[["__m64"],["i32"]],["__m64"]]],[5,"_m_pinsrw",E,R[26],N,[[["__m64"],["i32"]],["__m64"]]],[5,"_mm_movemask_pi8",E,R[27],N,[[["__m64"]],["i32"]]],[5,"_m_pmovmskb",E,R[27],N,[[["__m64"]],["i32"]]],[5,"_mm_shuffle_pi16",E,R[28],N,[[["__m64"],["i32"]],["__m64"]]],[5,"_m_pshufw",E,R[28],N,[[["__m64"],["i32"]],["__m64"]]],[5,"_mm_cvttps_pi32",E,R[29],N,[[[R[4]]],["__m64"]]],[5,"_mm_cvtt_ps2pi",E,R[29],N,[[[R[4]]],["__m64"]]],[5,"_mm_cvtps_pi32",E,R[29],N,[[[R[4]]],["__m64"]]],[5,"_mm_cvt_ps2pi",E,R[29],N,[[[R[4]]],["__m64"]]],[5,"_mm_cvtps_pi16",E,R[30],N,[[[R[4]]],["__m64"]]],[5,"_mm_cvtps_pi8",E,R[30],N,[[[R[4]]],["__m64"]]],[5,"_mm_pause",E,"Provides a hint to the processor that the code sequence is…",N,[[]]],[5,"_mm_clflush",E,"Invalidates and flushes the cache line that contains `p`…",N,[[]]],[5,"_mm_lfence",E,R[31],N,[[]]],[5,"_mm_mfence",E,R[31],N,[[]]],[5,"_mm_add_epi8",E,R[143],N,[[[R[32]]],[R[32]]]],[5,"_mm_add_epi16",E,R[142],N,[[[R[32]]],[R[32]]]],[5,"_mm_add_epi32",E,R[141],N,[[[R[32]]],[R[32]]]],[5,"_mm_add_epi64",E,R[140],N,[[[R[32]]],[R[32]]]],[5,"_mm_adds_epi8",E,R[144],N,[[[R[32]]],[R[32]]]],[5,"_mm_adds_epi16",E,R[145],N,[[[R[32]]],[R[32]]]],[5,"_mm_adds_epu8",E,R[146],N,[[[R[32]]],[R[32]]]],[5,"_mm_adds_epu16",E,R[147],N,[[[R[32]]],[R[32]]]],[5,"_mm_avg_epu8",E,R[149],N,[[[R[32]]],[R[32]]]],[5,"_mm_avg_epu16",E,R[148],N,[[[R[32]]],[R[32]]]],[5,"_mm_madd_epi16",E,"Multiplies and then horizontally adds signed 16 bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_max_epi16",E,R[33],N,[[[R[32]]],[R[32]]]],[5,"_mm_max_epu8",E,R[34],N,[[[R[32]]],[R[32]]]],[5,"_mm_min_epi16",E,R[33],N,[[[R[32]]],[R[32]]]],[5,"_mm_min_epu8",E,R[34],N,[[[R[32]]],[R[32]]]],[5,"_mm_mulhi_epi16",E,R[35],N,[[[R[32]]],[R[32]]]],[5,"_mm_mulhi_epu16",E,R[172],N,[[[R[32]]],[R[32]]]],[5,"_mm_mullo_epi16",E,R[35],N,[[[R[32]]],[R[32]]]],[5,"_mm_mul_epu32",E,R[171],N,[[[R[32]]],[R[32]]]],[5,"_mm_sad_epu8",E,"Sum the absolute differences of packed unsigned 8-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_sub_epi8",E,"Subtracts packed 8-bit integers in `b` from packed 8-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_sub_epi16",E,"Subtracts packed 16-bit integers in `b` from packed 16-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_sub_epi32",E,R[210],N,[[[R[32]]],[R[32]]]],[5,"_mm_sub_epi64",E,"Subtract packed 64-bit integers in `b` from packed 
64-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_subs_epi8",E,R[191],N,[[[R[32]]],[R[32]]]],[5,"_mm_subs_epi16",E,R[190],N,[[[R[32]]],[R[32]]]],[5,"_mm_subs_epu8",E,R[193],N,[[[R[32]]],[R[32]]]],[5,"_mm_subs_epu16",E,R[192],N,[[[R[32]]],[R[32]]]],[5,"_mm_slli_si128",E,R[36],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_bslli_si128",E,R[36],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_bsrli_si128",E,R[37],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_slli_epi16",E,R[179],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_sll_epi16",E,R[176],N,[[[R[32]]],[R[32]]]],[5,"_mm_slli_epi32",E,R[180],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_sll_epi32",E,R[177],N,[[[R[32]]],[R[32]]]],[5,"_mm_slli_epi64",E,R[181],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_sll_epi64",E,R[178],N,[[[R[32]]],[R[32]]]],[5,"_mm_srai_epi16",E,R[38],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_sra_epi16",E,R[39],N,[[[R[32]]],[R[32]]]],[5,"_mm_srai_epi32",E,R[40],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_sra_epi32",E,R[41],N,[[[R[32]]],[R[32]]]],[5,"_mm_srli_si128",E,R[37],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_srli_epi16",E,R[38],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_srl_epi16",E,R[39],N,[[[R[32]]],[R[32]]]],[5,"_mm_srli_epi32",E,R[40],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_srl_epi32",E,R[41],N,[[[R[32]]],[R[32]]]],[5,"_mm_srli_epi64",E,R[188],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_srl_epi64",E,R[187],N,[[[R[32]]],[R[32]]]],[5,"_mm_and_si128",E,"Computes the bitwise AND of 128 bits (representing integer…",N,[[[R[32]]],[R[32]]]],[5,"_mm_andnot_si128",E,"Computes the bitwise NOT of 128 bits (representing integer…",N,[[[R[32]]],[R[32]]]],[5,"_mm_or_si128",E,"Computes the bitwise OR of 128 bits (representing integer…",N,[[[R[32]]],[R[32]]]],[5,"_mm_xor_si128",E,"Computes the bitwise XOR of 128 bits (representing integer…",N,[[[R[32]]],[R[32]]]],[5,"_mm_cmpeq_epi8",E,R[159],N,[[[R[32]]],[R[32]]]],[5,"_mm_cmpeq_epi16",E,R[158],N,[[[R[32]]],[R[32]]]],[5,"_mm_cmpeq_epi32",E,R[157],N,[[[R[32]]],[R[32]]]],[5,"_mm_cmpgt_epi8",E,R[161],N,[[[R[32]]],[R[32]]]],[5,"_mm_cmpgt_epi16",E,R[42],N,[[[R[32]]],[R[32]]]],[5,"_mm_cmpgt_epi32",E,R[43],N,[[[R[32]]],[R[32]]]],[5,"_mm_cmplt_epi8",E,"Compares packed 8-bit integers in `a` and `b` for less-than.",N,[[[R[32]]],[R[32]]]],[5,"_mm_cmplt_epi16",E,R[42],N,[[[R[32]]],[R[32]]]],[5,"_mm_cmplt_epi32",E,R[43],N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepi32_pd",E,"Converts the lower two packed 32-bit integers in `a` to…",N,[[[R[32]]],[R[44]]]],[5,"_mm_cvtsi32_sd",E,"Returns `a` with its lower element replaced by `b` after…",N,[[[R[44]],["i32"]],[R[44]]]],[5,"_mm_cvtepi32_ps",E,R[110],N,[[[R[32]]],[R[4]]]],[5,"_mm_cvtps_epi32",E,R[30],N,[[[R[4]]],[R[32]]]],[5,"_mm_cvtsi32_si128",E,"Returns a vector whose lowest element is `a` and all…",N,[[["i32"]],[R[32]]]],[5,"_mm_cvtsi128_si32",E,"Returns the lowest element of `a`.",N,[[[R[32]]],["i32"]]],[5,"_mm_set_epi64x",E,"Sets packed 64-bit integers with the supplied values, from…",N,[[["i64"]],[R[32]]]],[5,"_mm_set_epi32",E,"Sets packed 32-bit integers with the supplied values.",N,[[["i32"]],[R[32]]]],[5,"_mm_set_epi16",E,"Sets packed 16-bit integers with the supplied values.",N,[[["i16"]],[R[32]]]],[5,"_mm_set_epi8",E,"Sets packed 8-bit integers with the supplied values.",N,[[["i8"]],[R[32]]]],[5,"_mm_set1_epi64x",E,"Broadcasts 64-bit integer `a` to all elements.",N,[[["i64"]],[R[32]]]],[5,"_mm_set1_epi32",E,"Broadcasts 32-bit integer `a` to all elements.",N,[[["i32"]],[R[32]]]],[5,"_mm_set1_epi16",E,"Broadcasts 16-bit integer `a` to all elements.",N,[[["i16"]],[R[32]]]],[5,"_mm_set1_epi8",E,"Broadcasts 8-bit integer `a` 
to all elements.",N,[[["i8"]],[R[32]]]],[5,"_mm_setr_epi32",E,"Sets packed 32-bit integers with the supplied values in…",N,[[["i32"]],[R[32]]]],[5,"_mm_setr_epi16",E,"Sets packed 16-bit integers with the supplied values in…",N,[[["i16"]],[R[32]]]],[5,"_mm_setr_epi8",E,"Sets packed 8-bit integers with the supplied values in…",N,[[["i8"]],[R[32]]]],[5,"_mm_setzero_si128",E,"Returns a vector with all elements set to zero.",N,[[],[R[32]]]],[5,"_mm_loadl_epi64",E,"Loads 64-bit integer from memory into first element of…",N,[[],[R[32]]]],[5,"_mm_load_si128",E,R[45],N,[[],[R[32]]]],[5,"_mm_loadu_si128",E,R[45],N,[[],[R[32]]]],[5,"_mm_maskmoveu_si128",E,"Conditionally store 8-bit integer elements from `a` into…",N,[[[R[32]]]]],[5,"_mm_store_si128",E,R[46],N,[[[R[32]]]]],[5,"_mm_storeu_si128",E,R[46],N,[[[R[32]]]]],[5,"_mm_storel_epi64",E,"Stores the lower 64-bit integer `a` to a memory location.",N,[[[R[32]]]]],[5,"_mm_stream_si128",E,"Stores a 128-bit integer vector to a 128-bit aligned…",N,[[[R[32]]]]],[5,"_mm_stream_si32",E,"Stores a 32-bit integer value in the specified memory…",N,[[["i32"]]]],[5,"_mm_move_epi64",E,"Returns a vector where the low element is extracted from…",N,[[[R[32]]],[R[32]]]],[5,"_mm_packs_epi16",E,R[47],N,[[[R[32]]],[R[32]]]],[5,"_mm_packs_epi32",E,R[88],N,[[[R[32]]],[R[32]]]],[5,"_mm_packus_epi16",E,R[47],N,[[[R[32]]],[R[32]]]],[5,"_mm_extract_epi16",E,"Returns the `imm8` element of `a`.",N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_insert_epi16",E,"Returns a new vector where the `imm8` element of `a` is…",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_movemask_epi8",E,R[48],N,[[[R[32]]],["i32"]]],[5,"_mm_shuffle_epi32",E,"Shuffles 32-bit integers in `a` using the control in `imm8`.",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_shufflehi_epi16",E,"Shuffles 16-bit integers in the high 64 bits of `a` using…",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_shufflelo_epi16",E,"Shuffles 16-bit integers in the low 64 bits of `a` using…",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_unpackhi_epi8",E,R[194],N,[[[R[32]]],[R[32]]]],[5,"_mm_unpackhi_epi16",E,R[196],N,[[[R[32]]],[R[32]]]],[5,"_mm_unpackhi_epi32",E,R[198],N,[[[R[32]]],[R[32]]]],[5,"_mm_unpackhi_epi64",E,R[200],N,[[[R[32]]],[R[32]]]],[5,"_mm_unpacklo_epi8",E,R[195],N,[[[R[32]]],[R[32]]]],[5,"_mm_unpacklo_epi16",E,R[197],N,[[[R[32]]],[R[32]]]],[5,"_mm_unpacklo_epi32",E,R[199],N,[[[R[32]]],[R[32]]]],[5,"_mm_unpacklo_epi64",E,R[201],N,[[[R[32]]],[R[32]]]],[5,"_mm_add_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_add_pd",E,R[99],N,[[[R[44]]],[R[44]]]],[5,"_mm_div_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_div_pd",E,"Divide packed double-precision (64-bit) floating-point…",N,[[[R[44]]],[R[44]]]],[5,"_mm_max_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_max_pd",E,"Returns a new vector with the maximum values from…",N,[[[R[44]]],[R[44]]]],[5,"_mm_min_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_min_pd",E,"Returns a new vector with the minimum values from…",N,[[[R[44]]],[R[44]]]],[5,"_mm_mul_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_mul_pd",E,R[102],N,[[[R[44]]],[R[44]]]],[5,"_mm_sqrt_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_sqrt_pd",E,"Returns a new vector with the square root of each of the…",N,[[[R[44]]],[R[44]]]],[5,"_mm_sub_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_sub_pd",E,"Subtract packed double-precision (64-bit) floating-point…",N,[[[R[44]]],[R[44]]]],[5,"_mm_and_pd",E,"Computes the bitwise AND of packed double-precision…",N,[[[R[44]]],[R[44]]]],[5,"_mm_andnot_pd",E,"Computes the bitwise NOT of `a` and then AND with 
`b`.",N,[[[R[44]]],[R[44]]]],[5,"_mm_or_pd",E,R[50],N,[[[R[44]]],[R[44]]]],[5,"_mm_xor_pd",E,R[50],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpeq_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmplt_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmple_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpgt_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpge_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpord_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpunord_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpneq_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpnlt_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpnle_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpngt_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpnge_sd",E,R[49],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpeq_pd",E,"Compares corresponding elements in `a` and `b` for equality.",N,[[[R[44]]],[R[44]]]],[5,"_mm_cmplt_pd",E,R[51],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmple_pd",E,R[51],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpgt_pd",E,R[51],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpge_pd",E,R[51],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpord_pd",E,R[52],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpunord_pd",E,R[52],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpneq_pd",E,R[51],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpnlt_pd",E,R[51],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpnle_pd",E,R[51],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpngt_pd",E,R[51],N,[[[R[44]]],[R[44]]]],[5,"_mm_cmpnge_pd",E,R[51],N,[[[R[44]]],[R[44]]]],[5,"_mm_comieq_sd",E,R[54],N,[[[R[44]]],["i32"]]],[5,"_mm_comilt_sd",E,R[55],N,[[[R[44]]],["i32"]]],[5,"_mm_comile_sd",E,R[53],N,[[[R[44]]],["i32"]]],[5,"_mm_comigt_sd",E,R[56],N,[[[R[44]]],["i32"]]],[5,"_mm_comige_sd",E,R[53],N,[[[R[44]]],["i32"]]],[5,"_mm_comineq_sd",E,R[57],N,[[[R[44]]],["i32"]]],[5,"_mm_ucomieq_sd",E,R[54],N,[[[R[44]]],["i32"]]],[5,"_mm_ucomilt_sd",E,R[55],N,[[[R[44]]],["i32"]]],[5,"_mm_ucomile_sd",E,R[53],N,[[[R[44]]],["i32"]]],[5,"_mm_ucomigt_sd",E,R[56],N,[[[R[44]]],["i32"]]],[5,"_mm_ucomige_sd",E,R[53],N,[[[R[44]]],["i32"]]],[5,"_mm_ucomineq_sd",E,R[57],N,[[[R[44]]],["i32"]]],[5,"_mm_cvtpd_ps",E,R[58],N,[[[R[44]]],[R[4]]]],[5,"_mm_cvtps_pd",E,R[30],N,[[[R[4]]],[R[44]]]],[5,"_mm_cvtpd_epi32",E,R[58],N,[[[R[44]]],[R[32]]]],[5,"_mm_cvtsd_si32",E,R[59],N,[[[R[44]]],["i32"]]],[5,"_mm_cvtsd_ss",E,R[59],N,[[[R[44]],[R[4]]],[R[4]]]],[5,"_mm_cvtsd_f64",E,"Returns the lower double-precision (64-bit) floating-point…",N,[[[R[44]]],["f64"]]],[5,"_mm_cvtss_sd",E,"Converts the lower single-precision (32-bit)…",N,[[[R[44]],[R[4]]],[R[44]]]],[5,"_mm_cvttpd_epi32",E,R[58],N,[[[R[44]]],[R[32]]]],[5,"_mm_cvttsd_si32",E,R[59],N,[[[R[44]]],["i32"]]],[5,"_mm_cvttps_epi32",E,R[30],N,[[[R[4]]],[R[32]]]],[5,"_mm_set_sd",E,"Copies double-precision (64-bit) floating-point element…",N,[[["f64"]],[R[44]]]],[5,"_mm_set1_pd",E,R[60],N,[[["f64"]],[R[44]]]],[5,"_mm_set_pd1",E,R[60],N,[[["f64"]],[R[44]]]],[5,"_mm_set_pd",E,R[61],N,[[["f64"]],[R[44]]]],[5,"_mm_setr_pd",E,R[61],N,[[["f64"]],[R[44]]]],[5,"_mm_setzero_pd",E,"Returns packed double-precision (64-bit) floating-point…",N,[[],[R[44]]]],[5,"_mm_movemask_pd",E,R[48],N,[[[R[44]]],["i32"]]],[5,"_mm_load_pd",E,R[66],N,[[],[R[44]]]],[5,"_mm_load_sd",E,"Loads a 64-bit double-precision value to the low element…",N,[[],[R[44]]]],[5,"_mm_loadh_pd",E,"Loads a double-precision value into the high-order bits of…",N,[[[R[44]]],[R[44]]]],[5,"_mm_loadl_pd",E,"Loads a double-precision value into the low-order bits of…",N,[[[R[44]]],[R[44]]]],[5,"_mm_stream_pd",E,"Stores a 128-bit floating point vector of `[2 x 
double]`…",N,[[[R[44]]]]],[5,"_mm_store_sd",E,R[64],N,[[[R[44]]]]],[5,"_mm_store_pd",E,R[62],N,[[[R[44]]]]],[5,"_mm_storeu_pd",E,R[62],N,[[[R[44]]]]],[5,"_mm_store1_pd",E,R[63],N,[[[R[44]]]]],[5,"_mm_store_pd1",E,R[63],N,[[[R[44]]]]],[5,"_mm_storer_pd",E,"Stores 2 double-precision (64-bit) floating-point elements…",N,[[[R[44]]]]],[5,"_mm_storeh_pd",E,"Stores the upper 64 bits of a 128-bit vector of `[2 x…",N,[[[R[44]]]]],[5,"_mm_storel_pd",E,R[64],N,[[[R[44]]]]],[5,"_mm_load1_pd",E,R[65],N,[[],[R[44]]]],[5,"_mm_load_pd1",E,R[65],N,[[],[R[44]]]],[5,"_mm_loadr_pd",E,"Loads 2 double-precision (64-bit) floating-point elements…",N,[[],[R[44]]]],[5,"_mm_loadu_pd",E,R[66],N,[[],[R[44]]]],[5,"_mm_shuffle_pd",E,R[67],N,[[[R[44]],["i32"]],[R[44]]]],[5,"_mm_move_sd",E,R[67],N,[[[R[44]]],[R[44]]]],[5,"_mm_castpd_ps",E,R[68],N,[[[R[44]]],[R[4]]]],[5,"_mm_castpd_si128",E,R[68],N,[[[R[44]]],[R[32]]]],[5,"_mm_castps_pd",E,R[69],N,[[[R[4]]],[R[44]]]],[5,"_mm_castps_si128",E,R[69],N,[[[R[4]]],[R[32]]]],[5,"_mm_castsi128_pd",E,R[70],N,[[[R[32]]],[R[44]]]],[5,"_mm_castsi128_ps",E,R[70],N,[[[R[32]]],[R[4]]]],[5,"_mm_undefined_pd",E,"Returns vector of type __m128d with undefined elements.",N,[[],[R[44]]]],[5,"_mm_undefined_si128",E,"Returns vector of type __m128i with undefined elements.",N,[[],[R[32]]]],[5,"_mm_unpackhi_pd",E,R[71],N,[[[R[44]]],[R[44]]]],[5,"_mm_unpacklo_pd",E,R[71],N,[[[R[44]]],[R[44]]]],[5,"_mm_add_si64",E,"Adds two signed or unsigned 64-bit integer values,…",N,[[["__m64"]],["__m64"]]],[5,"_mm_mul_su32",E,"Multiplies 32-bit unsigned integer values contained in the…",N,[[["__m64"]],["__m64"]]],[5,"_mm_sub_si64",E,"Subtracts signed or unsigned 64-bit integer values and…",N,[[["__m64"]],["__m64"]]],[5,"_mm_cvtpi32_pd",E,"Converts the two signed 32-bit integer elements of a…",N,[[["__m64"]],[R[44]]]],[5,"_mm_set_epi64",E,"Initializes both 64-bit values in a 128-bit vector of `[2…",N,[[["__m64"]],[R[32]]]],[5,"_mm_set1_epi64",E,"Initializes both values in a 128-bit vector of `[2 x i64]`…",N,[[["__m64"]],[R[32]]]],[5,"_mm_setr_epi64",E,"Constructs a 128-bit integer vector, initialized in…",N,[[["__m64"]],[R[32]]]],[5,"_mm_movepi64_pi64",E,"Returns the lower 64 bits of a 128-bit integer vector as a…",N,[[[R[32]]],["__m64"]]],[5,"_mm_movpi64_epi64",E,"Moves the 64-bit operand to a 128-bit integer vector,…",N,[[["__m64"]],[R[32]]]],[5,"_mm_cvtpd_pi32",E,R[72],N,[[[R[44]]],["__m64"]]],[5,"_mm_cvttpd_pi32",E,R[72],N,[[[R[44]]],["__m64"]]],[5,"_mm_addsub_ps",E,"Alternatively add and subtract packed single-precision…",N,[[[R[4]]],[R[4]]]],[5,"_mm_addsub_pd",E,"Alternatively add and subtract packed double-precision…",N,[[[R[44]]],[R[44]]]],[5,"_mm_hadd_pd",E,"Horizontally adds adjacent pairs of double-precision…",N,[[[R[44]]],[R[44]]]],[5,"_mm_hadd_ps",E,R[73],N,[[[R[4]]],[R[4]]]],[5,"_mm_hsub_pd",E,"Horizontally subtract adjacent pairs of double-precision…",N,[[[R[44]]],[R[44]]]],[5,"_mm_hsub_ps",E,R[73],N,[[[R[4]]],[R[4]]]],[5,"_mm_lddqu_si128",E,"Loads 128-bits of integer data from unaligned memory. 
This…",N,[[],[R[32]]]],[5,"_mm_movedup_pd",E,"Duplicate the low double-precision (64-bit) floating-point…",N,[[[R[44]]],[R[44]]]],[5,"_mm_loaddup_pd",E,R[65],N,[[],[R[44]]]],[5,"_mm_movehdup_ps",E,R[125],N,[[[R[4]]],[R[4]]]],[5,"_mm_moveldup_ps",E,R[126],N,[[[R[4]]],[R[4]]]],[5,"_mm_abs_epi8",E,"Computes the absolute value of packed 8-bit signed…",N,[[[R[32]]],[R[32]]]],[5,"_mm_abs_epi16",E,"Computes the absolute value of each of the packed 16-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_abs_epi32",E,"Computes the absolute value of each of the packed 32-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_shuffle_epi8",E,R[175],N,[[[R[32]]],[R[32]]]],[5,"_mm_alignr_epi8",E,"Concatenate 16-byte blocks in `a` and `b` into a 32-byte…",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_hadd_epi16",E,R[74],N,[[[R[32]]],[R[32]]]],[5,"_mm_hadds_epi16",E,R[74],N,[[[R[32]]],[R[32]]]],[5,"_mm_hadd_epi32",E,R[74],N,[[[R[32]]],[R[32]]]],[5,"_mm_hsub_epi16",E,R[75],N,[[[R[32]]],[R[32]]]],[5,"_mm_hsubs_epi16",E,R[75],N,[[[R[32]]],[R[32]]]],[5,"_mm_hsub_epi32",E,R[75],N,[[[R[32]]],[R[32]]]],[5,"_mm_maddubs_epi16",E,R[78],N,[[[R[32]]],[R[32]]]],[5,"_mm_mulhrs_epi16",E,"Multiplies packed 16-bit signed integer values, truncates…",N,[[[R[32]]],[R[32]]]],[5,"_mm_sign_epi8",E,R[79],N,[[[R[32]]],[R[32]]]],[5,"_mm_sign_epi16",E,R[80],N,[[[R[32]]],[R[32]]]],[5,"_mm_sign_epi32",E,R[81],N,[[[R[32]]],[R[32]]]],[5,"_mm_abs_pi8",E,R[76],N,[[["__m64"]],["__m64"]]],[5,"_mm_abs_pi16",E,R[76],N,[[["__m64"]],["__m64"]]],[5,"_mm_abs_pi32",E,R[216],N,[[["__m64"]],["__m64"]]],[5,"_mm_shuffle_pi8",E,"Shuffles packed 8-bit integers in `a` according to shuffle…",N,[[["__m64"]],["__m64"]]],[5,"_mm_alignr_pi8",E,"Concatenates the two 64-bit integer vector operands, and…",N,[[["__m64"],["i32"]],["__m64"]]],[5,"_mm_hadd_pi16",E,R[74],N,[[["__m64"]],["__m64"]]],[5,"_mm_hadd_pi32",E,R[74],N,[[["__m64"]],["__m64"]]],[5,"_mm_hadds_pi16",E,R[74],N,[[["__m64"]],["__m64"]]],[5,"_mm_hsub_pi16",E,R[77],N,[[["__m64"]],["__m64"]]],[5,"_mm_hsub_pi32",E,R[77],N,[[["__m64"]],["__m64"]]],[5,"_mm_hsubs_pi16",E,R[77],N,[[["__m64"]],["__m64"]]],[5,"_mm_maddubs_pi16",E,R[78],N,[[["__m64"]],["__m64"]]],[5,"_mm_mulhrs_pi16",E,"Multiplies packed 16-bit signed integer values, truncates…",N,[[["__m64"]],["__m64"]]],[5,"_mm_sign_pi8",E,R[79],N,[[["__m64"]],["__m64"]]],[5,"_mm_sign_pi16",E,R[80],N,[[["__m64"]],["__m64"]]],[5,"_mm_sign_pi32",E,R[81],N,[[["__m64"]],["__m64"]]],[5,"_mm_blendv_epi8",E,"Blend packed 8-bit integers from `a` and `b` using `mask`",N,[[[R[32]]],[R[32]]]],[5,"_mm_blend_epi16",E,"Blend packed 16-bit integers from `a` and `b` using the…",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_blendv_pd",E,R[82],N,[[[R[44]]],[R[44]]]],[5,"_mm_blendv_ps",E,R[83],N,[[[R[4]]],[R[4]]]],[5,"_mm_blend_pd",E,R[82],N,[[[R[44]],["i32"]],[R[44]]]],[5,"_mm_blend_ps",E,R[83],N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm_extract_ps",E,"Extracts a single-precision (32-bit) floating-point…",N,[[["i32"],[R[4]]],["i32"]]],[5,"_mm_extract_epi8",E,R[202],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_extract_epi32",E,"Extracts a 32-bit integer from `a` selected with `imm8`",N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_insert_ps",E,"Select a single value in `a` to store at some position in…",N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm_insert_epi8",E,"Returns a copy of `a` with the 8-bit integer from `i`…",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_insert_epi32",E,"Returns a copy of `a` with the 32-bit integer from 
`i`…",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_max_epi8",E,R[84],N,[[[R[32]]],[R[32]]]],[5,"_mm_max_epu16",E,R[85],N,[[[R[32]]],[R[32]]]],[5,"_mm_max_epi32",E,R[86],N,[[[R[32]]],[R[32]]]],[5,"_mm_max_epu32",E,R[87],N,[[[R[32]]],[R[32]]]],[5,"_mm_min_epi8",E,R[84],N,[[[R[32]]],[R[32]]]],[5,"_mm_min_epu16",E,R[85],N,[[[R[32]]],[R[32]]]],[5,"_mm_min_epi32",E,R[86],N,[[[R[32]]],[R[32]]]],[5,"_mm_min_epu32",E,R[87],N,[[[R[32]]],[R[32]]]],[5,"_mm_packus_epi32",E,R[88],N,[[[R[32]]],[R[32]]]],[5,"_mm_cmpeq_epi64",E,"Compares packed 64-bit integers in `a` and `b` for equality",N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepi8_epi16",E,"Sign extend packed 8-bit integers in `a` to packed 16-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepi8_epi32",E,"Sign extend packed 8-bit integers in `a` to packed 32-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepi8_epi64",E,"Sign extend packed 8-bit integers in the low 8 bytes of…",N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepi16_epi32",E,"Sign extend packed 16-bit integers in `a` to packed 32-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepi16_epi64",E,"Sign extend packed 16-bit integers in `a` to packed 64-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepi32_epi64",E,"Sign extend packed 32-bit integers in `a` to packed 64-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepu8_epi16",E,R[89],N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepu8_epi32",E,R[89],N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepu8_epi64",E,R[89],N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepu16_epi32",E,R[90],N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepu16_epi64",E,R[90],N,[[[R[32]]],[R[32]]]],[5,"_mm_cvtepu32_epi64",E,"Zero-extend packed unsigned 32-bit integers in `a` to…",N,[[[R[32]]],[R[32]]]],[5,"_mm_dp_pd",E,"Returns the dot product of two __m128d vectors.",N,[[[R[44]],["i32"]],[R[44]]]],[5,"_mm_dp_ps",E,"Returns the dot product of two __m128 vectors.",N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm_floor_pd",E,R[91],N,[[[R[44]]],[R[44]]]],[5,"_mm_floor_ps",E,R[92],N,[[[R[4]]],[R[4]]]],[5,"_mm_floor_sd",E,R[93],N,[[[R[44]]],[R[44]]]],[5,"_mm_floor_ss",E,R[94],N,[[[R[4]]],[R[4]]]],[5,"_mm_ceil_pd",E,R[91],N,[[[R[44]]],[R[44]]]],[5,"_mm_ceil_ps",E,R[92],N,[[[R[4]]],[R[4]]]],[5,"_mm_ceil_sd",E,R[93],N,[[[R[44]]],[R[44]]]],[5,"_mm_ceil_ss",E,R[94],N,[[[R[4]]],[R[4]]]],[5,"_mm_round_pd",E,R[91],N,[[[R[44]],["i32"]],[R[44]]]],[5,"_mm_round_ps",E,R[92],N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm_round_sd",E,R[93],N,[[[R[44]],["i32"]],[R[44]]]],[5,"_mm_round_ss",E,R[94],N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm_minpos_epu16",E,"Finds the minimum unsigned 16-bit element in the 128-bit…",N,[[[R[32]]],[R[32]]]],[5,"_mm_mul_epi32",E,R[170],N,[[[R[32]]],[R[32]]]],[5,"_mm_mullo_epi32",E,R[174],N,[[[R[32]]],[R[32]]]],[5,"_mm_mpsadbw_epu8",E,"Subtracts 8-bit unsigned integer values and computes the…",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_testz_si128",E,R[95],N,[[[R[32]]],["i32"]]],[5,"_mm_testc_si128",E,R[95],N,[[[R[32]]],["i32"]]],[5,"_mm_testnzc_si128",E,R[95],N,[[[R[32]]],["i32"]]],[5,"_mm_test_all_zeros",E,R[95],N,[[[R[32]]],["i32"]]],[5,"_mm_test_all_ones",E,"Tests whether the specified bits in a 128-bit 
integer…",N,[[[R[32]]],["i32"]]],[5,"_mm_test_mix_ones_zeros",E,R[95],N,[[[R[32]]],["i32"]]],[5,"_mm_cmpistrm",E,R[96],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_cmpistri",E,R[96],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpistrz",E,R[96],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpistrc",E,R[96],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpistrs",E,R[96],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpistro",E,R[96],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpistra",E,R[96],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpestrm",E,R[97],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_cmpestri",E,"Compares packed strings `a` and `b` with lengths `la` and…",N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpestrz",E,R[97],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpestrc",E,R[97],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpestrs",E,R[97],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpestro",E,R[97],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_cmpestra",E,R[97],N,[[[R[32]],["i32"]],["i32"]]],[5,"_mm_crc32_u8",E,R[98],N,[[["u32"],["u8"]],["u32"]]],[5,"_mm_crc32_u16",E,R[98],N,[[["u16"],["u32"]],["u32"]]],[5,"_mm_crc32_u32",E,R[98],N,[[["u32"]],["u32"]]],[5,"_mm_cmpgt_epi64",E,R[160],N,[[[R[32]]],[R[32]]]],[5,"_mm256_add_pd",E,R[99],N,[[[R[100]]],[R[100]]]],[5,"_mm256_add_ps",E,"Adds packed single-precision (32-bit) floating-point…",N,[[["__m256"]],["__m256"]]],[5,"_mm256_and_pd",E,"Computes the bitwise AND of packed double-precision…",N,[[[R[100]]],[R[100]]]],[5,"_mm256_and_ps",E,"Computes the bitwise AND of packed single-precision…",N,[[["__m256"]],["__m256"]]],[5,"_mm256_or_pd",E,"Computes the bitwise OR of packed double-precision (64-bit)…",N,[[[R[100]]],[R[100]]]],[5,"_mm256_or_ps",E,"Computes the bitwise OR of packed single-precision (32-bit)…",N,[[["__m256"]],["__m256"]]],[5,"_mm256_shuffle_pd",E,R[113],N,[[[R[100]],["i32"]],[R[100]]]],[5,"_mm256_shuffle_ps",E,R[111],N,[[["i32"],["__m256"]],["__m256"]]],[5,"_mm256_andnot_pd",E,"Computes the bitwise NOT of packed double-precision…",N,[[[R[100]]],[R[100]]]],[5,"_mm256_andnot_ps",E,"Computes the bitwise NOT of packed single-precision…",N,[[["__m256"]],["__m256"]]],[5,"_mm256_max_pd",E,R[101],N,[[[R[100]]],[R[100]]]],[5,"_mm256_max_ps",E,R[6],N,[[["__m256"]],["__m256"]]],[5,"_mm256_min_pd",E,R[101],N,[[[R[100]]],[R[100]]]],[5,"_mm256_min_ps",E,R[6],N,[[["__m256"]],["__m256"]]],[5,"_mm256_mul_pd",E,R[102],N,[[[R[100]]],[R[100]]]],[5,"_mm256_mul_ps",E,R[204],N,[[["__m256"]],["__m256"]]],[5,"_mm256_addsub_pd",E,"Alternatively adds and subtracts packed double-precision…",N,[[[R[100]]],[R[100]]]],[5,"_mm256_addsub_ps",E,"Alternatively adds and subtracts packed single-precision…",N,[[["__m256"]],["__m256"]]],[5,"_mm256_sub_pd",E,"Subtracts packed double-precision (64-bit) floating-point…",N,[[[R[100]]],[R[100]]]],[5,"_mm256_sub_ps",E,"Subtracts packed single-precision (32-bit) floating-point…",N,[[["__m256"]],["__m256"]]],[5,"_mm256_div_ps",E,"Computes the division of each of the 8 packed 32-bit…",N,[[["__m256"]],["__m256"]]],[5,"_mm256_div_pd",E,"Computes the division of each of the 4 packed 64-bit…",N,[[[R[100]]],[R[100]]]],[5,"_mm256_round_pd",E,R[103],N,[[[R[100]],["i32"]],[R[100]]]],[5,"_mm256_ceil_pd",E,R[103],N,[[[R[100]]],[R[100]]]],[5,"_mm256_floor_pd",E,R[103],N,[[[R[100]]],[R[100]]]],[5,"_mm256_round_ps",E,R[104],N,[[["i32"],["__m256"]],["__m256"]]],[5,"_mm256_ceil_ps",E,R[104],N,[[["__m256"]],["__m256"]]],[5,"_mm256_floor_ps",E,R[104],N,[[["__m256"]],["__m256"]]],[5,"_mm256_sqrt_ps",E,R[105],N,[[["__m256"]],["__m256"]]],[5,"_mm256_sqrt_pd",E,"Returns the square root of packed 
double-precision…",N,[[[R[100]]],[R[100]]]],[5,"_mm256_blend_pd",E,R[106],N,[[[R[100]],["i32"]],[R[100]]]],[5,"_mm256_blend_ps",E,R[107],N,[[["i32"],["__m256"]],["__m256"]]],[5,"_mm256_blendv_pd",E,R[106],N,[[[R[100]]],[R[100]]]],[5,"_mm256_blendv_ps",E,R[107],N,[[["__m256"]],["__m256"]]],[5,"_mm256_dp_ps",E,"Conditionally multiplies the packed single-precision…",N,[[["i32"],["__m256"]],["__m256"]]],[5,"_mm256_hadd_pd",E,R[108],N,[[[R[100]]],[R[100]]]],[5,"_mm256_hadd_ps",E,R[108],N,[[["__m256"]],["__m256"]]],[5,"_mm256_hsub_pd",E,R[109],N,[[[R[100]]],[R[100]]]],[5,"_mm256_hsub_ps",E,R[109],N,[[["__m256"]],["__m256"]]],[5,"_mm256_xor_pd",E,"Computes the bitwise XOR of packed double-precision…",N,[[[R[100]]],[R[100]]]],[5,"_mm256_xor_ps",E,"Computes the bitwise XOR of packed single-precision…",N,[[["__m256"]],["__m256"]]],[5,"_mm_cmp_pd",E,R[101],N,[[[R[44]],["i32"]],[R[44]]]],[5,"_mm256_cmp_pd",E,R[101],N,[[[R[100]],["i32"]],[R[100]]]],[5,"_mm_cmp_ps",E,R[6],N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm256_cmp_ps",E,R[6],N,[[["i32"],["__m256"]],["__m256"]]],[5,"_mm_cmp_sd",E,"Compares the lower double-precision (64-bit)…",N,[[[R[44]],["i32"]],[R[44]]]],[5,"_mm_cmp_ss",E,"Compares the lower single-precision (32-bit)…",N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm256_cvtepi32_pd",E,R[110],N,[[[R[32]]],[R[100]]]],[5,"_mm256_cvtepi32_ps",E,R[110],N,[[[R[112]]],["__m256"]]],[5,"_mm256_cvtpd_ps",E,R[58],N,[[[R[100]]],[R[4]]]],[5,"_mm256_cvtps_epi32",E,R[30],N,[[["__m256"]],[R[112]]]],[5,"_mm256_cvtps_pd",E,R[30],N,[[[R[4]]],[R[100]]]],[5,"_mm256_cvttpd_epi32",E,R[58],N,[[[R[100]]],[R[32]]]],[5,"_mm256_cvtpd_epi32",E,R[58],N,[[[R[100]]],[R[32]]]],[5,"_mm256_cvttps_epi32",E,R[30],N,[[["__m256"]],[R[112]]]],[5,"_mm256_extractf128_ps",E,"Extracts 128 bits (composed of 4 packed single-precision…",N,[[["i32"],["__m256"]],[R[4]]]],[5,"_mm256_extractf128_pd",E,"Extracts 128 bits (composed of 2 packed double-precision…",N,[[[R[100]],["i32"]],[R[44]]]],[5,"_mm256_extractf128_si256",E,"Extracts 128 bits (composed of integer data) from `a`,…",N,[[[R[112]],["i32"]],[R[32]]]],[5,"_mm256_zeroall",E,"Zeroes the contents of all XMM or YMM registers.",N,[[]]],[5,"_mm256_zeroupper",E,"Zeroes the upper 128 bits of all YMM registers; the lower…",N,[[]]],[5,"_mm256_permutevar_ps",E,R[111],N,[[[R[112]],["__m256"]],["__m256"]]],[5,"_mm_permutevar_ps",E,R[111],N,[[[R[32]],[R[4]]],[R[4]]]],[5,"_mm256_permute_ps",E,R[111],N,[[["i32"],["__m256"]],["__m256"]]],[5,"_mm_permute_ps",E,R[111],N,[[["i32"],[R[4]]],[R[4]]]],[5,"_mm256_permutevar_pd",E,R[113],N,[[[R[100]],[R[112]]],[R[100]]]],[5,"_mm_permutevar_pd",E,R[113],N,[[[R[44]],[R[32]]],[R[44]]]],[5,"_mm256_permute_pd",E,R[113],N,[[[R[100]],["i32"]],[R[100]]]],[5,"_mm_permute_pd",E,R[113],N,[[[R[44]],["i32"]],[R[44]]]],[5,"_mm256_permute2f128_ps",E,"Shuffles 256 bits (composed of 8 packed single-precision…",N,[[["i32"],["__m256"]],["__m256"]]],[5,"_mm256_permute2f128_pd",E,"Shuffles 256 bits (composed of 4 packed double-precision…",N,[[[R[100]],["i32"]],[R[100]]]],[5,"_mm256_permute2f128_si256",E,"Shuffles 256 bits (composed of integer data) selected by…",N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_broadcast_ss",E,R[114],N,[[["f32"]],["__m256"]]],[5,"_mm_broadcast_ss",E,R[114],N,[[["f32"]],[R[4]]]],[5,"_mm256_broadcast_sd",E,"Broadcasts a double-precision (64-bit) floating-point…",N,[[["f64"]],[R[100]]]],[5,"_mm256_broadcast_ps",E,"Broadcasts 128 bits from memory (composed of 4 packed…",N,[[[R[4]]],["__m256"]]],[5,"_mm256_broadcast_pd",E,"Broadcasts 128 bits from memory (composed of 2 
packed…",N,[[[R[44]]],[R[100]]]],[5,"_mm256_insertf128_ps",E,"Copies `a` to result, then inserts 128 bits (composed of 4…",N,[[[R[4]],["i32"],["__m256"]],["__m256"]]],[5,"_mm256_insertf128_pd",E,"Copies `a` to result, then inserts 128 bits (composed of 2…",N,[[[R[100]],[R[44]],["i32"]],[R[100]]]],[5,"_mm256_insertf128_si256",E,"Copies `a` to result, then inserts 128 bits from `b` into…",N,[[[R[32]],[R[112]],["i32"]],[R[112]]]],[5,"_mm256_insert_epi8",E,"Copies `a` to result, and inserts the 8-bit integer `i`…",N,[[["i8"],[R[112]],["i32"]],[R[112]]]],[5,"_mm256_insert_epi16",E,"Copies `a` to result, and inserts the 16-bit integer `i`…",N,[[["i32"],[R[112]],["i16"]],[R[112]]]],[5,"_mm256_insert_epi32",E,"Copies `a` to result, and inserts the 32-bit integer `i`…",N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_load_pd",E,R[115],N,[[],[R[100]]]],[5,"_mm256_store_pd",E,R[116],N,[[[R[100]]]]],[5,"_mm256_load_ps",E,R[117],N,[[],["__m256"]]],[5,"_mm256_store_ps",E,R[118],N,[[["__m256"]]]],[5,"_mm256_loadu_pd",E,R[115],N,[[],[R[100]]]],[5,"_mm256_storeu_pd",E,R[116],N,[[[R[100]]]]],[5,"_mm256_loadu_ps",E,R[117],N,[[],["__m256"]]],[5,"_mm256_storeu_ps",E,R[118],N,[[["__m256"]]]],[5,"_mm256_load_si256",E,R[119],N,[[],[R[112]]]],[5,"_mm256_store_si256",E,R[120],N,[[[R[112]]]]],[5,"_mm256_loadu_si256",E,R[119],N,[[],[R[112]]]],[5,"_mm256_storeu_si256",E,R[120],N,[[[R[112]]]]],[5,"_mm256_maskload_pd",E,R[121],N,[[[R[112]]],[R[100]]]],[5,"_mm256_maskstore_pd",E,R[122],N,[[[R[100]],[R[112]]]]],[5,"_mm_maskload_pd",E,R[121],N,[[[R[32]]],[R[44]]]],[5,"_mm_maskstore_pd",E,R[122],N,[[[R[44]],[R[32]]]]],[5,"_mm256_maskload_ps",E,R[123],N,[[[R[112]]],["__m256"]]],[5,"_mm256_maskstore_ps",E,R[124],N,[[["__m256"],[R[112]]]]],[5,"_mm_maskload_ps",E,R[123],N,[[[R[32]]],[R[4]]]],[5,"_mm_maskstore_ps",E,R[124],N,[[[R[4]],[R[32]]]]],[5,"_mm256_movehdup_ps",E,R[125],N,[[["__m256"]],["__m256"]]],[5,"_mm256_moveldup_ps",E,R[126],N,[[["__m256"]],["__m256"]]],[5,"_mm256_movedup_pd",E,"Duplicate even-indexed double-precision (64-bit)…",N,[[[R[100]]],[R[100]]]],[5,"_mm256_lddqu_si256",E,"Loads 256-bits of integer data from unaligned memory into…",N,[[],[R[112]]]],[5,"_mm256_stream_si256",E,"Moves integer data from a 256-bit integer vector to a…",N,[[[R[112]]]]],[5,"_mm256_stream_pd",E,"Moves double-precision values from a 256-bit vector of `[4…",N,[[[R[100]]]]],[5,"_mm256_stream_ps",E,"Moves single-precision floating point values from a…",N,[[["__m256"]]]],[5,"_mm256_rcp_ps",E,"Computes the approximate reciprocal of packed…",N,[[["__m256"]],["__m256"]]],[5,"_mm256_rsqrt_ps",E,"Computes the approximate reciprocal square root of 
packed…",N,[[["__m256"]],["__m256"]]],[5,"_mm256_unpackhi_pd",E,R[127],N,[[[R[100]]],[R[100]]]],[5,"_mm256_unpackhi_ps",E,R[12],N,[[["__m256"]],["__m256"]]],[5,"_mm256_unpacklo_pd",E,R[127],N,[[[R[100]]],[R[100]]]],[5,"_mm256_unpacklo_ps",E,R[12],N,[[["__m256"]],["__m256"]]],[5,"_mm256_testz_si256",E,R[128],N,[[[R[112]]],["i32"]]],[5,"_mm256_testc_si256",E,R[128],N,[[[R[112]]],["i32"]]],[5,"_mm256_testnzc_si256",E,R[128],N,[[[R[112]]],["i32"]]],[5,"_mm256_testz_pd",E,R[129],N,[[[R[100]]],["i32"]]],[5,"_mm256_testc_pd",E,R[129],N,[[[R[100]]],["i32"]]],[5,"_mm256_testnzc_pd",E,R[129],N,[[[R[100]]],["i32"]]],[5,"_mm_testz_pd",E,R[130],N,[[[R[44]]],["i32"]]],[5,"_mm_testc_pd",E,R[130],N,[[[R[44]]],["i32"]]],[5,"_mm_testnzc_pd",E,R[130],N,[[[R[44]]],["i32"]]],[5,"_mm256_testz_ps",E,R[129],N,[[["__m256"]],["i32"]]],[5,"_mm256_testc_ps",E,R[129],N,[[["__m256"]],["i32"]]],[5,"_mm256_testnzc_ps",E,R[129],N,[[["__m256"]],["i32"]]],[5,"_mm_testz_ps",E,R[130],N,[[[R[4]]],["i32"]]],[5,"_mm_testc_ps",E,R[130],N,[[[R[4]]],["i32"]]],[5,"_mm_testnzc_ps",E,R[130],N,[[[R[4]]],["i32"]]],[5,"_mm256_movemask_pd",E,R[131],N,[[[R[100]]],["i32"]]],[5,"_mm256_movemask_ps",E,R[131],N,[[["__m256"]],["i32"]]],[5,"_mm256_setzero_pd",E,"Returns vector of type __m256d with all elements set to…",N,[[],[R[100]]]],[5,"_mm256_setzero_ps",E,"Returns vector of type __m256 with all elements set to zero.",N,[[],["__m256"]]],[5,"_mm256_setzero_si256",E,"Returns vector of type __m256i with all elements set to…",N,[[],[R[112]]]],[5,"_mm256_set_pd",E,R[61],N,[[["f64"]],[R[100]]]],[5,"_mm256_set_ps",E,R[132],N,[[["f32"]],["__m256"]]],[5,"_mm256_set_epi8",E,R[133],N,[[["i8"]],[R[112]]]],[5,"_mm256_set_epi16",E,R[134],N,[[["i16"]],[R[112]]]],[5,"_mm256_set_epi32",E,R[135],N,[[["i32"]],[R[112]]]],[5,"_mm256_set_epi64x",E,R[136],N,[[["i64"]],[R[112]]]],[5,"_mm256_setr_pd",E,R[61],N,[[["f64"]],[R[100]]]],[5,"_mm256_setr_ps",E,R[132],N,[[["f32"]],["__m256"]]],[5,"_mm256_setr_epi8",E,R[133],N,[[["i8"]],[R[112]]]],[5,"_mm256_setr_epi16",E,R[134],N,[[["i16"]],[R[112]]]],[5,"_mm256_setr_epi32",E,R[135],N,[[["i32"]],[R[112]]]],[5,"_mm256_setr_epi64x",E,R[136],N,[[["i64"]],[R[112]]]],[5,"_mm256_set1_pd",E,R[60],N,[[["f64"]],[R[100]]]],[5,"_mm256_set1_ps",E,"Broadcasts single-precision (32-bit) floating-point value…",N,[[["f32"]],["__m256"]]],[5,"_mm256_set1_epi8",E,"Broadcasts 8-bit integer `a` to all elements of returned…",N,[[["i8"]],[R[112]]]],[5,"_mm256_set1_epi16",E,"Broadcasts 16-bit integer `a` to all elements of…",N,[[["i16"]],[R[112]]]],[5,"_mm256_set1_epi32",E,"Broadcasts 32-bit integer `a` to all elements of returned…",N,[[["i32"]],[R[112]]]],[5,"_mm256_set1_epi64x",E,"Broadcasts 64-bit integer `a` to all elements of returned…",N,[[["i64"]],[R[112]]]],[5,"_mm256_castpd_ps",E,"Cast vector of type __m256d to type __m256.",N,[[[R[100]]],["__m256"]]],[5,"_mm256_castps_pd",E,"Cast vector of type __m256 to type __m256d.",N,[[["__m256"]],[R[100]]]],[5,"_mm256_castps_si256",E,"Casts vector of type __m256 to type __m256i.",N,[[["__m256"]],[R[112]]]],[5,"_mm256_castsi256_ps",E,"Casts vector of type __m256i to type __m256.",N,[[[R[112]]],["__m256"]]],[5,"_mm256_castpd_si256",E,"Casts vector of type __m256d to type __m256i.",N,[[[R[100]]],[R[112]]]],[5,"_mm256_castsi256_pd",E,"Casts vector of type __m256i to type __m256d.",N,[[[R[112]]],[R[100]]]],[5,"_mm256_castps256_ps128",E,"Casts vector of type __m256 to type __m128.",N,[[["__m256"]],[R[4]]]],[5,"_mm256_castpd256_pd128",E,"Casts vector of type __m256d to type 
__m128d.",N,[[[R[100]]],[R[44]]]],[5,"_mm256_castsi256_si128",E,"Casts vector of type __m256i to type __m128i.",N,[[[R[112]]],[R[32]]]],[5,"_mm256_castps128_ps256",E,"Casts vector of type __m128 to type __m256; the upper 128…",N,[[[R[4]]],["__m256"]]],[5,"_mm256_castpd128_pd256",E,"Casts vector of type __m128d to type __m256d; the upper…",N,[[[R[44]]],[R[100]]]],[5,"_mm256_castsi128_si256",E,"Casts vector of type __m128i to type __m256i; the upper…",N,[[[R[32]]],[R[112]]]],[5,"_mm256_zextps128_ps256",E,"Constructs a 256-bit floating-point vector of `[8 x…",N,[[[R[4]]],["__m256"]]],[5,"_mm256_zextsi128_si256",E,"Constructs a 256-bit integer vector from a 128-bit integer…",N,[[[R[32]]],[R[112]]]],[5,"_mm256_zextpd128_pd256",E,"Constructs a 256-bit floating-point vector of `[4 x…",N,[[[R[44]]],[R[100]]]],[5,"_mm256_undefined_ps",E,"Returns vector of type `__m256` with undefined elements.",N,[[],["__m256"]]],[5,"_mm256_undefined_pd",E,"Returns vector of type `__m256d` with undefined elements.",N,[[],[R[100]]]],[5,"_mm256_undefined_si256",E,"Returns vector of type __m256i with undefined elements.",N,[[],[R[112]]]],[5,"_mm256_set_m128",E,R[137],N,[[[R[4]]],["__m256"]]],[5,"_mm256_set_m128d",E,R[138],N,[[[R[44]]],[R[100]]]],[5,"_mm256_set_m128i",E,R[139],N,[[[R[32]]],[R[112]]]],[5,"_mm256_setr_m128",E,R[137],N,[[[R[4]]],["__m256"]]],[5,"_mm256_setr_m128d",E,R[138],N,[[[R[44]]],[R[100]]]],[5,"_mm256_setr_m128i",E,R[139],N,[[[R[32]]],[R[112]]]],[5,"_mm256_loadu2_m128",E,"Loads two 128-bit values (composed of 4 packed…",N,[[],["__m256"]]],[5,"_mm256_loadu2_m128d",E,"Loads two 128-bit values (composed of 2 packed…",N,[[],[R[100]]]],[5,"_mm256_loadu2_m128i",E,"Loads two 128-bit values (composed of integer data) from…",N,[[],[R[112]]]],[5,"_mm256_storeu2_m128",E,"Stores the high and low 128-bit halves (each composed of 4…",N,[[["__m256"]]]],[5,"_mm256_storeu2_m128d",E,"Stores the high and low 128-bit halves (each composed of 2…",N,[[[R[100]]]]],[5,"_mm256_storeu2_m128i",E,"Stores the high and low 128-bit halves (each composed of…",N,[[[R[112]]]]],[5,"_mm256_cvtss_f32",E,R[203],N,[[["__m256"]],["f32"]]],[5,"_mm256_abs_epi32",E,R[215],N,[[[R[112]]],[R[112]]]],[5,"_mm256_abs_epi16",E,"Computes the absolute values of packed 16-bit integers in…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_abs_epi8",E,"Computes the absolute values of packed 8-bit integers in…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_add_epi64",E,R[140],N,[[[R[112]]],[R[112]]]],[5,"_mm256_add_epi32",E,R[141],N,[[[R[112]]],[R[112]]]],[5,"_mm256_add_epi16",E,R[142],N,[[[R[112]]],[R[112]]]],[5,"_mm256_add_epi8",E,R[143],N,[[[R[112]]],[R[112]]]],[5,"_mm256_adds_epi8",E,R[144],N,[[[R[112]]],[R[112]]]],[5,"_mm256_adds_epi16",E,R[145],N,[[[R[112]]],[R[112]]]],[5,"_mm256_adds_epu8",E,R[146],N,[[[R[112]]],[R[112]]]],[5,"_mm256_adds_epu16",E,R[147],N,[[[R[112]]],[R[112]]]],[5,"_mm256_alignr_epi8",E,"Concatenates pairs of 16-byte blocks in `a` and `b` into a…",N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_and_si256",E,R[128],N,[[[R[112]]],[R[112]]]],[5,"_mm256_andnot_si256",E,"Computes the bitwise NOT of 256 bits (representing integer…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_avg_epu16",E,R[148],N,[[[R[112]]],[R[112]]]],[5,"_mm256_avg_epu8",E,R[149],N,[[[R[112]]],[R[112]]]],[5,"_mm_blend_epi32",E,R[150],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm256_blend_epi32",E,R[150],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_blend_epi16",E,"Blends packed 16-bit integers from `a` and `b` using…",N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_blendv_epi8",E,"Blends packed 8-bit integers from 
`a` and `b` using `mask`.",N,[[[R[112]]],[R[112]]]],[5,"_mm_broadcastb_epi8",E,R[151],N,[[[R[32]]],[R[32]]]],[5,"_mm256_broadcastb_epi8",E,R[151],N,[[[R[32]]],[R[112]]]],[5,"_mm_broadcastd_epi32",E,R[152],N,[[[R[32]]],[R[32]]]],[5,"_mm256_broadcastd_epi32",E,R[152],N,[[[R[32]]],[R[112]]]],[5,"_mm_broadcastq_epi64",E,R[153],N,[[[R[32]]],[R[32]]]],[5,"_mm256_broadcastq_epi64",E,R[153],N,[[[R[32]]],[R[112]]]],[5,"_mm_broadcastsd_pd",E,R[154],N,[[[R[44]]],[R[44]]]],[5,"_mm256_broadcastsd_pd",E,R[154],N,[[[R[44]]],[R[100]]]],[5,"_mm256_broadcastsi128_si256",E,"Broadcasts 128 bits of integer data from a to all 128-bit…",N,[[[R[32]]],[R[112]]]],[5,"_mm_broadcastss_ps",E,R[155],N,[[[R[4]]],[R[4]]]],[5,"_mm256_broadcastss_ps",E,R[155],N,[[[R[4]]],["__m256"]]],[5,"_mm_broadcastw_epi16",E,R[156],N,[[[R[32]]],[R[32]]]],[5,"_mm256_broadcastw_epi16",E,R[156],N,[[[R[32]]],[R[112]]]],[5,"_mm256_cmpeq_epi64",E,"Compares packed 64-bit integers in `a` and `b` for equality.",N,[[[R[112]]],[R[112]]]],[5,"_mm256_cmpeq_epi32",E,R[157],N,[[[R[112]]],[R[112]]]],[5,"_mm256_cmpeq_epi16",E,R[158],N,[[[R[112]]],[R[112]]]],[5,"_mm256_cmpeq_epi8",E,R[159],N,[[[R[112]]],[R[112]]]],[5,"_mm256_cmpgt_epi64",E,R[160],N,[[[R[112]]],[R[112]]]],[5,"_mm256_cmpgt_epi32",E,R[43],N,[[[R[112]]],[R[112]]]],[5,"_mm256_cmpgt_epi16",E,R[42],N,[[[R[112]]],[R[112]]]],[5,"_mm256_cmpgt_epi8",E,R[161],N,[[[R[112]]],[R[112]]]],[5,"_mm256_cvtepi16_epi32",E,"Sign-extend 16-bit integers to 32-bit integers.",N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepi16_epi64",E,"Sign-extend 16-bit integers to 64-bit integers.",N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepi32_epi64",E,"Sign-extend 32-bit integers to 64-bit integers.",N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepi8_epi16",E,"Sign-extend 8-bit integers to 16-bit integers.",N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepi8_epi32",E,"Sign-extend 8-bit integers to 32-bit integers.",N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepi8_epi64",E,"Sign-extend 8-bit integers to 64-bit integers.",N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepu16_epi32",E,R[90],N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepu16_epi64",E,"Zero-extend the lower four unsigned 16-bit integers in `a`…",N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepu32_epi64",E,"Zero-extend unsigned 32-bit integers in `a` to 64-bit…",N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepu8_epi16",E,"Zero-extend unsigned 8-bit integers in `a` to 16-bit…",N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepu8_epi32",E,"Zero-extend the lower eight unsigned 8-bit integers in `a`…",N,[[[R[32]]],[R[112]]]],[5,"_mm256_cvtepu8_epi64",E,"Zero-extend the lower four unsigned 8-bit integers in `a`…",N,[[[R[32]]],[R[112]]]],[5,"_mm256_extracti128_si256",E,"Extracts 128 bits (of integer data) from `a` selected with…",N,[[[R[112]],["i32"]],[R[32]]]],[5,"_mm256_hadd_epi16",E,R[162],N,[[[R[112]]],[R[112]]]],[5,"_mm256_hadd_epi32",E,"Horizontally adds adjacent pairs of 32-bit integers in `a`…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_hadds_epi16",E,R[162],N,[[[R[112]]],[R[112]]]],[5,"_mm256_hsub_epi16",E,R[163],N,[[[R[112]]],[R[112]]]],[5,"_mm256_hsub_epi32",E,"Horizontally subtract adjacent pairs of 32-bit integers 
in…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_hsubs_epi16",E,R[163],N,[[[R[112]]],[R[112]]]],[5,"_mm_i32gather_epi32",E,R[164],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_mask_i32gather_epi32",E,R[164],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm256_i32gather_epi32",E,R[164],N,[[["i32"],[R[112]]],[R[112]]]],[5,"_mm256_mask_i32gather_epi32",E,R[164],N,[[["i32"],[R[112]]],[R[112]]]],[5,"_mm_i32gather_ps",E,R[164],N,[[[R[32]],["i32"]],[R[4]]]],[5,"_mm_mask_i32gather_ps",E,R[164],N,[[["i32"],[R[32]],[R[4]]],[R[4]]]],[5,"_mm256_i32gather_ps",E,R[164],N,[[[R[112]],["i32"]],["__m256"]]],[5,"_mm256_mask_i32gather_ps",E,R[164],N,[[[R[112]],["i32"],["__m256"]],["__m256"]]],[5,"_mm_i32gather_epi64",E,R[164],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_mask_i32gather_epi64",E,R[164],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm256_i32gather_epi64",E,R[164],N,[[[R[32]],["i32"]],[R[112]]]],[5,"_mm256_mask_i32gather_epi64",E,R[164],N,[[[R[112]],["i32"],[R[32]]],[R[112]]]],[5,"_mm_i32gather_pd",E,R[164],N,[[[R[32]],["i32"]],[R[44]]]],[5,"_mm_mask_i32gather_pd",E,R[164],N,[[[R[44]],["i32"],[R[32]]],[R[44]]]],[5,"_mm256_i32gather_pd",E,R[164],N,[[[R[32]],["i32"]],[R[100]]]],[5,"_mm256_mask_i32gather_pd",E,R[164],N,[[[R[100]],["i32"],[R[32]]],[R[100]]]],[5,"_mm_i64gather_epi32",E,R[164],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_mask_i64gather_epi32",E,R[164],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm256_i64gather_epi32",E,R[164],N,[[["i32"],[R[112]]],[R[32]]]],[5,"_mm256_mask_i64gather_epi32",E,R[164],N,[[[R[112]],["i32"],[R[32]]],[R[32]]]],[5,"_mm_i64gather_ps",E,R[164],N,[[[R[32]],["i32"]],[R[4]]]],[5,"_mm_mask_i64gather_ps",E,R[164],N,[[["i32"],[R[32]],[R[4]]],[R[4]]]],[5,"_mm256_i64gather_ps",E,R[164],N,[[[R[112]],["i32"]],[R[4]]]],[5,"_mm256_mask_i64gather_ps",E,R[164],N,[[[R[112]],["i32"],[R[4]]],[R[4]]]],[5,"_mm_i64gather_epi64",E,R[164],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_mask_i64gather_epi64",E,R[164],N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm256_i64gather_epi64",E,R[164],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_mask_i64gather_epi64",E,R[164],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm_i64gather_pd",E,R[164],N,[[[R[32]],["i32"]],[R[44]]]],[5,"_mm_mask_i64gather_pd",E,R[164],N,[[[R[44]],["i32"],[R[32]]],[R[44]]]],[5,"_mm256_i64gather_pd",E,R[164],N,[[[R[112]],["i32"]],[R[100]]]],[5,"_mm256_mask_i64gather_pd",E,R[164],N,[[[R[100]],[R[112]],["i32"]],[R[100]]]],[5,"_mm256_inserti128_si256",E,"Copies `a` to `dst`, then inserts 128 bits (of integer…",N,[[[R[32]],[R[112]],["i32"]],[R[112]]]],[5,"_mm256_madd_epi16",E,"Multiplies packed signed 16-bit integers in `a` and `b`,…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_maddubs_epi16",E,"Vertically multiplies each unsigned 8-bit integer from 
`a`…",N,[[[R[112]]],[R[112]]]],[5,"_mm_maskload_epi32",E,R[165],N,[[[R[32]]],[R[32]]]],[5,"_mm256_maskload_epi32",E,R[165],N,[[[R[112]]],[R[112]]]],[5,"_mm_maskload_epi64",E,R[166],N,[[[R[32]]],[R[32]]]],[5,"_mm256_maskload_epi64",E,R[166],N,[[[R[112]]],[R[112]]]],[5,"_mm_maskstore_epi32",E,R[167],N,[[[R[32]]]]],[5,"_mm256_maskstore_epi32",E,R[167],N,[[[R[112]]]]],[5,"_mm_maskstore_epi64",E,R[168],N,[[[R[32]]]]],[5,"_mm256_maskstore_epi64",E,R[168],N,[[[R[112]]]]],[5,"_mm256_max_epi16",E,R[33],N,[[[R[112]]],[R[112]]]],[5,"_mm256_max_epi32",E,R[86],N,[[[R[112]]],[R[112]]]],[5,"_mm256_max_epi8",E,R[169],N,[[[R[112]]],[R[112]]]],[5,"_mm256_max_epu16",E,R[85],N,[[[R[112]]],[R[112]]]],[5,"_mm256_max_epu32",E,R[87],N,[[[R[112]]],[R[112]]]],[5,"_mm256_max_epu8",E,R[34],N,[[[R[112]]],[R[112]]]],[5,"_mm256_min_epi16",E,R[33],N,[[[R[112]]],[R[112]]]],[5,"_mm256_min_epi32",E,R[86],N,[[[R[112]]],[R[112]]]],[5,"_mm256_min_epi8",E,R[169],N,[[[R[112]]],[R[112]]]],[5,"_mm256_min_epu16",E,R[85],N,[[[R[112]]],[R[112]]]],[5,"_mm256_min_epu32",E,R[87],N,[[[R[112]]],[R[112]]]],[5,"_mm256_min_epu8",E,R[34],N,[[[R[112]]],[R[112]]]],[5,"_mm256_movemask_epi8",E,"Creates mask from the most significant bit of each 8-bit…",N,[[[R[112]]],["i32"]]],[5,"_mm256_mpsadbw_epu8",E,"Computes the sum of absolute differences (SADs) of…",N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_mul_epi32",E,R[170],N,[[[R[112]]],[R[112]]]],[5,"_mm256_mul_epu32",E,R[171],N,[[[R[112]]],[R[112]]]],[5,"_mm256_mulhi_epi16",E,R[173],N,[[[R[112]]],[R[112]]]],[5,"_mm256_mulhi_epu16",E,R[172],N,[[[R[112]]],[R[112]]]],[5,"_mm256_mullo_epi16",E,R[173],N,[[[R[112]]],[R[112]]]],[5,"_mm256_mullo_epi32",E,R[174],N,[[[R[112]]],[R[112]]]],[5,"_mm256_mulhrs_epi16",E,"Multiplies packed 16-bit integers in `a` and `b`,…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_or_si256",E,"Computes the bitwise OR of 256 bits (representing integer…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_packs_epi16",E,R[47],N,[[[R[112]]],[R[112]]]],[5,"_mm256_packs_epi32",E,R[88],N,[[[R[112]]],[R[112]]]],[5,"_mm256_packus_epi16",E,R[47],N,[[[R[112]]],[R[112]]]],[5,"_mm256_packus_epi32",E,R[88],N,[[[R[112]]],[R[112]]]],[5,"_mm256_permutevar8x32_epi32",E,"Permutes packed 32-bit integers from `a` according to the…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_permute4x64_epi64",E,"Permutes 64-bit integers from `a` using control mask `imm8`.",N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_permute2x128_si256",E,"Shuffles 128-bits of integer data selected by `imm8` from…",N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_permute4x64_pd",E,"Shuffles 64-bit floating-point elements in `a` across…",N,[[[R[100]],["i32"]],[R[100]]]],[5,"_mm256_permutevar8x32_ps",E,"Shuffles eight 32-bit floating-point elements in `a` across…",N,[[[R[112]],["__m256"]],["__m256"]]],[5,"_mm256_sad_epu8",E,"Computes the absolute differences of packed unsigned 8-bit…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_shuffle_epi8",E,R[175],N,[[[R[112]]],[R[112]]]],[5,"_mm256_shuffle_epi32",E,"Shuffles 32-bit integers in 128-bit lanes of `a` using the…",N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_shufflehi_epi16",E,"Shuffles 16-bit integers in the high 64 bits of 128-bit…",N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_shufflelo_epi16",E,"Shuffles 16-bit integers in the low 64 bits of 
128-bit…",N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_sign_epi16",E,R[80],N,[[[R[112]]],[R[112]]]],[5,"_mm256_sign_epi32",E,R[81],N,[[[R[112]]],[R[112]]]],[5,"_mm256_sign_epi8",E,R[79],N,[[[R[112]]],[R[112]]]],[5,"_mm256_sll_epi16",E,R[176],N,[[[R[32]],[R[112]]],[R[112]]]],[5,"_mm256_sll_epi32",E,R[177],N,[[[R[32]],[R[112]]],[R[112]]]],[5,"_mm256_sll_epi64",E,R[178],N,[[[R[32]],[R[112]]],[R[112]]]],[5,"_mm256_slli_epi16",E,R[179],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_slli_epi32",E,R[180],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_slli_epi64",E,R[181],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_slli_si256",E,R[182],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_bslli_epi128",E,R[182],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm_sllv_epi32",E,R[183],N,[[[R[32]]],[R[32]]]],[5,"_mm256_sllv_epi32",E,R[183],N,[[[R[112]]],[R[112]]]],[5,"_mm_sllv_epi64",E,R[184],N,[[[R[32]]],[R[32]]]],[5,"_mm256_sllv_epi64",E,R[184],N,[[[R[112]]],[R[112]]]],[5,"_mm256_sra_epi16",E,R[39],N,[[[R[32]],[R[112]]],[R[112]]]],[5,"_mm256_sra_epi32",E,R[41],N,[[[R[32]],[R[112]]],[R[112]]]],[5,"_mm256_srai_epi16",E,R[38],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_srai_epi32",E,R[40],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm_srav_epi32",E,R[185],N,[[[R[32]]],[R[32]]]],[5,"_mm256_srav_epi32",E,R[185],N,[[[R[112]]],[R[112]]]],[5,"_mm256_srli_si256",E,R[186],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_bsrli_epi128",E,R[186],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_srl_epi16",E,R[39],N,[[[R[32]],[R[112]]],[R[112]]]],[5,"_mm256_srl_epi32",E,R[41],N,[[[R[32]],[R[112]]],[R[112]]]],[5,"_mm256_srl_epi64",E,R[187],N,[[[R[32]],[R[112]]],[R[112]]]],[5,"_mm256_srli_epi16",E,R[38],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_srli_epi32",E,R[40],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm256_srli_epi64",E,R[188],N,[[[R[112]],["i32"]],[R[112]]]],[5,"_mm_srlv_epi32",E,R[185],N,[[[R[32]]],[R[32]]]],[5,"_mm256_srlv_epi32",E,R[185],N,[[[R[112]]],[R[112]]]],[5,"_mm_srlv_epi64",E,R[189],N,[[[R[32]]],[R[32]]]],[5,"_mm256_srlv_epi64",E,R[189],N,[[[R[112]]],[R[112]]]],[5,"_mm256_sub_epi16",E,R[190],N,[[[R[112]]],[R[112]]]],[5,"_mm256_sub_epi32",E,"Subtract packed 32-bit integers in `b` from packed 16-bit…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_sub_epi64",E,"Subtract packed 64-bit integers in `b` from packed 16-bit…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_sub_epi8",E,"Subtract packed 8-bit integers in `b` from packed 16-bit…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_subs_epi16",E,R[190],N,[[[R[112]]],[R[112]]]],[5,"_mm256_subs_epi8",E,R[191],N,[[[R[112]]],[R[112]]]],[5,"_mm256_subs_epu16",E,R[192],N,[[[R[112]]],[R[112]]]],[5,"_mm256_subs_epu8",E,R[193],N,[[[R[112]]],[R[112]]]],[5,"_mm256_unpackhi_epi8",E,R[194],N,[[[R[112]]],[R[112]]]],[5,"_mm256_unpacklo_epi8",E,R[195],N,[[[R[112]]],[R[112]]]],[5,"_mm256_unpackhi_epi16",E,R[196],N,[[[R[112]]],[R[112]]]],[5,"_mm256_unpacklo_epi16",E,R[197],N,[[[R[112]]],[R[112]]]],[5,"_mm256_unpackhi_epi32",E,R[198],N,[[[R[112]]],[R[112]]]],[5,"_mm256_unpacklo_epi32",E,R[199],N,[[[R[112]]],[R[112]]]],[5,"_mm256_unpackhi_epi64",E,R[200],N,[[[R[112]]],[R[112]]]],[5,"_mm256_unpacklo_epi64",E,R[201],N,[[[R[112]]],[R[112]]]],[5,"_mm256_xor_si256",E,"Computes the bitwise XOR of 256 bits (representing integer…",N,[[[R[112]]],[R[112]]]],[5,"_mm256_extract_epi8",E,R[202],N,[[[R[112]],["i32"]],["i8"]]],[5,"_mm256_extract_epi16",E,"Extracts a 16-bit integer from `a`, selected with `imm8`.…",N,[[[R[112]],["i32"]],["i16"]]],[5,"_mm256_extract_epi32",E,"Extracts a 32-bit integer from `a`, selected with 
`imm8`.",N,[[[R[112]],["i32"]],["i32"]]],[5,"_mm256_cvtsd_f64",E,"Returns the first element of the input vector of `[4 x…",N,[[[R[100]]],["f64"]]],[5,"_mm256_cvtsi256_si32",E,R[203],N,[[[R[112]]],["i32"]]],[5,"_mm_fmadd_pd",E,R[102],N,[[[R[44]]],[R[44]]]],[5,"_mm256_fmadd_pd",E,R[102],N,[[[R[100]]],[R[100]]]],[5,"_mm_fmadd_ps",E,R[204],N,[[[R[4]]],[R[4]]]],[5,"_mm256_fmadd_ps",E,R[204],N,[[["__m256"]],["__m256"]]],[5,"_mm_fmadd_sd",E,R[205],N,[[[R[44]]],[R[44]]]],[5,"_mm_fmadd_ss",E,R[206],N,[[[R[4]]],[R[4]]]],[5,"_mm_fmaddsub_pd",E,R[102],N,[[[R[44]]],[R[44]]]],[5,"_mm256_fmaddsub_pd",E,R[102],N,[[[R[100]]],[R[100]]]],[5,"_mm_fmaddsub_ps",E,R[204],N,[[[R[4]]],[R[4]]]],[5,"_mm256_fmaddsub_ps",E,R[204],N,[[["__m256"]],["__m256"]]],[5,"_mm_fmsub_pd",E,R[102],N,[[[R[44]]],[R[44]]]],[5,"_mm256_fmsub_pd",E,R[102],N,[[[R[100]]],[R[100]]]],[5,"_mm_fmsub_ps",E,R[204],N,[[[R[4]]],[R[4]]]],[5,"_mm256_fmsub_ps",E,R[204],N,[[["__m256"]],["__m256"]]],[5,"_mm_fmsub_sd",E,R[205],N,[[[R[44]]],[R[44]]]],[5,"_mm_fmsub_ss",E,R[206],N,[[[R[4]]],[R[4]]]],[5,"_mm_fmsubadd_pd",E,R[102],N,[[[R[44]]],[R[44]]]],[5,"_mm256_fmsubadd_pd",E,R[102],N,[[[R[100]]],[R[100]]]],[5,"_mm_fmsubadd_ps",E,R[204],N,[[[R[4]]],[R[4]]]],[5,"_mm256_fmsubadd_ps",E,R[204],N,[[["__m256"]],["__m256"]]],[5,"_mm_fnmadd_pd",E,R[102],N,[[[R[44]]],[R[44]]]],[5,"_mm256_fnmadd_pd",E,R[102],N,[[[R[100]]],[R[100]]]],[5,"_mm_fnmadd_ps",E,R[204],N,[[[R[4]]],[R[4]]]],[5,"_mm256_fnmadd_ps",E,R[204],N,[[["__m256"]],["__m256"]]],[5,"_mm_fnmadd_sd",E,R[205],N,[[[R[44]]],[R[44]]]],[5,"_mm_fnmadd_ss",E,R[206],N,[[[R[4]]],[R[4]]]],[5,"_mm_fnmsub_pd",E,R[102],N,[[[R[44]]],[R[44]]]],[5,"_mm256_fnmsub_pd",E,R[102],N,[[[R[100]]],[R[100]]]],[5,"_mm_fnmsub_ps",E,R[204],N,[[[R[4]]],[R[4]]]],[5,"_mm256_fnmsub_ps",E,R[204],N,[[["__m256"]],["__m256"]]],[5,"_mm_fnmsub_sd",E,R[205],N,[[[R[44]]],[R[44]]]],[5,"_mm_fnmsub_ss",E,R[206],N,[[[R[4]]],[R[4]]]],[5,"_lzcnt_u32",E,"Counts the leading most significant zero bits.",N,[[["u32"]],["u32"]]],[5,"_popcnt32",E,"Counts the bits that are set.",N,[[["i32"]],["i32"]]],[5,"_bextr_u32",E,"Extracts bits in range [`start`, `start` + `length`) from…",N,[[["u32"]],["u32"]]],[5,"_bextr2_u32",E,"Extracts bits of `a` specified by `control` into the least…",N,[[["u32"]],["u32"]]],[5,"_andn_u32",E,"Bitwise logical `AND` of inverted `a` with `b`.",N,[[["u32"]],["u32"]]],[5,"_blsi_u32",E,"Extracts lowest set isolated bit.",N,[[["u32"]],["u32"]]],[5,"_blsmsk_u32",E,"Gets mask up to lowest set bit.",N,[[["u32"]],["u32"]]],[5,"_blsr_u32",E,"Resets the lowest set bit of `x`.",N,[[["u32"]],["u32"]]],[5,"_tzcnt_u32",E,R[207],N,[[["u32"]],["u32"]]],[5,"_mm_tzcnt_32",E,R[207],N,[[["u32"]],["i32"]]],[5,"_mulx_u32",E,"Unsigned multiply without affecting flags.",N,[[["u32"],["u32"]],["u32"]]],[5,"_bzhi_u32",E,"Zeroes higher bits of `a` >= `index`.",N,[[["u32"]],["u32"]]],[5,"_pdep_u32",E,"Scatter contiguous low order bits of `a` to the result at…",N,[[["u32"]],["u32"]]],[5,"_pext_u32",E,"Gathers the bits of `x` specified by the `mask` into the…",N,[[["u32"]],["u32"]]],[5,"_mm_extract_si64",E,"Extracts the bit range specified by `y` from the lower 64…",N,[[[R[32]]],[R[32]]]],[5,"_mm_insert_si64",E,"Inserts the `[length:0]` bits of `y` into `x` at `index`.",N,[[[R[32]]],[R[32]]]],[5,"_mm_stream_sd",E,R[208],N,[[[R[44]]]]],[5,"_mm_stream_ss",E,R[208],N,[[[R[4]]]]],[5,"_blcfill_u32",E,"Clears all bits below the least significant zero bit of `x`.",N,[[["u32"]],["u32"]]],[5,"_blci_u32",E,"Sets all bits of `x` to 1 except for the least 
significant…",N,[[["u32"]],["u32"]]],[5,"_blcic_u32",E,R[209],N,[[["u32"]],["u32"]]],[5,"_blcmsk_u32",E,R[209],N,[[["u32"]],["u32"]]],[5,"_blcs_u32",E,"Sets the least significant zero bit of `x`.",N,[[["u32"]],["u32"]]],[5,"_blsfill_u32",E,"Sets all bits of `x` below the least significant one.",N,[[["u32"]],["u32"]]],[5,"_blsic_u32",E,"Clears least significant bit and sets all other bits.",N,[[["u32"]],["u32"]]],[5,"_t1mskc_u32",E,"Clears all bits below the least significant zero of `x`…",N,[[["u32"]],["u32"]]],[5,"_tzmsk_u32",E,"Sets all bits below the least significant one of `x` and…",N,[[["u32"]],["u32"]]],[5,"_mm_setzero_si64",E,"Constructs a 64-bit integer vector initialized to zero.",N,[[],["__m64"]]],[5,"_mm_add_pi8",E,R[143],N,[[["__m64"]],["__m64"]]],[5,"_m_paddb",E,R[143],N,[[["__m64"]],["__m64"]]],[5,"_mm_add_pi16",E,R[142],N,[[["__m64"]],["__m64"]]],[5,"_m_paddw",E,R[142],N,[[["__m64"]],["__m64"]]],[5,"_mm_add_pi32",E,R[141],N,[[["__m64"]],["__m64"]]],[5,"_m_paddd",E,R[141],N,[[["__m64"]],["__m64"]]],[5,"_mm_adds_pi8",E,R[144],N,[[["__m64"]],["__m64"]]],[5,"_m_paddsb",E,R[144],N,[[["__m64"]],["__m64"]]],[5,"_mm_adds_pi16",E,R[145],N,[[["__m64"]],["__m64"]]],[5,"_m_paddsw",E,R[145],N,[[["__m64"]],["__m64"]]],[5,"_mm_adds_pu8",E,R[146],N,[[["__m64"]],["__m64"]]],[5,"_m_paddusb",E,R[146],N,[[["__m64"]],["__m64"]]],[5,"_mm_adds_pu16",E,R[147],N,[[["__m64"]],["__m64"]]],[5,"_m_paddusw",E,R[147],N,[[["__m64"]],["__m64"]]],[5,"_mm_sub_pi8",E,R[191],N,[[["__m64"]],["__m64"]]],[5,"_m_psubb",E,R[191],N,[[["__m64"]],["__m64"]]],[5,"_mm_sub_pi16",E,R[190],N,[[["__m64"]],["__m64"]]],[5,"_m_psubw",E,R[190],N,[[["__m64"]],["__m64"]]],[5,"_mm_sub_pi32",E,R[210],N,[[["__m64"]],["__m64"]]],[5,"_m_psubd",E,R[210],N,[[["__m64"]],["__m64"]]],[5,"_mm_subs_pi8",E,R[191],N,[[["__m64"]],["__m64"]]],[5,"_m_psubsb",E,R[191],N,[[["__m64"]],["__m64"]]],[5,"_mm_subs_pi16",E,R[190],N,[[["__m64"]],["__m64"]]],[5,"_m_psubsw",E,R[190],N,[[["__m64"]],["__m64"]]],[5,"_mm_subs_pu8",E,R[193],N,[[["__m64"]],["__m64"]]],[5,"_m_psubusb",E,R[193],N,[[["__m64"]],["__m64"]]],[5,"_mm_subs_pu16",E,R[192],N,[[["__m64"]],["__m64"]]],[5,"_m_psubusw",E,R[192],N,[[["__m64"]],["__m64"]]],[5,"_mm_packs_pi16",E,R[47],N,[[["__m64"]],["__m64"]]],[5,"_mm_packs_pi32",E,R[88],N,[[["__m64"]],["__m64"]]],[5,"_mm_cmpgt_pi8",E,R[211],N,[[["__m64"]],["__m64"]]],[5,"_mm_cmpgt_pi16",E,R[211],N,[[["__m64"]],["__m64"]]],[5,"_mm_cmpgt_pi32",E,R[211],N,[[["__m64"]],["__m64"]]],[5,"_mm_unpackhi_pi16",E,"Unpacks the upper two elements from two `i16x4` vectors…",N,[[["__m64"]],["__m64"]]],[5,"_mm_unpackhi_pi8",E,"Unpacks the upper four elements from two `i8x8` vectors…",N,[[["__m64"]],["__m64"]]],[5,"_mm_unpacklo_pi8",E,"Unpacks the lower four elements from two `i8x8` vectors…",N,[[["__m64"]],["__m64"]]],[5,"_mm_unpacklo_pi16",E,"Unpacks the lower two elements from two `i16x4` vectors…",N,[[["__m64"]],["__m64"]]],[5,"_mm_unpackhi_pi32",E,"Unpacks the upper element from two `i32x2` vectors and…",N,[[["__m64"]],["__m64"]]],[5,"_mm_unpacklo_pi32",E,"Unpacks the lower element from two `i32x2` vectors and…",N,[[["__m64"]],["__m64"]]],[5,"_mm_set_pi16",E,"Sets packed 16-bit integers in dst with the supplied values.",N,[[["i16"]],["__m64"]]],[5,"_mm_set_pi32",E,"Sets packed 32-bit integers in dst with the supplied values.",N,[[["i32"]],["__m64"]]],[5,"_mm_set_pi8",E,"Sets packed 8-bit integers in dst with the supplied values.",N,[[["i8"]],["__m64"]]],[5,"_mm_set1_pi16",E,"Broadcasts 16-bit integer a to all all elements of 
dst.",N,[[["i16"]],["__m64"]]],[5,"_mm_set1_pi32",E,"Broadcasts 32-bit integer a to all all elements of dst.",N,[[["i32"]],["__m64"]]],[5,"_mm_set1_pi8",E,"Broadcasts 8-bit integer a to all all elements of dst.",N,[[["i8"]],["__m64"]]],[5,"_mm_setr_pi16",E,"Sets packed 16-bit integers in dst with the supplied…",N,[[["i16"]],["__m64"]]],[5,"_mm_setr_pi32",E,"Sets packed 32-bit integers in dst with the supplied…",N,[[["i32"]],["__m64"]]],[5,"_mm_setr_pi8",E,"Sets packed 8-bit integers in dst with the supplied values…",N,[[["i8"]],["__m64"]]],[5,"_mm_empty",E,R[212],N,[[]]],[5,"_m_empty",E,R[212],N,[[]]],[5,"_mm_cvtsi32_si64",E,"Copies 32-bit integer `a` to the lower elements of the…",N,[[["i32"]],["__m64"]]],[5,"_mm_cvtsi64_si32",E,"Return the lower 32-bit integer in `a`.",N,[[["__m64"]],["i32"]]],[5,"_mm_clmulepi64_si128",E,"Performs a carry-less multiplication of two 64-bit…",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_aesdec_si128",E,"Performs one round of an AES decryption flow on data…",N,[[[R[32]]],[R[32]]]],[5,"_mm_aesdeclast_si128",E,"Performs the last round of an AES decryption flow on data…",N,[[[R[32]]],[R[32]]]],[5,"_mm_aesenc_si128",E,"Performs one round of an AES encryption flow on data…",N,[[[R[32]]],[R[32]]]],[5,"_mm_aesenclast_si128",E,"Performs the last round of an AES encryption flow on data…",N,[[[R[32]]],[R[32]]]],[5,"_mm_aesimc_si128",E,"Performs the `InvMixColumns` transformation on `a`.",N,[[[R[32]]],[R[32]]]],[5,"_mm_aeskeygenassist_si128",E,"Assist in expanding the AES cipher key.",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_rdrand16_step",E,"Read a hardware generated 16-bit random value and store…",N,[[["u16"]],["i32"]]],[5,"_rdrand32_step",E,"Read a hardware generated 32-bit random value and store…",N,[[["u32"]],["i32"]]],[5,"_rdseed16_step",E,"Read a 16-bit NIST SP800-90B and SP800-90C compliant…",N,[[["u16"]],["i32"]]],[5,"_rdseed32_step",E,"Read a 32-bit NIST SP800-90B and SP800-90C compliant…",N,[[["u32"]],["i32"]]],[5,"_mm_sha1msg1_epu32",E,R[213],N,[[[R[32]]],[R[32]]]],[5,"_mm_sha1msg2_epu32",E,"Performs the final calculation for the next four SHA1…",N,[[[R[32]]],[R[32]]]],[5,"_mm_sha1nexte_epu32",E,"Calculate SHA1 state variable E after four rounds of…",N,[[[R[32]]],[R[32]]]],[5,"_mm_sha1rnds4_epu32",E,"Performs four rounds of SHA1 operation using an initial…",N,[[[R[32]],["i32"]],[R[32]]]],[5,"_mm_sha256msg1_epu32",E,R[213],N,[[[R[32]]],[R[32]]]],[5,"_mm_sha256msg2_epu32",E,"Performs the final calculation for the next four SHA256…",N,[[[R[32]]],[R[32]]]],[5,"_mm_sha256rnds2_epu32",E,"Performs 2 rounds of SHA256 operation using an initial…",N,[[[R[32]]],[R[32]]]],[5,"_addcarry_u32",E,R[214],N,[[["u32"],["u32"],["u8"]],["u8"]]],[5,"_addcarryx_u32",E,R[214],N,[[["u32"],["u32"],["u8"]],["u8"]]],[5,"_subborrow_u32",E,R[214],N,[[["u32"],["u32"],["u8"]],["u8"]]],[5,"ud2",E,"Generates the trap instruction `UD2`",N,[[]]],[5,"_mm512_abs_epi32",E,R[215],N,[[[R[217]]],[R[217]]]],[5,"_mm512_mask_abs_epi32",E,R[216],N,[[[R[217]],[R[218]]],[R[217]]]],[5,"_mm512_maskz_abs_epi32",E,R[216],N,[[[R[217]],[R[218]]],[R[217]]]],[5,"_mm512_setzero_si512",E,"Returns vector of type `__m512i` with all elements set to…",N,[[],[R[217]]]],[5,"_mm512_setr_epi32",E,"Sets packed 32-bit integers in `dst` with the supplied…",N,[[["i32"]],[R[217]]]],[5,"_mm512_set1_epi64",E,"Broadcast 64-bit integer `a` to all elements of 
`dst`.",N,[[["i64"]],[R[217]]]],[5,"_mm512_madd52hi_epu64",E,R[219],N,[[[R[217]]],[R[217]]]],[5,"_mm512_madd52lo_epu64",E,R[219],N,[[[R[217]]],[R[217]]]],[5,"_mm256_madd52hi_epu64",E,R[219],N,[[[R[112]]],[R[112]]]],[5,"_mm256_madd52lo_epu64",E,R[219],N,[[[R[112]]],[R[112]]]],[5,"_mm_madd52hi_epu64",E,R[219],N,[[[R[32]]],[R[32]]]],[5,"_mm_madd52lo_epu64",E,R[219],N,[[[R[32]]],[R[32]]]],[5,"_bittest",E,R[220],N,[[["i32"]],["u8"]]],[5,"_bittestandset",E,R[220],N,[[["i32"]],["u8"]]],[5,"_bittestandreset",E,R[220],N,[[["i32"]],["u8"]]],[5,"_bittestandcomplement",E,R[220],N,[[["i32"]],["u8"]]],[5,"_xbegin",E,"Specifies the start of a restricted transactional memory…",N,[[],["u32"]]],[5,"_xend",E,"Specifies the end of a restricted transactional memory…",N,[[]]],[5,"_xabort",E,"Forces a restricted transactional memory (RTM) region to…",N,[[["u32"]]]],[5,"_xtest",E,"Queries whether the processor is executing in a…",N,[[],["u8"]]],[5,"_xabort_code",E,"Retrieves the parameter passed to [`_xabort`] when…",N,[[["u32"]],["u32"]]],[5,"_mm_cvtph_ps",E,"Converts the 4 x 16-bit half-precision float values in the…",N,[[[R[32]]],[R[4]]]],[5,"_mm256_cvtph_ps",E,"Converts the 8 x 16-bit half-precision float values in the…",N,[[[R[32]]],["__m256"]]],[5,"_mm_cvtps_ph",E,"Converts the 4 x 32-bit float values in the 128-bit vector…",N,[[["i32"],[R[4]]],[R[32]]]],[5,"_mm256_cvtps_ph",E,"Converts the 8 x 32-bit float values in the 256-bit vector…",N,[[["i32"],["__m256"]],[R[32]]]],[6,R[218],E,"The `__mmask16` type used in AVX-512 intrinsics, a 16-bit…",N,N],[17,"_XCR_XFEATURE_ENABLED_MASK",E,"`XFEATURE_ENABLED_MASK` for `XCR`",N,N],[17,"_MM_EXCEPT_INVALID",E,R[14],N,N],[17,"_MM_EXCEPT_DENORM",E,R[14],N,N],[17,"_MM_EXCEPT_DIV_ZERO",E,R[14],N,N],[17,"_MM_EXCEPT_OVERFLOW",E,R[14],N,N],[17,"_MM_EXCEPT_UNDERFLOW",E,R[14],N,N],[17,"_MM_EXCEPT_INEXACT",E,R[14],N,N],[17,"_MM_EXCEPT_MASK",E,"See `_MM_GET_EXCEPTION_STATE`",N,N],[17,"_MM_MASK_INVALID",E,R[14],N,N],[17,"_MM_MASK_DENORM",E,R[14],N,N],[17,"_MM_MASK_DIV_ZERO",E,R[14],N,N],[17,"_MM_MASK_OVERFLOW",E,R[14],N,N],[17,"_MM_MASK_UNDERFLOW",E,R[14],N,N],[17,"_MM_MASK_INEXACT",E,R[14],N,N],[17,"_MM_MASK_MASK",E,"See `_MM_GET_EXCEPTION_MASK`",N,N],[17,"_MM_ROUND_NEAREST",E,R[14],N,N],[17,"_MM_ROUND_DOWN",E,R[14],N,N],[17,"_MM_ROUND_UP",E,R[14],N,N],[17,"_MM_ROUND_TOWARD_ZERO",E,R[14],N,N],[17,"_MM_ROUND_MASK",E,"See `_MM_GET_ROUNDING_MODE`",N,N],[17,"_MM_FLUSH_ZERO_MASK",E,"See `_MM_GET_FLUSH_ZERO_MODE`",N,N],[17,"_MM_FLUSH_ZERO_ON",E,R[14],N,N],[17,"_MM_FLUSH_ZERO_OFF",E,R[14],N,N],[17,"_MM_HINT_T0",E,R[221],N,N],[17,"_MM_HINT_T1",E,R[221],N,N],[17,"_MM_HINT_T2",E,R[221],N,N],[17,"_MM_HINT_NTA",E,R[221],N,N],[17,"_MM_FROUND_TO_NEAREST_INT",E,"round to nearest",N,N],[17,"_MM_FROUND_TO_NEG_INF",E,"round down",N,N],[17,"_MM_FROUND_TO_POS_INF",E,"round up",N,N],[17,"_MM_FROUND_TO_ZERO",E,"truncate",N,N],[17,"_MM_FROUND_CUR_DIRECTION",E,"use MXCSR.RC; see `vendor::_MM_SET_ROUNDING_MODE`",N,N],[17,"_MM_FROUND_RAISE_EXC",E,"do not suppress exceptions",N,N],[17,"_MM_FROUND_NO_EXC",E,"suppress exceptions",N,N],[17,"_MM_FROUND_NINT",E,"round to nearest and do not suppress exceptions",N,N],[17,"_MM_FROUND_FLOOR",E,"round down and do not suppress exceptions",N,N],[17,"_MM_FROUND_CEIL",E,"round up and do not suppress exceptions",N,N],[17,"_MM_FROUND_TRUNC",E,"truncate and do not suppress exceptions",N,N],[17,"_MM_FROUND_RINT",E,"use MXCSR.RC and do not suppress exceptions; see…",N,N],[17,"_MM_FROUND_NEARBYINT",E,"use MXCSR.RC and suppress exceptions; 
see…",N,N],[17,"_SIDD_UBYTE_OPS",E,"String contains unsigned 8-bit characters (Default)",N,N],[17,"_SIDD_UWORD_OPS",E,R[222],N,N],[17,"_SIDD_SBYTE_OPS",E,"String contains signed 8-bit characters",N,N],[17,"_SIDD_SWORD_OPS",E,R[222],N,N],[17,"_SIDD_CMP_EQUAL_ANY",E,"For each character in `a`, find if it is in `b` (Default)",N,N],[17,"_SIDD_CMP_RANGES",E,"For each character in `a`, determine if `b[0] <= c <= b[1]…",N,N],[17,"_SIDD_CMP_EQUAL_EACH",E,"The strings defined by `a` and `b` are equal",N,N],[17,"_SIDD_CMP_EQUAL_ORDERED",E,"Search for the defined substring in the target",N,N],[17,"_SIDD_POSITIVE_POLARITY",E,"Do not negate results (Default)",N,N],[17,"_SIDD_NEGATIVE_POLARITY",E,"Negates results",N,N],[17,"_SIDD_MASKED_POSITIVE_POLARITY",E,"Do not negate results before the end of the string",N,N],[17,"_SIDD_MASKED_NEGATIVE_POLARITY",E,"Negates results only before the end of the string",N,N],[17,"_SIDD_LEAST_SIGNIFICANT",E,"Index only: return the least significant bit (Default)",N,N],[17,"_SIDD_MOST_SIGNIFICANT",E,"Index only: return the most significant bit",N,N],[17,"_SIDD_BIT_MASK",E,"Mask only: return the bit mask",N,N],[17,"_SIDD_UNIT_MASK",E,"Mask only: return the byte mask",N,N],[17,"_CMP_EQ_OQ",E,"Equal (ordered, non-signaling)",N,N],[17,"_CMP_LT_OS",E,"Less-than (ordered, signaling)",N,N],[17,"_CMP_LE_OS",E,"Less-than-or-equal (ordered, signaling)",N,N],[17,"_CMP_UNORD_Q",E,"Unordered (non-signaling)",N,N],[17,"_CMP_NEQ_UQ",E,"Not-equal (unordered, non-signaling)",N,N],[17,"_CMP_NLT_US",E,"Not-less-than (unordered, signaling)",N,N],[17,"_CMP_NLE_US",E,"Not-less-than-or-equal (unordered, signaling)",N,N],[17,"_CMP_ORD_Q",E,"Ordered (non-signaling)",N,N],[17,"_CMP_EQ_UQ",E,"Equal (unordered, non-signaling)",N,N],[17,"_CMP_NGE_US",E,"Not-greater-than-or-equal (unordered, signaling)",N,N],[17,"_CMP_NGT_US",E,"Not-greater-than (unordered, signaling)",N,N],[17,"_CMP_FALSE_OQ",E,"False (ordered, non-signaling)",N,N],[17,"_CMP_NEQ_OQ",E,"Not-equal (ordered, non-signaling)",N,N],[17,"_CMP_GE_OS",E,"Greater-than-or-equal (ordered, signaling)",N,N],[17,"_CMP_GT_OS",E,"Greater-than (ordered, signaling)",N,N],[17,"_CMP_TRUE_UQ",E,"True (unordered, non-signaling)",N,N],[17,"_CMP_EQ_OS",E,"Equal (ordered, signaling)",N,N],[17,"_CMP_LT_OQ",E,"Less-than (ordered, non-signaling)",N,N],[17,"_CMP_LE_OQ",E,"Less-than-or-equal (ordered, non-signaling)",N,N],[17,"_CMP_UNORD_S",E,"Unordered (signaling)",N,N],[17,"_CMP_NEQ_US",E,"Not-equal (unordered, signaling)",N,N],[17,"_CMP_NLT_UQ",E,"Not-less-than (unordered, non-signaling)",N,N],[17,"_CMP_NLE_UQ",E,"Not-less-than-or-equal (unordered, non-signaling)",N,N],[17,"_CMP_ORD_S",E,"Ordered (signaling)",N,N],[17,"_CMP_EQ_US",E,"Equal (unordered, signaling)",N,N],[17,"_CMP_NGE_UQ",E,"Not-greater-than-or-equal (unordered, non-signaling)",N,N],[17,"_CMP_NGT_UQ",E,"Not-greater-than (unordered, non-signaling)",N,N],[17,"_CMP_FALSE_OS",E,"False (ordered, signaling)",N,N],[17,"_CMP_NEQ_OS",E,"Not-equal (ordered, signaling)",N,N],[17,"_CMP_GE_OQ",E,"Greater-than-or-equal (ordered, non-signaling)",N,N],[17,"_CMP_GT_OQ",E,"Greater-than (ordered, non-signaling)",N,N],[17,"_CMP_TRUE_US",E,"True (unordered, signaling)",N,N],[17,"_XBEGIN_STARTED",E,"Transaction successfully started.",N,N],[17,"_XABORT_EXPLICIT",E,"Transaction explicitly aborted with xabort. 
The parameter…",N,N],[17,"_XABORT_RETRY",E,"Transaction retry is possible.",N,N],[17,"_XABORT_CONFLICT",E,"Transaction abort due to a memory conflict with another…",N,N],[17,"_XABORT_CAPACITY",E,"Transaction abort due to the transaction using too much…",N,N],[17,"_XABORT_DEBUG",E,"Transaction abort due to a debug trap.",N,N],[17,"_XABORT_NESTED",E,"Transaction abort in an inner nested transaction.",N,N],[11,R[224],E,E,1,[[[U]],["result"]]],[11,"into",E,E,1,[[],[U]]],[11,"from",E,E,1,[[[T]],[T]]],[11,R[225],E,E,1,[[],["result"]]],[11,"borrow",E,E,1,[[["self"]],[T]]],[11,R[223],E,E,1,[[["self"]],[T]]],[11,R[226],E,E,1,[[["self"]],["typeid"]]],[11,R[224],E,E,2,[[[U]],["result"]]],[11,"into",E,E,2,[[],[U]]],[11,"from",E,E,2,[[[T]],[T]]],[11,R[225],E,E,2,[[],["result"]]],[11,"borrow",E,E,2,[[["self"]],[T]]],[11,R[223],E,E,2,[[["self"]],[T]]],[11,R[226],E,E,2,[[["self"]],["typeid"]]],[11,R[224],E,E,3,[[[U]],["result"]]],[11,"into",E,E,3,[[],[U]]],[11,"from",E,E,3,[[[T]],[T]]],[11,R[225],E,E,3,[[],["result"]]],[11,"borrow",E,E,3,[[["self"]],[T]]],[11,R[223],E,E,3,[[["self"]],[T]]],[11,R[226],E,E,3,[[["self"]],["typeid"]]],[11,R[224],E,E,4,[[[U]],["result"]]],[11,"into",E,E,4,[[],[U]]],[11,"from",E,E,4,[[[T]],[T]]],[11,R[225],E,E,4,[[],["result"]]],[11,"borrow",E,E,4,[[["self"]],[T]]],[11,R[223],E,E,4,[[["self"]],[T]]],[11,R[226],E,E,4,[[["self"]],["typeid"]]],[11,R[224],E,E,5,[[[U]],["result"]]],[11,"into",E,E,5,[[],[U]]],[11,"from",E,E,5,[[[T]],[T]]],[11,R[225],E,E,5,[[],["result"]]],[11,"borrow",E,E,5,[[["self"]],[T]]],[11,R[223],E,E,5,[[["self"]],[T]]],[11,R[226],E,E,5,[[["self"]],["typeid"]]],[11,R[224],E,E,6,[[[U]],["result"]]],[11,"into",E,E,6,[[],[U]]],[11,"from",E,E,6,[[[T]],[T]]],[11,R[225],E,E,6,[[],["result"]]],[11,"borrow",E,E,6,[[["self"]],[T]]],[11,R[223],E,E,6,[[["self"]],[T]]],[11,R[226],E,E,6,[[["self"]],["typeid"]]],[11,R[224],E,E,7,[[[U]],["result"]]],[11,"into",E,E,7,[[],[U]]],[11,"from",E,E,7,[[[T]],[T]]],[11,R[225],E,E,7,[[],["result"]]],[11,"borrow",E,E,7,[[["self"]],[T]]],[11,R[223],E,E,7,[[["self"]],[T]]],[11,R[226],E,E,7,[[["self"]],["typeid"]]],[11,R[224],E,E,8,[[[U]],["result"]]],[11,"into",E,E,8,[[],[U]]],[11,"from",E,E,8,[[[T]],[T]]],[11,R[225],E,E,8,[[],["result"]]],[11,"borrow",E,E,8,[[["self"]],[T]]],[11,R[223],E,E,8,[[["self"]],[T]]],[11,R[226],E,E,8,[[["self"]],["typeid"]]],[11,R[224],E,E,9,[[[U]],["result"]]],[11,"into",E,E,9,[[],[U]]],[11,"from",E,E,9,[[[T]],[T]]],[11,R[225],E,E,9,[[],["result"]]],[11,"borrow",E,E,9,[[["self"]],[T]]],[11,R[223],E,E,9,[[["self"]],[T]]],[11,R[226],E,E,9,[[["self"]],["typeid"]]],[11,R[224],E,E,10,[[[U]],["result"]]],[11,"into",E,E,10,[[],[U]]],[11,"from",E,E,10,[[[T]],[T]]],[11,R[225],E,E,10,[[],["result"]]],[11,"borrow",E,E,10,[[["self"]],[T]]],[11,R[223],E,E,10,[[["self"]],[T]]],[11,R[226],E,E,10,[[["self"]],["typeid"]]],[11,R[224],E,E,0,[[[U]],["result"]]],[11,"into",E,E,0,[[],[U]]],[11,"from",E,E,0,[[[T]],[T]]],[11,R[225],E,E,0,[[],["result"]]],[11,"borrow",E,E,0,[[["self"]],[T]]],[11,R[223],E,E,0,[[["self"]],[T]]],[11,R[226],E,E,0,[[["self"]],["typeid"]]],[11,"eq",E,E,0,[[["self"],[R[1]]],["bool"]]],[11,"ne",E,E,0,[[["self"],[R[1]]],["bool"]]],[11,"cmp",E,E,0,[[["self"],[R[1]]],[R[227]]]],[11,"partial_cmp",E,E,0,[[["self"],[R[1]]],[["option",[R[227]]],[R[227]]]]],[11,"lt",E,E,0,[[["self"],[R[1]]],["bool"]]],[11,"le",E,E,0,[[["self"],[R[1]]],["bool"]]],[11,"gt",E,E,0,[[["self"],[R[1]]],["bool"]]],[11,"ge",E,E,0,[[["self"],[R[1]]],["bool"]]],[11,"fmt",E,E,1,[[["self"],[R[228]]],["result"]]],[11,"fmt",E,E,2,[[["self"],[R[228]]],[
"result"]]],[11,"fmt",E,E,3,[[["self"],[R[228]]],["result"]]],[11,"fmt",E,E,4,[[["self"],[R[228]]],["result"]]],[11,"fmt",E,E,5,[[["self"],[R[228]]],["result"]]],[11,"fmt",E,E,6,[[["self"],[R[228]]],["result"]]],[11,"fmt",E,E,7,[[["self"],[R[228]]],["result"]]],[11,"fmt",E,E,8,[[["self"],[R[228]]],["result"]]],[11,"fmt",E,E,9,[[["self"],[R[228]]],["result"]]],[11,"fmt",E,E,10,[[["self"],[R[228]]],["result"]]],[11,"fmt",E,E,0,[[["self"],[R[228]]],["result"]]],[11,"clone",E,E,1,[[["self"]],["__m64"]]],[11,"clone",E,E,2,[[["self"]],[R[32]]]],[11,"clone",E,E,3,[[["self"]],[R[4]]]],[11,"clone",E,E,4,[[["self"]],[R[44]]]],[11,"clone",E,E,5,[[["self"]],[R[112]]]],[11,"clone",E,E,6,[[["self"]],["__m256"]]],[11,"clone",E,E,7,[[["self"]],[R[100]]]],[11,"clone",E,E,8,[[["self"]],[R[217]]]],[11,"clone",E,E,9,[[["self"]],["__m512"]]],[11,"clone",E,E,10,[[["self"]],["__m512d"]]],[11,"clone",E,E,0,[[["self"]],[R[1]]]]],"p":[[3,R[229]],[3,"__m64"],[3,R[32]],[3,R[4]],[3,R[44]],[3,R[112]],[3,"__m256"],[3,R[100]],[3,R[217]],[3,"__m512"],[3,"__m512d"]]}; |