| // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py |
| // REQUIRES: powerpc-registered-target |
| |
| // RUN: %clang -S -emit-llvm -target powerpc64-gnu-linux -mcpu=pwr8 -DNO_MM_MALLOC -ffreestanding -DNO_WARN_X86_INTRINSICS %s \ |
| // RUN: -fno-discard-value-names -mllvm -disable-llvm-optzns -o - | llvm-cxxfilt -n | FileCheck %s |
| // RUN: %clang -S -emit-llvm -target powerpc64le-gnu-linux -mcpu=pwr8 -DNO_MM_MALLOC -ffreestanding -DNO_WARN_X86_INTRINSICS %s \ |
| // RUN: -fno-discard-value-names -mllvm -disable-llvm-optzns -o - | llvm-cxxfilt -n | FileCheck %s |
| |
| // RUN: %clang -S -emit-llvm -target powerpc64-unknown-freebsd13.0 -mcpu=pwr8 -DNO_MM_MALLOC -ffreestanding -DNO_WARN_X86_INTRINSICS %s \ |
| // RUN: -fno-discard-value-names -mllvm -disable-llvm-optzns -o - | llvm-cxxfilt -n | FileCheck %s |
| // RUN: %clang -S -emit-llvm -target powerpc64le-unknown-freebsd13.0 -mcpu=pwr8 -DNO_MM_MALLOC -ffreestanding -DNO_WARN_X86_INTRINSICS %s \ |
| // RUN: -fno-discard-value-names -mllvm -disable-llvm-optzns -o - | llvm-cxxfilt -n | FileCheck %s |
| |
| #include <pmmintrin.h> |
| |
// File-scope operands and result slots for the intrinsic calls below.
// These must stay globals with these exact names: the autogenerated CHECK
// lines match IR loads/stores against @resd, @md1, @md2, @res, @m1, @m2,
// @resi, @mi, and @d.
__m128d resd, md1, md2; // double-precision vector result and operands
__m128 res, m1, m2;     // single-precision vector result and operands
__m128i resi, mi;       // integer vector result and operand
double *d;              // scalar-pointer operand for _mm_loaddup_pd
| |
// Calls every pmmintrin.h (SSE3 compatibility) wrapper exactly once so the
// CHECK lines below can verify that each call lowers to the expected
// PowerPC-side implementation function. noinline keeps the calls visible in
// the IR; the CHECK block between the return type and the function name is
// autogenerated — do not edit it by hand (see NOTE at the top of the file).
void __attribute__((noinline))
// CHECK-LABEL: @test_pmmintrin(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, <2 x double>* @md1, align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* @md2, align 16
// CHECK-NEXT: [[CALL:%.*]] = call <2 x double> @_mm_addsub_pd(<2 x double> noundef [[TMP0]], <2 x double> noundef [[TMP1]]) #[[ATTR4:[0-9]+]]
// CHECK-NEXT: store <2 x double> [[CALL]], <2 x double>* @resd, align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* @m1, align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x float>, <4 x float>* @m2, align 16
// CHECK-NEXT: [[CALL1:%.*]] = call <4 x float> @_mm_addsub_ps(<4 x float> noundef [[TMP2]], <4 x float> noundef [[TMP3]]) #[[ATTR4]]
// CHECK-NEXT: store <4 x float> [[CALL1]], <4 x float>* @res, align 16
// CHECK-NEXT: [[TMP4:%.*]] = load <2 x double>, <2 x double>* @md1, align 16
// CHECK-NEXT: [[TMP5:%.*]] = load <2 x double>, <2 x double>* @md2, align 16
// CHECK-NEXT: [[CALL2:%.*]] = call <2 x double> @_mm_hadd_pd(<2 x double> noundef [[TMP4]], <2 x double> noundef [[TMP5]]) #[[ATTR4]]
// CHECK-NEXT: store <2 x double> [[CALL2]], <2 x double>* @resd, align 16
// CHECK-NEXT: [[TMP6:%.*]] = load <4 x float>, <4 x float>* @m1, align 16
// CHECK-NEXT: [[TMP7:%.*]] = load <4 x float>, <4 x float>* @m2, align 16
// CHECK-NEXT: [[CALL3:%.*]] = call <4 x float> @_mm_hadd_ps(<4 x float> noundef [[TMP6]], <4 x float> noundef [[TMP7]]) #[[ATTR4]]
// CHECK-NEXT: store <4 x float> [[CALL3]], <4 x float>* @res, align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <2 x double>, <2 x double>* @md1, align 16
// CHECK-NEXT: [[TMP9:%.*]] = load <2 x double>, <2 x double>* @md2, align 16
// CHECK-NEXT: [[CALL4:%.*]] = call <2 x double> @_mm_hsub_pd(<2 x double> noundef [[TMP8]], <2 x double> noundef [[TMP9]]) #[[ATTR4]]
// CHECK-NEXT: store <2 x double> [[CALL4]], <2 x double>* @resd, align 16
// CHECK-NEXT: [[TMP10:%.*]] = load <4 x float>, <4 x float>* @m1, align 16
// CHECK-NEXT: [[TMP11:%.*]] = load <4 x float>, <4 x float>* @m2, align 16
// CHECK-NEXT: [[CALL5:%.*]] = call <4 x float> @_mm_hsub_ps(<4 x float> noundef [[TMP10]], <4 x float> noundef [[TMP11]]) #[[ATTR4]]
// CHECK-NEXT: store <4 x float> [[CALL5]], <4 x float>* @res, align 16
// CHECK-NEXT: [[CALL6:%.*]] = call <2 x i64> @_mm_lddqu_si128(<2 x i64>* noundef @mi) #[[ATTR4]]
// CHECK-NEXT: store <2 x i64> [[CALL6]], <2 x i64>* @resi, align 16
// CHECK-NEXT: [[TMP12:%.*]] = load double*, double** @d, align 8
// CHECK-NEXT: [[CALL7:%.*]] = call <2 x double> @_mm_loaddup_pd(double* noundef [[TMP12]]) #[[ATTR4]]
// CHECK-NEXT: store <2 x double> [[CALL7]], <2 x double>* @resd, align 16
// CHECK-NEXT: [[TMP13:%.*]] = load <2 x double>, <2 x double>* @md1, align 16
// CHECK-NEXT: [[CALL8:%.*]] = call <2 x double> @_mm_movedup_pd(<2 x double> noundef [[TMP13]]) #[[ATTR4]]
// CHECK-NEXT: store <2 x double> [[CALL8]], <2 x double>* @resd, align 16
// CHECK-NEXT: [[TMP14:%.*]] = load <4 x float>, <4 x float>* @m1, align 16
// CHECK-NEXT: [[CALL9:%.*]] = call <4 x float> @_mm_movehdup_ps(<4 x float> noundef [[TMP14]]) #[[ATTR4]]
// CHECK-NEXT: store <4 x float> [[CALL9]], <4 x float>* @res, align 16
// CHECK-NEXT: [[TMP15:%.*]] = load <4 x float>, <4 x float>* @m1, align 16
// CHECK-NEXT: [[CALL10:%.*]] = call <4 x float> @_mm_moveldup_ps(<4 x float> noundef [[TMP15]]) #[[ATTR4]]
// CHECK-NEXT: store <4 x float> [[CALL10]], <4 x float>* @res, align 16
// CHECK-NEXT: ret void
//
test_pmmintrin() {
  resd = _mm_addsub_pd(md1, md2);
  res = _mm_addsub_ps(m1, m2);
  resd = _mm_hadd_pd(md1, md2);
  //
  res = _mm_hadd_ps(m1, m2);
  resd = _mm_hsub_pd(md1, md2);
  res = _mm_hsub_ps(m1, m2);
  resi = _mm_lddqu_si128(&mi);
  resd = _mm_loaddup_pd(d);
  resd = _mm_movedup_pd(md1);
  res = _mm_movehdup_ps(m1);
  res = _mm_moveldup_ps(m1);
}
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |