| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 |
| |
| ;; Test the ual feature which is similar to AArch64/arm64-strict-align.ll. |
| |
; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32-ALIGNED
| ; RUN: llc --mtriple=loongarch32 --mattr=+ual < %s | FileCheck %s --check-prefix=LA32-UNALIGNED |
| ; RUN: llc --mtriple=loongarch32 --mattr=-ual < %s | FileCheck %s --check-prefix=LA32-ALIGNED |
| |
; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64-UNALIGNED
| ; RUN: llc --mtriple=loongarch64 --mattr=+ual < %s | FileCheck %s --check-prefix=LA64-UNALIGNED |
| ; RUN: llc --mtriple=loongarch64 --mattr=-ual < %s | FileCheck %s --check-prefix=LA64-ALIGNED |
| |
;; Underaligned i32 load (align 2). Without unaligned-access support the
;; load is legalized into two naturally-aligned halfword loads that are
;; recombined with a shift and an or; with unaligned-access support a single
;; word load is emitted. Note the LA64 run without an explicit ual attribute
;; uses the unaligned prefix, i.e. loongarch64 enables ual by default,
;; while loongarch32 defaults to aligned-only accesses.
define i32 @f0(ptr %p) nounwind {
; LA32-ALIGNED-LABEL: f0:
; LA32-ALIGNED:       # %bb.0:
; LA32-ALIGNED-NEXT:    ld.hu $a1, $a0, 2
; LA32-ALIGNED-NEXT:    ld.hu $a0, $a0, 0
; LA32-ALIGNED-NEXT:    slli.w $a1, $a1, 16
; LA32-ALIGNED-NEXT:    or $a0, $a1, $a0
; LA32-ALIGNED-NEXT:    ret
;
; LA32-UNALIGNED-LABEL: f0:
; LA32-UNALIGNED:       # %bb.0:
; LA32-UNALIGNED-NEXT:    ld.w $a0, $a0, 0
; LA32-UNALIGNED-NEXT:    ret
;
; LA64-UNALIGNED-LABEL: f0:
; LA64-UNALIGNED:       # %bb.0:
; LA64-UNALIGNED-NEXT:    ld.w $a0, $a0, 0
; LA64-UNALIGNED-NEXT:    ret
;
; LA64-ALIGNED-LABEL: f0:
; LA64-ALIGNED:       # %bb.0:
; LA64-ALIGNED-NEXT:    ld.h $a1, $a0, 2
; LA64-ALIGNED-NEXT:    ld.hu $a0, $a0, 0
; LA64-ALIGNED-NEXT:    slli.d $a1, $a1, 16
; LA64-ALIGNED-NEXT:    or $a0, $a1, $a0
; LA64-ALIGNED-NEXT:    ret
  %tmp = load i32, ptr %p, align 2
  ret i32 %tmp
}
| |
;; Underaligned i64 load (align 4). On LA32 an i64 load is always split into
;; two word loads regardless of the ual setting, since i64 is not a legal
;; scalar type there; the difference only shows on LA64, where the aligned
;; configuration splits the access into two zero-extending word loads merged
;; with a shift and an or, and the unaligned configuration emits one ld.d.
define i64 @f1(ptr %p) nounwind {
; LA32-ALIGNED-LABEL: f1:
; LA32-ALIGNED:       # %bb.0:
; LA32-ALIGNED-NEXT:    ld.w $a2, $a0, 0
; LA32-ALIGNED-NEXT:    ld.w $a1, $a0, 4
; LA32-ALIGNED-NEXT:    move $a0, $a2
; LA32-ALIGNED-NEXT:    ret
;
; LA32-UNALIGNED-LABEL: f1:
; LA32-UNALIGNED:       # %bb.0:
; LA32-UNALIGNED-NEXT:    ld.w $a2, $a0, 0
; LA32-UNALIGNED-NEXT:    ld.w $a1, $a0, 4
; LA32-UNALIGNED-NEXT:    move $a0, $a2
; LA32-UNALIGNED-NEXT:    ret
;
; LA64-UNALIGNED-LABEL: f1:
; LA64-UNALIGNED:       # %bb.0:
; LA64-UNALIGNED-NEXT:    ld.d $a0, $a0, 0
; LA64-UNALIGNED-NEXT:    ret
;
; LA64-ALIGNED-LABEL: f1:
; LA64-ALIGNED:       # %bb.0:
; LA64-ALIGNED-NEXT:    ld.wu $a1, $a0, 4
; LA64-ALIGNED-NEXT:    ld.wu $a0, $a0, 0
; LA64-ALIGNED-NEXT:    slli.d $a1, $a1, 32
; LA64-ALIGNED-NEXT:    or $a0, $a1, $a0
; LA64-ALIGNED-NEXT:    ret
  %tmp = load i64, ptr %p, align 4
  ret i64 %tmp
}