| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py | 
 | ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvcp,+zvfh \ | 
 | ; RUN:    -verify-machineinstrs | FileCheck %s | 
 | ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvcp,+zvfh \ | 
 | ; RUN:    -verify-machineinstrs | FileCheck %s | 
 |  | 
 | define void @test_sf_vc_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e8mf8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e8mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e8mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e8m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e8m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e8m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e8m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e16mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e16mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e16m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e16m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e16m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e16m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e32mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e32m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e32m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e32m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_x_se_e32m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 1 x i8> @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 1 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 2 x i8> @test_sf_vc_v_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 2 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 4 x i8> @test_sf_vc_v_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 4 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 8 x i8> @test_sf_vc_v_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e8m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 8 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 16 x i8> @test_sf_vc_v_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e8m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 16 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 32 x i8> @test_sf_vc_v_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e8m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 32 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 64 x i8> @test_sf_vc_v_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e8m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 64 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 1 x i16> @test_sf_vc_v_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e16mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 1 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 2 x i16> @test_sf_vc_v_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e16mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 2 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 4 x i16> @test_sf_vc_v_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e16m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 4 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 8 x i16> @test_sf_vc_v_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e16m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 8 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 16 x i16> @test_sf_vc_v_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e16m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 16 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 32 x i16> @test_sf_vc_v_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e16m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 32 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 1 x i32> @test_sf_vc_v_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e32mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 1 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 2 x i32> @test_sf_vc_v_x_se_e32m1(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e32m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 2 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 4 x i32> @test_sf_vc_v_x_se_e32m2(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e32m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 4 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 8 x i32> @test_sf_vc_v_x_se_e32m4(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e32m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 8 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 16 x i32> @test_sf_vc_v_x_se_e32m8(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_se_e32m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 16 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 1 x i8> @test_sf_vc_v_x_e8mf8(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e8mf8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 1 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 2 x i8> @test_sf_vc_v_x_e8mf4(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e8mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 2 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 4 x i8> @test_sf_vc_v_x_e8mf2(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e8mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 4 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 8 x i8> @test_sf_vc_v_x_e8m1(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e8m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 8 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 16 x i8> @test_sf_vc_v_x_e8m2(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e8m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 16 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 32 x i8> @test_sf_vc_v_x_e8m4(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e8m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 32 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 64 x i8> @test_sf_vc_v_x_e8m8(i8 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e8m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) | 
 |   ret <vscale x 64 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) | 
 |  | 
 | define <vscale x 1 x i16> @test_sf_vc_v_x_e16mf4(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e16mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 1 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 2 x i16> @test_sf_vc_v_x_e16mf2(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e16mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 2 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 4 x i16> @test_sf_vc_v_x_e16m1(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e16m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 4 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 8 x i16> @test_sf_vc_v_x_e16m2(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e16m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 8 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 16 x i16> @test_sf_vc_v_x_e16m4(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e16m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 16 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 32 x i16> @test_sf_vc_v_x_e16m8(i16 zeroext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e16m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 32 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 1 x i32> @test_sf_vc_v_x_e32mf2(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e32mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 1 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 2 x i32> @test_sf_vc_v_x_e32m1(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e32m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 2 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 4 x i32> @test_sf_vc_v_x_e32m2(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e32m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 4 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 8 x i32> @test_sf_vc_v_x_e32m4(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e32m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 8 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 16 x i32> @test_sf_vc_v_x_e32m8(i32 signext %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_x_e32m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 16 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e8mf8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e8mf8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e8mf4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e8mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e8mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e8mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e8m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e8m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e8m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e8m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e8m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e8m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e8m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e8m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e16mf4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e16mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e16mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e16mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e16m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e16m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e16m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e16m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e16m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e16m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e16m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e16m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e32mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e32mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e32m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e32m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e32m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e32m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e32m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e32m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e32m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e32m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e64m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e64m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e64m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e64m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e64m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e64m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define void @test_sf_vc_i_se_e64m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_i_se_e64m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   tail call void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret void | 
 | } | 
 |  | 
 | declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x i8> @test_sf_vc_v_i_se_e8mf8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x i8> @test_sf_vc_v_i_se_e8mf4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x i8> @test_sf_vc_v_i_se_e8mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x i8> @test_sf_vc_v_i_se_e8m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e8m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x i8> @test_sf_vc_v_i_se_e8m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e8m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 16 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 32 x i8> @test_sf_vc_v_i_se_e8m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e8m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 32 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 64 x i8> @test_sf_vc_v_i_se_e8m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e8m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 64 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x i16> @test_sf_vc_v_i_se_e16mf4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e16mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x i16> @test_sf_vc_v_i_se_e16mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e16mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x i16> @test_sf_vc_v_i_se_e16m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e16m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x i16> @test_sf_vc_v_i_se_e16m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e16m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x i16> @test_sf_vc_v_i_se_e16m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e16m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 16 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 32 x i16> @test_sf_vc_v_i_se_e16m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e16m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 32 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x i32> @test_sf_vc_v_i_se_e32mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e32mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x i32> @test_sf_vc_v_i_se_e32m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e32m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x i32> @test_sf_vc_v_i_se_e32m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e32m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x i32> @test_sf_vc_v_i_se_e32m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e32m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x i32> @test_sf_vc_v_i_se_e32m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e32m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 16 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x i64> @test_sf_vc_v_i_se_e64m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e64m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x i64> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x i64> @test_sf_vc_v_i_se_e64m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e64m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x i64> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x i64> @test_sf_vc_v_i_se_e64m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e64m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x i64> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x i64> @test_sf_vc_v_i_se_e64m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_se_e64m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x i64> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x i8> @test_sf_vc_v_i_e8mf8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e8mf8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x i8> @test_sf_vc_v_i_e8mf4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e8mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x i8> @test_sf_vc_v_i_e8mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e8mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x i8> @test_sf_vc_v_i_e8m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e8m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x i8> @test_sf_vc_v_i_e8m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e8m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 16 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 32 x i8> @test_sf_vc_v_i_e8m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e8m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 32 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 64 x i8> @test_sf_vc_v_i_e8m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e8m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 64 x i8> %0 | 
 | } | 
 |  | 
 | declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x i16> @test_sf_vc_v_i_e16mf4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e16mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x i16> @test_sf_vc_v_i_e16mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e16mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x i16> @test_sf_vc_v_i_e16m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e16m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x i16> @test_sf_vc_v_i_e16m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e16m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x i16> @test_sf_vc_v_i_e16m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e16m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 16 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 32 x i16> @test_sf_vc_v_i_e16m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e16m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 32 x i16> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x i32> @test_sf_vc_v_i_e32mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e32mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x i32> @test_sf_vc_v_i_e32m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e32m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x i32> @test_sf_vc_v_i_e32m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e32m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x i32> @test_sf_vc_v_i_e32m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e32m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x i32> @test_sf_vc_v_i_e32m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e32m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 16 x i32> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x i64> @test_sf_vc_v_i_e64m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e64m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x i64> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x i64> @test_sf_vc_v_i_e64m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e64m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x i64> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x i64> @test_sf_vc_v_i_e64m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e64m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x i64> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x i64> @test_sf_vc_v_i_e64m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_v_i_e64m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x i64> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x half> @test_f_sf_vc_v_i_se_e16mf4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x half> @test_f_sf_vc_v_i_se_e16mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x half> @test_f_sf_vc_v_i_se_e16m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x half> @test_f_sf_vc_v_i_se_e16m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x half> @test_f_sf_vc_v_i_se_e16m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 16 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 32 x half> @test_f_sf_vc_v_i_se_e16m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 32 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x float> @test_f_sf_vc_v_i_se_e32mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x float> @test_f_sf_vc_v_i_se_e32m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x float> @test_f_sf_vc_v_i_se_e32m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x float> @test_f_sf_vc_v_i_se_e32m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x float> @test_f_sf_vc_v_i_se_e32m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 16 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x double> @test_f_sf_vc_v_i_se_e64m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x double> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x double> @test_f_sf_vc_v_i_se_e64m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x double> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x double> @test_f_sf_vc_v_i_se_e64m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x double> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x double> @test_f_sf_vc_v_i_se_e64m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x double> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x half> @test_f_sf_vc_v_i_e16mf4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e16mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.nxv1f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x half> @test_f_sf_vc_v_i_e16mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e16mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.nxv2f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x half> @test_f_sf_vc_v_i_e16m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e16m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.nxv4f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x half> @test_f_sf_vc_v_i_e16m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e16m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.nxv8f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x half> @test_f_sf_vc_v_i_e16m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e16m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 16 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.nxv16f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 32 x half> @test_f_sf_vc_v_i_e16m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e16m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 32 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.nxv32f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x float> @test_f_sf_vc_v_i_e32mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e32mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.nxv1f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x float> @test_f_sf_vc_v_i_e32m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e32m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.nxv2f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x float> @test_f_sf_vc_v_i_e32m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e32m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.nxv4f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x float> @test_f_sf_vc_v_i_e32m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e32m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.nxv8f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x float> @test_f_sf_vc_v_i_e32m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e32m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 16 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.nxv16f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x double> @test_f_sf_vc_v_i_e64m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e64m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 1 x double> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.nxv1f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x double> @test_f_sf_vc_v_i_e64m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e64m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 2 x double> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.nxv2f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x double> @test_f_sf_vc_v_i_e64m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e64m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 4 x double> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.nxv4f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x double> @test_f_sf_vc_v_i_e64m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_f_sf_vc_v_i_e64m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) | 
 |   ret <vscale x 8 x double> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x half> @test_sf_vc_fv_x_se_e16mf4(i16 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.x.se.nxv1f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 1 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.x.se.nxv1f16.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 2 x half> @test_sf_vc_fv_x_se_e16mf2(i16 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.x.se.nxv2f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 2 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.x.se.nxv2f16.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 4 x half> @test_sf_vc_fv_x_se_e16m1(i16 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.x.se.nxv4f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 4 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.x.se.nxv4f16.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 8 x half> @test_sf_vc_fv_x_se_e16m2(i16 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.x.se.nxv8f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 8 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.x.se.nxv8f16.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 16 x half> @test_sf_vc_fv_x_se_e16m4(i16 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.x.se.nxv16f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 16 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.x.se.nxv16f16.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 32 x half> @test_sf_vc_fv_x_se_e16m8(i16 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.x.se.nxv32f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl) | 
 |   ret <vscale x 32 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.x.se.nxv32f16.i16.iXLen(iXLen, iXLen, i16, iXLen) | 
 |  | 
 | define <vscale x 1 x float> @test_sf_vc_fv_x_se_e32mf2(i32 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e32mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 1 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 2 x float> @test_sf_vc_fv_x_se_e32m1(i32 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 2 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 4 x float> @test_sf_vc_fv_x_se_e32m2(i32 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 4 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 8 x float> @test_sf_vc_fv_x_se_e32m4(i32 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 8 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 16 x float> @test_sf_vc_fv_x_se_e32m8(i32 %rs1, iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl) | 
 |   ret <vscale x 16 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.iXLen(iXLen, iXLen, i32, iXLen) | 
 |  | 
 | define <vscale x 1 x half> @test_sf_vc_fv_i_se_e16mf4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 1 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x half> @test_sf_vc_fv_i_se_e16mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 2 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x half> @test_sf_vc_fv_i_se_e16m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 4 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x half> @test_sf_vc_fv_i_se_e16m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 8 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x half> @test_sf_vc_fv_i_se_e16m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 16 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 32 x half> @test_sf_vc_fv_i_se_e16m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 32 x half> %0 | 
 | } | 
 |  | 
 | declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 1 x float> @test_sf_vc_fv_i_se_e32mf2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e32mf2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 1 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 2 x float> @test_sf_vc_fv_i_se_e32m1(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m1: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 2 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 4 x float> @test_sf_vc_fv_i_se_e32m2(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m2: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 4 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 8 x float> @test_sf_vc_fv_i_se_e32m4(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m4: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 8 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) | 
 |  | 
 | define <vscale x 16 x float> @test_sf_vc_fv_i_se_e32m8(iXLen %vl) { | 
 | ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m8: | 
 | ; CHECK:       # %bb.0: # %entry | 
 | ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma | 
 | ; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4 | 
 | ; CHECK-NEXT:    ret | 
 | entry: | 
 |   %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl) | 
 |   ret <vscale x 16 x float> %0 | 
 | } | 
 |  | 
 | declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) |