For scalable vectors, pass the maximum runtime step to materializeVectorTripCount and use it to simplify the vector trip count to the original trip count directly when MaxRuntimeSteps divides the original trip count without remainder. In that case, every lower power-of-2 vscale also divides the trip count without remainder. PR: https://github.com/llvm/llvm-project/pull/193067
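
The effect is visible in the checks below: the vector latch compares the next index directly against the original trip count (4096) and the middle block branches unconditionally, with no separate vector-trip-count computation. As a minimal hypothetical sketch (names and shapes are illustrative, not taken from this test) of the IR the simplification folds away, assuming a constant trip count of 4096 and a maximum runtime step of vscale x 4 with vscale a power of 2:

; Before: round the trip count down to a multiple of the runtime step.
vector.ph:
  %vscale = call i64 @llvm.vscale.i64()
  %step = shl nuw i64 %vscale, 2       ; step = vscale x 4
  %n.mod.vf = urem i64 4096, %step     ; leftover iterations
  %n.vec = sub i64 4096, %n.mod.vf     ; vector trip count
; After: since the maximum runtime step divides 4096, every smaller
; power-of-2 vscale does too, so %n.mod.vf is 0 and %n.vec is simply 4096.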
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; FIXME: fmaximumnum/fminimumnum have no vectorizing support yet.
; RUN: opt --passes=loop-vectorize --mtriple=riscv64 -mattr="+v,+zvfh" -S < %s | FileCheck %s
; RUN: opt --passes=loop-vectorize --mtriple=riscv64 -mattr="+v,+zvfhmin" -S < %s | FileCheck %s --check-prefix=ZVFHMIN

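; Both RUN lines vectorize f32 the same way: VF = vscale x 4, guarded by
; runtime pointer-overlap checks, with a scalar epilogue loop.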
define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef readonly captures(none) %input2, ptr noundef writeonly captures(none) %output) {
; CHECK-LABEL: define void @fmin32(
; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = shl nuw i64 [[TMP8]], 2
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP13]], i64 15)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP4]], 4
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP16]]
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP7]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP18:%.*]] = shl nuw i64 [[TMP9]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP10]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; CHECK-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[OUT:%.*]] = tail call float @llvm.minimumnum.f32(float [[IN1]], float [[IN2]])
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: store float [[OUT]], ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; ZVFHMIN-LABEL: define void @fmin32(
; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFHMIN-NEXT: [[ENTRY:.*]]:
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 15)
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4
; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; ZVFHMIN: [[VECTOR_PH]]:
; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 2
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4
; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x float>, ptr [[TMP15]], align 4
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP18]], align 4
; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; ZVFHMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
; ZVFHMIN-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; ZVFHMIN: [[SCALAR_PH]]:
; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call float @llvm.minimumnum.f32(float [[IN1]], float [[IN2]])
; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: store float [[OUT]], ptr [[ARRAYIDX4]], align 4
; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds nuw [4096 x float], ptr %input1, i64 0, i64 %iv
  %in1 = load float, ptr %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds nuw [4096 x float], ptr %input2, i64 0, i64 %iv
  %in2 = load float, ptr %arrayidx2, align 4
  %out = tail call float @llvm.minimumnum.f32(float %in1, float %in2)
  %arrayidx4 = getelementptr inbounds nuw [4096 x float], ptr %output, i64 0, i64 %iv
  store float %out, ptr %arrayidx4, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 4096
  br i1 %exitcond.not, label %exit, label %for.body

exit:
  ret void
}


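; Same shape as @fmin32, using @llvm.maximumnum instead.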
define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef readonly captures(none) %input2, ptr noundef writeonly captures(none) %output) {
; CHECK-LABEL: define void @fmax32(
; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = shl nuw i64 [[TMP8]], 2
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP13]], i64 15)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP4]], 4
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP16]]
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP7]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP18:%.*]] = shl nuw i64 [[TMP9]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.maximumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP10]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; CHECK-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[OUT:%.*]] = tail call float @llvm.maximumnum.f32(float [[IN1]], float [[IN2]])
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: store float [[OUT]], ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; ZVFHMIN-LABEL: define void @fmax32(
; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; ZVFHMIN-NEXT: [[ENTRY:.*]]:
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 15)
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4
; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; ZVFHMIN: [[VECTOR_PH]]:
; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 2
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4
; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x float>, ptr [[TMP15]], align 4
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.maximumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP18]], align 4
; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; ZVFHMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
; ZVFHMIN-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; ZVFHMIN: [[SCALAR_PH]]:
; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call float @llvm.maximumnum.f32(float [[IN1]], float [[IN2]])
; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: store float [[OUT]], ptr [[ARRAYIDX4]], align 4
; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds nuw [4096 x float], ptr %input1, i64 0, i64 %iv
  %in1 = load float, ptr %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds nuw [4096 x float], ptr %input2, i64 0, i64 %iv
  %in2 = load float, ptr %arrayidx2, align 4
  %out = tail call float @llvm.maximumnum.f32(float %in1, float %in2)
  %arrayidx4 = getelementptr inbounds nuw [4096 x float], ptr %output, i64 0, i64 %iv
  store float %out, ptr %arrayidx4, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 4096
  br i1 %exitcond.not, label %exit, label %for.body

exit:
  ret void
}


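; f64 uses VF = vscale x 2; the constant trip count 4096 is again compared
; against directly in the vector latch.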
define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef readonly captures(none) %input2, ptr noundef writeonly captures(none) %output) {
; CHECK-LABEL: define void @fmin64(
; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = shl nuw i64 [[TMP8]], 1
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP13]], i64 15)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP15]], 2
; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP4]], 8
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP16]]
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 8
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP7]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP18:%.*]] = shl nuw i64 [[TMP9]], 1
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[TMP2]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 2 x double>, ptr [[TMP5]], align 8
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.minimumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP10]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; CHECK-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN1:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN2:%.*]] = load double, ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT: [[OUT:%.*]] = tail call double @llvm.minimumnum.f64(double [[IN1]], double [[IN2]])
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: store double [[OUT]], ptr [[ARRAYIDX4]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; ZVFHMIN-LABEL: define void @fmin64(
; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; ZVFHMIN-NEXT: [[ENTRY:.*]]:
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1
; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 15)
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 8
; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; ZVFHMIN: [[VECTOR_PH]]:
; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 1
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[TMP13]], align 8
; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 2 x double>, ptr [[TMP15]], align 8
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.minimumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP18]], align 8
; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; ZVFHMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
; ZVFHMIN-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; ZVFHMIN: [[SCALAR_PH]]:
; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN1:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN2:%.*]] = load double, ptr [[ARRAYIDX2]], align 8
; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call double @llvm.minimumnum.f64(double [[IN1]], double [[IN2]])
; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: store double [[OUT]], ptr [[ARRAYIDX4]], align 8
; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds nuw [4096 x double], ptr %input1, i64 0, i64 %iv
  %in1 = load double, ptr %arrayidx, align 8
  %arrayidx2 = getelementptr inbounds nuw [4096 x double], ptr %input2, i64 0, i64 %iv
  %in2 = load double, ptr %arrayidx2, align 8
  %out = tail call double @llvm.minimumnum.f64(double %in1, double %in2)
  %arrayidx4 = getelementptr inbounds nuw [4096 x double], ptr %output, i64 0, i64 %iv
  store double %out, ptr %arrayidx4, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 4096
  br i1 %exitcond.not, label %exit, label %for.body

exit:
  ret void
}


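; f64 fmaximumnum, mirroring @fmin64.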
define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef readonly captures(none) %input2, ptr noundef writeonly captures(none) %output) {
; CHECK-LABEL: define void @fmax64(
; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = shl nuw i64 [[TMP8]], 1
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP13]], i64 15)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP15]], 2
; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP4]], 8
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP16]]
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 8
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP7]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP18:%.*]] = shl nuw i64 [[TMP9]], 1
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[TMP2]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 2 x double>, ptr [[TMP5]], align 8
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP10]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; CHECK-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN1:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN2:%.*]] = load double, ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT: [[OUT:%.*]] = tail call double @llvm.maximumnum.f64(double [[IN1]], double [[IN2]])
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: store double [[OUT]], ptr [[ARRAYIDX4]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; ZVFHMIN-LABEL: define void @fmax64(
; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; ZVFHMIN-NEXT: [[ENTRY:.*]]:
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1
; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 15)
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 8
; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; ZVFHMIN: [[VECTOR_PH]]:
; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 1
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[TMP13]], align 8
; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 2 x double>, ptr [[TMP15]], align 8
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP18]], align 8
; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; ZVFHMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
; ZVFHMIN-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; ZVFHMIN: [[SCALAR_PH]]:
; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN1:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN2:%.*]] = load double, ptr [[ARRAYIDX2]], align 8
; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call double @llvm.maximumnum.f64(double [[IN1]], double [[IN2]])
; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: store double [[OUT]], ptr [[ARRAYIDX4]], align 8
; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds nuw [4096 x double], ptr %input1, i64 0, i64 %iv
  %in1 = load double, ptr %arrayidx, align 8
  %arrayidx2 = getelementptr inbounds nuw [4096 x double], ptr %input2, i64 0, i64 %iv
  %in2 = load double, ptr %arrayidx2, align 8
  %out = tail call double @llvm.maximumnum.f64(double %in1, double %in2)
  %arrayidx4 = getelementptr inbounds nuw [4096 x double], ptr %output, i64 0, i64 %iv
  store double %out, ptr %arrayidx4, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 4096
  br i1 %exitcond.not, label %exit, label %for.body

exit:
  ret void
}


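; f16 is tail-folded with EVL: vp.load/vp.store driven by
; @llvm.experimental.get.vector.length, so no minimum-iterations check is
; emitted and the scalar epilogue is never reached.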
define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef readonly captures(none) %input2, ptr noundef writeonly captures(none) %output) {
; CHECK-LABEL: define void @fmin16(
; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP16]], 2
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP18]]
; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP16]], 2
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP19]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.minimumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN2:%.*]] = load half, ptr [[ARRAYIDX2]], align 2
; CHECK-NEXT: [[OUT:%.*]] = tail call half @llvm.minimumnum.f16(half [[IN1]], half [[IN2]])
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; ZVFHMIN-LABEL: define void @fmin16(
; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; ZVFHMIN-NEXT: [[ENTRY:.*:]]
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 2
; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; ZVFHMIN: [[VECTOR_PH]]:
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; ZVFHMIN-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP15]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.minimumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP18]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; ZVFHMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; ZVFHMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
; ZVFHMIN: [[SCALAR_PH]]:
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2
; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN2:%.*]] = load half, ptr [[ARRAYIDX2]], align 2
; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call half @llvm.minimumnum.f16(half [[IN1]], half [[IN2]])
; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds nuw [4096 x half], ptr %input1, i64 0, i64 %iv
  %in1 = load half, ptr %arrayidx, align 2
  %arrayidx2 = getelementptr inbounds nuw [4096 x half], ptr %input2, i64 0, i64 %iv
  %in2 = load half, ptr %arrayidx2, align 2
  %out = tail call half @llvm.minimumnum.f16(half %in1, half %in2)
  %arrayidx4 = getelementptr inbounds nuw [4096 x half], ptr %output, i64 0, i64 %iv
  store half %out, ptr %arrayidx4, align 2
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 4096
  br i1 %exitcond.not, label %exit, label %for.body

exit:
  ret void
}


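; f16 fmaximumnum, mirroring @fmin16.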
define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef readonly captures(none) %input2, ptr noundef writeonly captures(none) %output) {
; CHECK-LABEL: define void @fmax16(
; CHECK-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP16]], 2
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP18]]
; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP16]], 2
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], [[TMP19]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[IN2:%.*]] = load half, ptr [[ARRAYIDX2]], align 2
; CHECK-NEXT: [[OUT:%.*]] = tail call half @llvm.maximumnum.f16(half [[IN1]], half [[IN2]])
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; ZVFHMIN-LABEL: define void @fmax16(
; ZVFHMIN-SAME: ptr noundef readonly captures(none) [[INPUT1:%.*]], ptr noundef readonly captures(none) [[INPUT2:%.*]], ptr noundef writeonly captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; ZVFHMIN-NEXT: [[ENTRY:.*:]]
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoaddr ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoaddr ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoaddr ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
; ZVFHMIN-NEXT: [[TMP6:%.*]] = sub i64 [[OUTPUT1]], [[INPUT12]]
; ZVFHMIN-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; ZVFHMIN-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 2
; ZVFHMIN-NEXT: [[TMP8:%.*]] = sub i64 [[OUTPUT1]], [[INPUT23]]
; ZVFHMIN-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; ZVFHMIN: [[VECTOR_PH]]:
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; ZVFHMIN-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP15]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP18]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; ZVFHMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; ZVFHMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
; ZVFHMIN: [[SCALAR_PH]]:
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; ZVFHMIN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN1:%.*]] = load half, ptr [[ARRAYIDX]], align 2
; ZVFHMIN-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: [[IN2:%.*]] = load half, ptr [[ARRAYIDX2]], align 2
; ZVFHMIN-NEXT: [[OUT:%.*]] = tail call half @llvm.maximumnum.f16(half [[IN1]], half [[IN2]])
; ZVFHMIN-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[IV]]
; ZVFHMIN-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds nuw [4096 x half], ptr %input1, i64 0, i64 %iv
  %in1 = load half, ptr %arrayidx, align 2
  %arrayidx2 = getelementptr inbounds nuw [4096 x half], ptr %input2, i64 0, i64 %iv
  %in2 = load half, ptr %arrayidx2, align 2
  %out = tail call half @llvm.maximumnum.f16(half %in1, half %in2)
  %arrayidx4 = getelementptr inbounds nuw [4096 x half], ptr %output, i64 0, i64 %iv
  store half %out, ptr %arrayidx4, align 2
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 4096
  br i1 %exitcond.not, label %exit, label %for.body

exit:
  ret void
}

;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]}
; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]}
;.
; ZVFHMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; ZVFHMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; ZVFHMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; ZVFHMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
; ZVFHMIN: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; ZVFHMIN: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]}
; ZVFHMIN: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
; ZVFHMIN: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
; ZVFHMIN: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; ZVFHMIN: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]}
; ZVFHMIN: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
; ZVFHMIN: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
; ZVFHMIN: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
; ZVFHMIN: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]}
;.