Update groupChecks to always use DepCands to try and merge runtime checks. DepCands contains the dependency partition, grouping together all accessed pointers to the same underlying objects. If we computed the dependencies, we only need to check accesses to the same underlying object if there is an unknown dependency for this underlying object; otherwise we already proved that all accesses within the underlying object are safe w.r.t. vectorization, and we only need to check that accesses to the underlying object don't overlap with accesses to other underlying objects. To ensure runtime checks are generated for the case with unknown dependencies, remove equivalence classes containing accesses involved in unknown dependencies. This reduces the number of runtime checks needed in case non-constant dependence distances are found, and is in preparation for removing the restriction that the accesses need to have the same stride, which was added in https://github.com/llvm/llvm-project/pull/88039. PR: https://github.com/llvm/llvm-project/pull/91196
492 lines
21 KiB
LLVM
492 lines
21 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "vector.ph" --version 5
|
|
; RUN: opt %s -passes=loop-vectorize -hoist-runtime-checks=false -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s
|
|
|
|
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
|
|
|
|
; Both accesses step by the same amount (4 bytes, i32 stride 1) and have the
; same access size, so the runtime memcheck is a single pointer-difference
; check (B - A compared against VF * size = 16) instead of a pair of
; overlap bound checks.
define void @same_step_and_size(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @same_step_and_size(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[A2:%.*]] = ptrtoaddr ptr [[A]] to i64
; CHECK-NEXT: [[B1:%.*]] = ptrtoaddr ptr [[B]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[B1]], [[A2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
  %l = load i32, ptr %gep.a
  %mul = mul nsw i32 %l, 3
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 %mul, ptr %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
|
|
|
|
; The stores to %a (in %then) and %b (in %else) sit on mutually exclusive
; paths, so neither access dominates the other. Both still have the same step
; and access size, so a single pointer-difference check suffices.
define void @same_step_and_size_no_dominance_between_accesses(ptr %a, ptr %b, i64 %n, i64 %x) {
; CHECK-LABEL: define void @same_step_and_size_no_dominance_between_accesses(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]], i64 [[X:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[B2:%.*]] = ptrtoaddr ptr [[B]] to i64
; CHECK-NEXT: [[A1:%.*]] = ptrtoaddr ptr [[A]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[A1]], [[B2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %cmp = icmp ne i64 %iv, %x
  br i1 %cmp, label %then, label %else

then:
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
  store i32 0, ptr %gep.a
  br label %loop.latch

else:
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 10, ptr %gep.b
  br label %loop.latch

loop.latch:
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
|
|
|
|
; The load of %a steps by 2 bytes (i16) while the store to %b steps by 4
; bytes (i32): both the strides and the access sizes differ, so a full pair
; of overlap bound checks (BOUND0/BOUND1) is emitted instead of a single
; pointer-difference check.
define void @different_steps_and_different_access_sizes(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @different_steps_and_different_access_sizes(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[N]], 1
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds i16, ptr %a, i64 %iv
  %l = load i16, ptr %gep.a
  %l.ext = sext i16 %l to i32
  %mul = mul nsw i32 %l.ext, 3
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 %mul, ptr %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
|
|
|
|
; Both accesses advance 4 bytes per iteration ([2 x i16] element for %a, i32
; for %b), but the accessed sizes differ (i16 load vs. i32 store). A
; difference check is still possible; the i16 load sits 2 bytes into each
; [2 x i16] element, which shows up as the -2 adjustment on B before
; subtracting A.
define void @steps_match_but_different_access_sizes_1(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @steps_match_but_different_access_sizes_1(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[A2:%.*]] = ptrtoaddr ptr [[A]] to i64
; CHECK-NEXT: [[B1:%.*]] = ptrtoaddr ptr [[B]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[B1]], -2
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[A2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds [2 x i16], ptr %a, i64 %iv, i64 1
  %l = load i16, ptr %gep.a
  %l.ext = sext i16 %l to i32
  %mul = mul nsw i32 %l.ext, 3
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 %mul, ptr %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
|
|
|
|
; Same as @steps_match_but_different_access_sizes_1, but with source and sink
; accesses flipped: here the i16 store is the sink. The constant adjustment in
; the difference check flips sign accordingly (+2 on A before subtracting B).
define void @steps_match_but_different_access_sizes_2(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @steps_match_but_different_access_sizes_2(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[B2:%.*]] = ptrtoaddr ptr [[B]] to i64
; CHECK-NEXT: [[A1:%.*]] = ptrtoaddr ptr [[A]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[A1]], 2
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[B2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  %l = load i32, ptr %gep.b
  %mul = mul nsw i32 %l, 3
  %gep.a = getelementptr inbounds [2 x i16], ptr %a, i64 %iv, i64 1
  %trunc = trunc i32 %mul to i16
  store i16 %trunc, ptr %gep.a
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
|
|
|
|
; Two loads (%src.1, %src.2) and two stores (%dst.1, %dst.2), all stepping by
; 8 bytes per iteration; the store to %dst.1 is only i32-wide, so access sizes
; differ. A chain of pointer-difference checks is emitted — one per pair that
; needs checking (dst.2 vs dst.1, and each dst vs each src) — OR'd together
; into a single conflict condition.
define void @steps_match_two_loadstores_different_access_sizes(ptr %src.1, ptr %src.2, ptr %dst.1, ptr %dst.2, i64 %n) {
; CHECK-LABEL: define void @steps_match_two_loadstores_different_access_sizes(
; CHECK-SAME: ptr [[SRC_1:%.*]], ptr [[SRC_2:%.*]], ptr [[DST_1:%.*]], ptr [[DST_2:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[SRC_25:%.*]] = ptrtoaddr ptr [[SRC_2]] to i64
; CHECK-NEXT: [[SRC_13:%.*]] = ptrtoaddr ptr [[SRC_1]] to i64
; CHECK-NEXT: [[DST_12:%.*]] = ptrtoaddr ptr [[DST_1]] to i64
; CHECK-NEXT: [[DST_21:%.*]] = ptrtoaddr ptr [[DST_2]] to i64
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[DST_21]], [[DST_12]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[DST_12]], [[SRC_13]]
; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[DST_12]], [[SRC_25]]
; CHECK-NEXT: [[DIFF_CHECK6:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[CONFLICT_RDX7:%.*]] = or i1 [[CONFLICT_RDX]], [[DIFF_CHECK6]]
; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[DST_21]], [[SRC_13]]
; CHECK-NEXT: [[DIFF_CHECK8:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX7]], [[DIFF_CHECK8]]
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[DST_21]], [[SRC_25]]
; CHECK-NEXT: [[DIFF_CHECK10:%.*]] = icmp ult i64 [[TMP4]], 32
; CHECK-NEXT: [[CONFLICT_RDX11:%.*]] = or i1 [[CONFLICT_RDX9]], [[DIFF_CHECK10]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX11]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.src.1 = getelementptr i64, ptr %src.1, i64 %iv
  %ld.src.1 = load i64, ptr %gep.src.1
  %ld.src.1.i32 = trunc i64 %ld.src.1 to i32
  %gep.src.2 = getelementptr i64, ptr %src.2, i64 %iv
  %ld.src.2 = load i64, ptr %gep.src.2
  %add = add i64 %ld.src.1, %ld.src.2
  %gep.dst.1 = getelementptr nusw i64, ptr %dst.1, i64 %iv
  store i32 %ld.src.1.i32, ptr %gep.dst.1
  %gep.dst.2 = getelementptr nusw i64, ptr %dst.2, i64 %iv
  store i64 %add, ptr %gep.dst.2
  %iv.next = add nuw nsw i64 %iv, 1
  %cond = icmp ult i64 %iv.next, %n
  br i1 %cond, label %loop, label %exit

exit:
  ret void
}
|
|
|
|
; Full no-overlap checks are required instead of difference checks, as
; one of the add-recs used is invariant in the inner loop.
; Test case for PR57315.
; The store address %gep.a only varies with the outer IV, so inside the inner
; (vectorized) loop it is a single 4-byte location; the bound checks compare
; that location against the whole range written through %b.
define void @nested_loop_outer_iv_addrec_invariant_in_inner1(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @nested_loop_outer_iv_addrec_invariant_in_inner1(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
; CHECK-NEXT: br label %[[OUTER_HEADER:.*]]
; CHECK: [[OUTER_HEADER]]:
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ [[OUTER_IV_NEXT:%.*]], [[OUTER_LATCH:%.*]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[OUTER_IV]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr nuw i8, ptr [[A]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 4
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OUTER_IV]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %outer.header

outer.header:
  %outer.iv = phi i64 [ %outer.iv.next, %outer.latch ], [ 0, %entry ]
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %outer.iv
  br label %inner.body

inner.body:
  %inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.body ]
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %inner.iv
  %l = load i32, ptr %gep.b, align 4
  %sub = sub i32 %l, 10
  store i32 %sub, ptr %gep.a, align 4
  %inner.iv.next = add nuw nsw i64 %inner.iv, 1
  %inner.cond = icmp eq i64 %inner.iv.next, %n
  br i1 %inner.cond, label %outer.latch, label %inner.body

outer.latch:
  %outer.iv.next = add nuw nsw i64 %outer.iv, 1
  %outer.cond = icmp eq i64 %outer.iv.next, %n
  br i1 %outer.cond, label %exit, label %outer.header

exit:
  ret void
}
|
|
|
|
; Same as @nested_loop_outer_iv_addrec_invariant_in_inner1 but with dependence
; sink and source swapped: here the inner-invariant address %gep.a is loaded
; and %b is stored through. The same full bound checks are required.
define void @nested_loop_outer_iv_addrec_invariant_in_inner2(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @nested_loop_outer_iv_addrec_invariant_in_inner2(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
; CHECK-NEXT: br label %[[OUTER_HEADER:.*]]
; CHECK: [[OUTER_HEADER]]:
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ [[OUTER_IV_NEXT:%.*]], [[OUTER_LATCH:%.*]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[OUTER_IV]], 2
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr nuw i8, ptr [[A]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 4
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OUTER_IV]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP1]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %outer.header

outer.header:
  %outer.iv = phi i64 [ %outer.iv.next, %outer.latch ], [ 0, %entry ]
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %outer.iv
  br label %inner.body

inner.body:
  %inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.body ]
  %l = load i32, ptr %gep.a, align 4
  %sub = sub i32 %l, 10
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %inner.iv
  store i32 %sub, ptr %gep.b, align 4
  %inner.iv.next = add nuw nsw i64 %inner.iv, 1
  %inner.cond = icmp eq i64 %inner.iv.next, %n
  br i1 %inner.cond, label %outer.latch, label %inner.body

outer.latch:
  %outer.iv.next = add nuw nsw i64 %outer.iv, 1
  %outer.cond = icmp eq i64 %outer.iv.next, %n
  br i1 %outer.cond, label %exit, label %outer.header

exit:
  ret void
}
|
|
|
|
; Test case where the AddRec for the pointers in the inner loop have the AddRec
; of the outer loop as start value. It is sufficient to subtract the start
; values (%dst, %src) of the outer AddRecs: a single loop-invariant difference
; check is computed once in the entry block and reused for every outer
; iteration.
define void @nested_loop_start_of_inner_ptr_addrec_is_same_outer_addrec(ptr nocapture noundef %dst, ptr nocapture noundef readonly %src, i64 noundef %m, i64 noundef %n) {
; CHECK-LABEL: define void @nested_loop_start_of_inner_ptr_addrec_is_same_outer_addrec(
; CHECK-SAME: ptr noundef captures(none) [[DST:%.*]], ptr noundef readonly captures(none) [[SRC:%.*]], i64 noundef [[M:%.*]], i64 noundef [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[SRC2:%.*]] = ptrtoaddr ptr [[SRC]] to i64
; CHECK-NEXT: [[DST1:%.*]] = ptrtoaddr ptr [[DST]] to i64
; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[DST1]], [[SRC2]]
; CHECK-NEXT: br label %[[OUTER_LOOP:.*]]
; CHECK: [[OUTER_LOOP]]:
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[OUTER_IV_NEXT:%.*]], [[INNER_EXIT:%.*]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[OUTER_IV]], [[N]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[SUB]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %outer.loop

outer.loop:
  %outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %inner.exit ]
  %mul = mul nsw i64 %outer.iv, %n
  br label %inner.loop

inner.loop:
  %iv.inner = phi i64 [ 0, %outer.loop ], [ %iv.inner.next, %inner.loop ]
  %idx = add nuw nsw i64 %iv.inner, %mul
  %gep.src = getelementptr inbounds i32, ptr %src, i64 %idx
  %l = load i32, ptr %gep.src, align 4
  %gep.dst = getelementptr inbounds i32, ptr %dst, i64 %idx
  %add = add nsw i32 %l, 10
  store i32 %add, ptr %gep.dst, align 4
  %iv.inner.next = add nuw nsw i64 %iv.inner, 1
  %inner.exit.cond = icmp eq i64 %iv.inner.next, %n
  br i1 %inner.exit.cond, label %inner.exit, label %inner.loop

inner.exit:
  %outer.iv.next = add nuw nsw i64 %outer.iv, 1
  %outer.exit.cond = icmp eq i64 %outer.iv.next, %m
  br i1 %outer.exit.cond, label %outer.exit, label %outer.loop

outer.exit:
  ret void
}
|
|
|
|
; The distance between the store via %gep.dst.off and the other accesses to
; %dst/%src depends on the runtime value %off, so dependence analysis cannot
; prove safety and vectorization is retried with runtime checks. NOTE(review):
; the function name suggests difference checks should be reused on the retry
; path, but the current output below uses three pairwise bound checks (with
; some values named DIFF_CHECK*) — confirm intended behavior against the
; vectorizer when regenerating.
define void @use_diff_checks_when_retrying_with_rt_checks(i64 %off, ptr %dst, ptr %src) {
; CHECK-LABEL: define void @use_diff_checks_when_retrying_with_rt_checks(
; CHECK-SAME: i64 [[OFF:%.*]], ptr [[DST:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[OFF]], 3
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 8000
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP2]]
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[DST]], i64 8000
; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[SRC]], i64 8008
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]]
; CHECK-NEXT: [[CONFLICT_RDX5:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: [[BOUND04:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP3]]
; CHECK-NEXT: [[BOUND15:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP1]]
; CHECK-NEXT: [[DIFF_CHECK6:%.*]] = and i1 [[BOUND04]], [[BOUND15]]
; CHECK-NEXT: [[CONFLICT_RDX7:%.*]] = or i1 [[CONFLICT_RDX5]], [[DIFF_CHECK6]]
; CHECK-NEXT: [[BOUND07:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP3]]
; CHECK-NEXT: [[BOUND18:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP2]]
; CHECK-NEXT: [[DIFF_CHECK8:%.*]] = and i1 [[BOUND07]], [[BOUND18]]
; CHECK-NEXT: [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX7]], [[DIFF_CHECK8]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX9]], [[SCALAR_PH:label %.*]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.off = add i64 %off, %iv
  %gep.src = getelementptr i64, ptr %src, i64 %iv
  %l.0 = load i64, ptr %gep.src, align 8
  %gep.dst.off = getelementptr i64, ptr %dst, i64 %iv.off
  store i64 %l.0, ptr %gep.dst.off, align 8
  %gep.src.8 = getelementptr i8, ptr %gep.src, i64 8
  %l.1 = load i64, ptr %gep.src.8, align 8
  %gep.dst.iv = getelementptr i64, ptr %dst, i64 %iv
  store i64 %l.1, ptr %gep.dst.iv, align 8
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv.next, 1000
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}
|
|
|
|
|
|
; The entry guard branches to %exit when %offset >= 0, so the loop only runs
; with a negative %offset. NOTE(review): despite the function name, the output
; below still emits a difference check on sext(%offset) * 4 in
; VECTOR_MEMCHECK — presumably this documents current behavior where the
; dominating guard is not yet used to eliminate the check; confirm when the
; optimization lands.
define void @remove_diff_checks_via_guards(i32 %x, i32 %y, ptr %A) {
; CHECK-LABEL: define void @remove_diff_checks_via_guards(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]], ptr [[A:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[OFFSET:%.*]] = sub i32 [[X]], [[Y]]
; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[OFFSET]], 0
; CHECK-NEXT: br i1 [[CMP]], [[EXIT:label %.*]], label %[[LOOP_PREHEADER:.*]]
; CHECK: [[LOOP_PREHEADER]]:
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[X]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[TMP0]], 1
; CHECK-NEXT: [[SMAX2:%.*]] = call i64 @llvm.smax.i64(i64 [[TMP1]], i64 0)
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[SMAX2]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = add nuw i32 [[TMP2]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP3]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_SCEVCHECK:.*]]
; CHECK: [[VECTOR_SCEVCHECK]]:
; CHECK-NEXT: [[TMP4:%.*]] = sext i32 [[X]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP4]], 1
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[TMP5]], i64 0)
; CHECK-NEXT: [[TMP6:%.*]] = trunc i64 [[SMAX]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = icmp slt i32 [[TMP6]], 0
; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[SMAX]], 4294967295
; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[SMAX]] to i32
; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[OFFSET]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = icmp slt i32 [[TMP11]], [[OFFSET]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[SMAX]], 4294967295
; CHECK-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
; CHECK-NEXT: [[TMP15:%.*]] = or i1 [[TMP9]], [[TMP14]]
; CHECK-NEXT: br i1 [[TMP15]], [[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP16:%.*]] = sext i32 [[OFFSET]] to i64
; CHECK-NEXT: [[TMP17:%.*]] = shl nsw i64 [[TMP16]], 2
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP17]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH1:label %.*]]
;
entry:
  %offset = sub i32 %x, %y
  %cmp = icmp sge i32 %offset, 0
  br i1 %cmp, label %exit, label %loop

loop:
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.ext = sext i32 %iv to i64
  %gep.A = getelementptr i32, ptr %A, i64 %iv.ext
  %l = load i32, ptr %gep.A, align 1
  %iv.offset = add i32 %iv, %offset
  %iv.offset.ext = sext i32 %iv.offset to i64
  %gep.A.offset = getelementptr i32, ptr %A, i64 %iv.offset.ext
  store i32 %l, ptr %gep.A.offset, align 1
  %iv.next = add i32 %iv, 1
  %ec = icmp sgt i32 %iv, %x
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}
|