; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "vector.ph" --version 5
; RUN: opt %s -passes=loop-vectorize -hoist-runtime-checks=false -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
; Both accesses use the same stride and the same access size (i32), so a
; single pointer-difference runtime check (DIFF_CHECK) is emitted.
define void @same_step_and_size(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @same_step_and_size(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[A2:%.*]] = ptrtoaddr ptr [[A]] to i64
; CHECK-NEXT: [[B1:%.*]] = ptrtoaddr ptr [[B]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[B1]], [[A2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
  %l = load i32, ptr %gep.a
  %mul = mul nsw i32 %l, 3
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 %mul, ptr %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
; The two stores live in non-dominating branches of the loop body; a single
; pointer-difference check is still sufficient.
define void @same_step_and_size_no_dominance_between_accesses(ptr %a, ptr %b, i64 %n, i64 %x) {
; CHECK-LABEL: define void @same_step_and_size_no_dominance_between_accesses(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]], i64 [[X:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[B2:%.*]] = ptrtoaddr ptr [[B]] to i64
; CHECK-NEXT: [[A1:%.*]] = ptrtoaddr ptr [[A]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[A1]], [[B2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %cmp = icmp ne i64 %iv, %x
  br i1 %cmp, label %then, label %else

then:
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
  store i32 0, ptr %gep.a
  br label %loop.latch

else:
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 10, ptr %gep.b
  br label %loop.latch

loop.latch:
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
; Strides differ (i16 vs i32 element types) and access sizes differ; full
; overlap (bound) checks are emitted instead of a difference check.
define void @different_steps_and_different_access_sizes(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @different_steps_and_different_access_sizes(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[N]], 1
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP1]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds i16, ptr %a, i64 %iv
  %l = load i16, ptr %gep.a
  %l.ext = sext i16 %l to i32
  %mul = mul nsw i32 %l.ext, 3
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 %mul, ptr %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
; Strides match but access sizes differ (i16 load vs i32 store); a
; difference check with an offset adjustment (-2) is used.
define void @steps_match_but_different_access_sizes_1(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @steps_match_but_different_access_sizes_1(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[A2:%.*]] = ptrtoaddr ptr [[A]] to i64
; CHECK-NEXT: [[B1:%.*]] = ptrtoaddr ptr [[B]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[B1]], -2
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[A2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds [2 x i16], ptr %a, i64 %iv, i64 1
  %l = load i16, ptr %gep.a
  %l.ext = sext i16 %l to i32
  %mul = mul nsw i32 %l.ext, 3
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 %mul, ptr %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
; Same as @steps_match_but_different_access_sizes_1, but with source and sink
; accesses flipped.
define void @steps_match_but_different_access_sizes_2(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @steps_match_but_different_access_sizes_2(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[B2:%.*]] = ptrtoaddr ptr [[B]] to i64
; CHECK-NEXT: [[A1:%.*]] = ptrtoaddr ptr [[A]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[A1]], 2
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[B2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  %l = load i32, ptr %gep.b
  %mul = mul nsw i32 %l, 3
  %gep.a = getelementptr inbounds [2 x i16], ptr %a, i64 %iv, i64 1
  %trunc = trunc i32 %mul to i16
  store i16 %trunc, ptr %gep.a
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
; Two loads and two stores with matching strides; the pairwise difference
; checks are combined with a chain of ORs (CONFLICT_RDX*).
define void @steps_match_two_loadstores_different_access_sizes(ptr %src.1, ptr %src.2, ptr %dst.1, ptr %dst.2, i64 %n) {
; CHECK-LABEL: define void @steps_match_two_loadstores_different_access_sizes(
; CHECK-SAME: ptr [[SRC_1:%.*]], ptr [[SRC_2:%.*]], ptr [[DST_1:%.*]], ptr [[DST_2:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[SRC_25:%.*]] = ptrtoaddr ptr [[SRC_2]] to i64
; CHECK-NEXT: [[SRC_13:%.*]] = ptrtoaddr ptr [[SRC_1]] to i64
; CHECK-NEXT: [[DST_12:%.*]] = ptrtoaddr ptr [[DST_1]] to i64
; CHECK-NEXT: [[DST_21:%.*]] = ptrtoaddr ptr [[DST_2]] to i64
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[DST_21]], [[DST_12]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[DST_12]], [[SRC_13]]
; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[DST_12]], [[SRC_25]]
; CHECK-NEXT: [[DIFF_CHECK6:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[CONFLICT_RDX7:%.*]] = or i1 [[CONFLICT_RDX]], [[DIFF_CHECK6]]
; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[DST_21]], [[SRC_13]]
; CHECK-NEXT: [[DIFF_CHECK8:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX7]], [[DIFF_CHECK8]]
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[DST_21]], [[SRC_25]]
; CHECK-NEXT: [[DIFF_CHECK10:%.*]] = icmp ult i64 [[TMP4]], 32
; CHECK-NEXT: [[CONFLICT_RDX11:%.*]] = or i1 [[CONFLICT_RDX9]], [[DIFF_CHECK10]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX11]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.src.1 = getelementptr i64, ptr %src.1, i64 %iv
  %ld.src.1 = load i64, ptr %gep.src.1
  %ld.src.1.i32 = trunc i64 %ld.src.1 to i32
  %gep.src.2 = getelementptr i64, ptr %src.2, i64 %iv
  %ld.src.2 = load i64, ptr %gep.src.2
  %add = add i64 %ld.src.1, %ld.src.2
  %gep.dst.1 = getelementptr nusw i64, ptr %dst.1, i64 %iv
  store i32 %ld.src.1.i32, ptr %gep.dst.1
  %gep.dst.2 = getelementptr nusw i64, ptr %dst.2, i64 %iv
  store i64 %add, ptr %gep.dst.2
  %iv.next = add nuw nsw i64 %iv, 1
  %cond = icmp ult i64 %iv.next, %n
  br i1 %cond, label %loop, label %exit

exit:
  ret void
}
; Full no-overlap checks are required instead of difference checks, as
; one of the add-recs used is invariant in the inner loop.
; Test case for PR57315.
define void @nested_loop_outer_iv_addrec_invariant_in_inner1(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @nested_loop_outer_iv_addrec_invariant_in_inner1(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
; CHECK-NEXT: br label %[[OUTER_HEADER:.*]]
; CHECK: [[OUTER_HEADER]]:
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ [[OUTER_IV_NEXT:%.*]], [[OUTER_LATCH:%.*]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[OUTER_IV]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr nuw i8, ptr [[A]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 4
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OUTER_IV]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %outer.header

outer.header:
  %outer.iv = phi i64 [ %outer.iv.next, %outer.latch ], [ 0, %entry ]
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %outer.iv
  br label %inner.body

inner.body:
  %inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.body ]
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %inner.iv
  %l = load i32, ptr %gep.b, align 4
  %sub = sub i32 %l, 10
  store i32 %sub, ptr %gep.a, align 4
  %inner.iv.next = add nuw nsw i64 %inner.iv, 1
  %inner.cond = icmp eq i64 %inner.iv.next, %n
  br i1 %inner.cond, label %outer.latch, label %inner.body

outer.latch:
  %outer.iv.next = add nuw nsw i64 %outer.iv, 1
  %outer.cond = icmp eq i64 %outer.iv.next, %n
  br i1 %outer.cond, label %exit, label %outer.header

exit:
  ret void
}
; Same as @nested_loop_outer_iv_addrec_invariant_in_inner1 but with dependence
; sink and source swapped.
define void @nested_loop_outer_iv_addrec_invariant_in_inner2(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define void @nested_loop_outer_iv_addrec_invariant_in_inner2(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
; CHECK-NEXT: br label %[[OUTER_HEADER:.*]]
; CHECK: [[OUTER_HEADER]]:
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ [[OUTER_IV_NEXT:%.*]], [[OUTER_LATCH:%.*]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[OUTER_IV]], 2
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr nuw i8, ptr [[A]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 4
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OUTER_IV]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP1]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %outer.header

outer.header:
  %outer.iv = phi i64 [ %outer.iv.next, %outer.latch ], [ 0, %entry ]
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %outer.iv
  br label %inner.body

inner.body:
  %inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.body ]
  %l = load i32, ptr %gep.a, align 4
  %sub = sub i32 %l, 10
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %inner.iv
  store i32 %sub, ptr %gep.b, align 4
  %inner.iv.next = add nuw nsw i64 %inner.iv, 1
  %inner.cond = icmp eq i64 %inner.iv.next, %n
  br i1 %inner.cond, label %outer.latch, label %inner.body

outer.latch:
  %outer.iv.next = add nuw nsw i64 %outer.iv, 1
  %outer.cond = icmp eq i64 %outer.iv.next, %n
  br i1 %outer.cond, label %exit, label %outer.header

exit:
  ret void
}
; Test case where the AddRec for the pointers in the inner loop have the AddRec
; of the outer loop as start value. It is sufficient to subtract the start
; values (%dst, %src) of the outer AddRecs.
define void @nested_loop_start_of_inner_ptr_addrec_is_same_outer_addrec(ptr nocapture noundef %dst, ptr nocapture noundef readonly %src, i64 noundef %m, i64 noundef %n) {
; CHECK-LABEL: define void @nested_loop_start_of_inner_ptr_addrec_is_same_outer_addrec(
; CHECK-SAME: ptr noundef captures(none) [[DST:%.*]], ptr noundef readonly captures(none) [[SRC:%.*]], i64 noundef [[M:%.*]], i64 noundef [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[SRC2:%.*]] = ptrtoaddr ptr [[SRC]] to i64
; CHECK-NEXT: [[DST1:%.*]] = ptrtoaddr ptr [[DST]] to i64
; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[DST1]], [[SRC2]]
; CHECK-NEXT: br label %[[OUTER_LOOP:.*]]
; CHECK: [[OUTER_LOOP]]:
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[OUTER_IV_NEXT:%.*]], [[INNER_EXIT:%.*]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[OUTER_IV]], [[N]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[SUB]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %outer.loop

outer.loop:
  %outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %inner.exit ]
  %mul = mul nsw i64 %outer.iv, %n
  br label %inner.loop

inner.loop:
  %iv.inner = phi i64 [ 0, %outer.loop ], [ %iv.inner.next, %inner.loop ]
  %idx = add nuw nsw i64 %iv.inner, %mul
  %gep.src = getelementptr inbounds i32, ptr %src, i64 %idx
  %l = load i32, ptr %gep.src, align 4
  %gep.dst = getelementptr inbounds i32, ptr %dst, i64 %idx
  %add = add nsw i32 %l, 10
  store i32 %add, ptr %gep.dst, align 4
  %iv.inner.next = add nuw nsw i64 %iv.inner, 1
  %inner.exit.cond = icmp eq i64 %iv.inner.next, %n
  br i1 %inner.exit.cond, label %inner.exit, label %inner.loop

inner.exit:
  %outer.iv.next = add nuw nsw i64 %outer.iv, 1
  %outer.exit.cond = icmp eq i64 %outer.iv.next, %m
  br i1 %outer.exit.cond, label %outer.exit, label %outer.loop

outer.exit:
  ret void
}
; Two loads and two stores into %dst, one offset by %off; the memcheck block
; combines the per-pair bound checks with ORs before branching.
define void @use_diff_checks_when_retrying_with_rt_checks(i64 %off, ptr %dst, ptr %src) {
; CHECK-LABEL: define void @use_diff_checks_when_retrying_with_rt_checks(
; CHECK-SAME: i64 [[OFF:%.*]], ptr [[DST:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[OFF]], 3
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 8000
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP2]]
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[DST]], i64 8000
; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[SRC]], i64 8008
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]]
; CHECK-NEXT: [[CONFLICT_RDX5:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: [[BOUND04:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP3]]
; CHECK-NEXT: [[BOUND15:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP1]]
; CHECK-NEXT: [[DIFF_CHECK6:%.*]] = and i1 [[BOUND04]], [[BOUND15]]
; CHECK-NEXT: [[CONFLICT_RDX7:%.*]] = or i1 [[CONFLICT_RDX5]], [[DIFF_CHECK6]]
; CHECK-NEXT: [[BOUND07:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP3]]
; CHECK-NEXT: [[BOUND18:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP2]]
; CHECK-NEXT: [[DIFF_CHECK8:%.*]] = and i1 [[BOUND07]], [[BOUND18]]
; CHECK-NEXT: [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX7]], [[DIFF_CHECK8]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX9]], [[SCALAR_PH:label %.*]], [[VECTOR_PH:label %.*]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.off = add i64 %off, %iv
  %gep.src = getelementptr i64, ptr %src, i64 %iv
  %l.0 = load i64, ptr %gep.src, align 8
  %gep.dst.off = getelementptr i64, ptr %dst, i64 %iv.off
  store i64 %l.0, ptr %gep.dst.off, align 8
  %gep.src.8 = getelementptr i8, ptr %gep.src, i64 8
  %l.1 = load i64, ptr %gep.src.8, align 8
  %gep.dst.iv = getelementptr i64, ptr %dst, i64 %iv
  store i64 %l.1, ptr %gep.dst.iv, align 8
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv.next, 1000
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}
; The loop is only reached when %offset = %x - %y is negative (guarded by the
; entry branch); SCEV checks precede the memcheck, which reduces to a single
; difference check on the sign-extended offset.
define void @remove_diff_checks_via_guards(i32 %x, i32 %y, ptr %A) {
; CHECK-LABEL: define void @remove_diff_checks_via_guards(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]], ptr [[A:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[OFFSET:%.*]] = sub i32 [[X]], [[Y]]
; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[OFFSET]], 0
; CHECK-NEXT: br i1 [[CMP]], [[EXIT:label %.*]], label %[[LOOP_PREHEADER:.*]]
; CHECK: [[LOOP_PREHEADER]]:
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[X]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[TMP0]], 1
; CHECK-NEXT: [[SMAX2:%.*]] = call i64 @llvm.smax.i64(i64 [[TMP1]], i64 0)
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[SMAX2]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = add nuw i32 [[TMP2]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP3]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_SCEVCHECK:.*]]
; CHECK: [[VECTOR_SCEVCHECK]]:
; CHECK-NEXT: [[TMP4:%.*]] = sext i32 [[X]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP4]], 1
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[TMP5]], i64 0)
; CHECK-NEXT: [[TMP6:%.*]] = trunc i64 [[SMAX]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = icmp slt i32 [[TMP6]], 0
; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[SMAX]], 4294967295
; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[SMAX]] to i32
; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[OFFSET]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = icmp slt i32 [[TMP11]], [[OFFSET]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[SMAX]], 4294967295
; CHECK-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
; CHECK-NEXT: [[TMP15:%.*]] = or i1 [[TMP9]], [[TMP14]]
; CHECK-NEXT: br i1 [[TMP15]], [[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP16:%.*]] = sext i32 [[OFFSET]] to i64
; CHECK-NEXT: [[TMP17:%.*]] = shl nsw i64 [[TMP16]], 2
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP17]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH1:label %.*]]
;
entry:
  %offset = sub i32 %x, %y
  %cmp = icmp sge i32 %offset, 0
  br i1 %cmp, label %exit, label %loop

loop:
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.ext = sext i32 %iv to i64
  %gep.A = getelementptr i32, ptr %A, i64 %iv.ext
  %l = load i32, ptr %gep.A, align 1
  %iv.offset = add i32 %iv, %offset
  %iv.offset.ext = sext i32 %iv.offset to i64
  %gep.A.offset = getelementptr i32, ptr %A, i64 %iv.offset.ext
  store i32 %l, ptr %gep.A.offset, align 1
  %iv.next = add i32 %iv, 1
  %ec = icmp sgt i32 %iv, %x
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}
; The source pointer is derived from a truncated i32 pointer difference
; (%diff.i32); the emitted difference check reconstructs the distance from the
; i32 arithmetic.
define void @diff_check_via_i32_ptrarith(ptr %origin, ptr %dst, ptr %base, i32 %d, i32 %n) {
; CHECK-LABEL: define void @diff_check_via_i32_ptrarith(
; CHECK-SAME: ptr [[ORIGIN:%.*]], ptr [[DST:%.*]], ptr [[BASE:%.*]], i32 [[D:%.*]], i32 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[BASE1:%.*]] = ptrtoaddr ptr [[BASE]] to i64
; CHECK-NEXT: [[RHS:%.*]] = ptrtoint ptr [[ORIGIN]] to i64
; CHECK-NEXT: [[LHS:%.*]] = ptrtoint ptr [[DST]] to i64
; CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[LHS]], [[RHS]]
; CHECK-NEXT: [[DIFF_I32:%.*]] = trunc i64 [[DIFF]] to i32
; CHECK-NEXT: [[OP:%.*]] = sub nuw nsw i32 [[D]], [[DIFF_I32]]
; CHECK-NEXT: [[IDX_EXT:%.*]] = zext i32 [[OP]] to i64
; CHECK-NEXT: [[SRC:%.*]] = getelementptr inbounds nuw i8, ptr [[BASE]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[OP]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_PH:.*]], [[EXIT:label %.*]]
; CHECK: [[LOOP_PH]]:
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[RHS]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], -1
; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[LHS]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP2]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP5]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP6]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP7:%.*]] = trunc i64 [[RHS]] to i32
; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[D]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[LHS]] to i32
; CHECK-NEXT: [[TMP10:%.*]] = sub i32 [[TMP8]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[BASE1]], [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[LHS]], [[TMP12]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP13]], 4
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  %rhs = ptrtoint ptr %origin to i64
  %lhs = ptrtoint ptr %dst to i64
  %diff = sub i64 %lhs, %rhs
  %diff.i32 = trunc i64 %diff to i32
  %op = sub nuw nsw i32 %d, %diff.i32
  %idx.ext = zext i32 %op to i64
  %src = getelementptr inbounds nuw i8, ptr %base, i64 %idx.ext
  %cmp = icmp ult i32 %op, %n
  br i1 %cmp, label %loop.ph, label %exit

loop.ph:
  br label %loop

loop:
  %dst.phi = phi ptr [ %dst.next, %loop ], [ %dst, %loop.ph ]
  %src.phi = phi ptr [ %src.next, %loop ], [ %src, %loop.ph ]
  %iv = phi i32 [ %iv.dec, %loop ], [ %op, %loop.ph ]
  %src.next = getelementptr inbounds nuw i8, ptr %src.phi, i64 1
  %val = load i8, ptr %src.phi, align 1
  %dst.next = getelementptr inbounds nuw i8, ptr %dst.phi, i64 1
  store i8 %val, ptr %dst.phi, align 1
  %iv.dec = add i32 %iv, -1
  %done = icmp eq i32 %iv.dec, 0
  br i1 %done, label %exit, label %loop, !llvm.loop !0

exit:
  ret void
}
; The destination pointer (and its ptrtoint value) is selected via phis; the
; difference check is built from the phi of the ptrtoint values.
define void @phi_of_ptrtoint_diff_check(ptr %base, ptr %end, i64 %n, i1 %cond) {
; CHECK-LABEL: define void @phi_of_ptrtoint_diff_check(
; CHECK-SAME: ptr [[BASE:%.*]], ptr [[END:%.*]], i64 [[N:%.*]], i1 [[COND:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[END_INT:%.*]] = ptrtoint ptr [[END]] to i64
; CHECK-NEXT: [[EXT:%.*]] = getelementptr inbounds nuw i8, ptr [[END]], i64 [[N]]
; CHECK-NEXT: [[EXT_INT:%.*]] = ptrtoint ptr [[EXT]] to i64
; CHECK-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[MERGE:.*]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: br label %[[MERGE]]
; CHECK: [[MERGE]]:
; CHECK-NEXT: [[DST_INT:%.*]] = phi i64 [ [[EXT_INT]], %[[IF_THEN]] ], [ [[END_INT]], %[[ENTRY]] ]
; CHECK-NEXT: [[DST_PTR:%.*]] = phi ptr [ [[EXT]], %[[IF_THEN]] ], [ [[END]], %[[ENTRY]] ]
; CHECK-NEXT: [[DST_PTR1:%.*]] = ptrtoaddr ptr [[DST_PTR]] to i64
; CHECK-NEXT: [[END2:%.*]] = getelementptr inbounds nuw i8, ptr [[BASE]], i64 [[N]]
; CHECK-NEXT: [[END2_INT:%.*]] = ptrtoint ptr [[END2]] to i64
; CHECK-NEXT: [[OFFSET:%.*]] = sub i64 [[DST_INT]], [[END2_INT]]
; CHECK-NEXT: [[SRC:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[OFFSET]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult ptr [[SRC]], [[END]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_PREHEADER:.*]], [[EXIT:label %.*]]
; CHECK: [[LOOP_PREHEADER]]:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], [[END_INT]]
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[DST_INT]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[N]], [[DST_PTR1]]
; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], [[DST_INT]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], 4
; CHECK-NEXT: br i1 [[DIFF_CHECK]], [[SCALAR_PH]], [[VECTOR_PH:label %.*]]
;
entry:
  %end.int = ptrtoint ptr %end to i64
  %ext = getelementptr inbounds nuw i8, ptr %end, i64 %n
  %ext.int = ptrtoint ptr %ext to i64
  br i1 %cond, label %if.then, label %loop.ph

if.then:
  br label %loop.ph

loop.ph:
  %dst.int = phi i64 [ %ext.int, %if.then ], [ %end.int, %entry ]
  %dst.ptr = phi ptr [ %ext, %if.then ], [ %end, %entry ]
  %end2 = getelementptr inbounds nuw i8, ptr %base, i64 %n
  %end2.int = ptrtoint ptr %end2 to i64
  %offset = sub i64 %dst.int, %end2.int
  %src = getelementptr inbounds i8, ptr %base, i64 %offset
  %cmp = icmp ult ptr %src, %end
  br i1 %cmp, label %loop, label %exit

loop:
  %iv.src = phi ptr [ %src, %loop.ph ], [ %next.src, %loop ]
  %iv.dst = phi ptr [ %dst.ptr, %loop.ph ], [ %next.dst, %loop ]
  %val = load i8, ptr %iv.src, align 1
  store i8 %val, ptr %iv.dst, align 1
  %next.src = getelementptr inbounds nuw i8, ptr %iv.src, i64 1
  %next.dst = getelementptr inbounds nuw i8, ptr %iv.dst, i64 1
  %done = icmp eq ptr %next.src, %end
  br i1 %done, label %exit, label %loop

exit:
  ret void
}
; Loop metadata referenced by the !llvm.loop attachment in
; @diff_check_via_i32_ptrarith.
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.mustprogress"}