; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
; RUN: opt -S -passes=loop-vectorize -enable-vplan-native-path < %s | FileCheck %s

; C source this IR was derived from (outer-loop vectorization, VF=4 forced
; via loop metadata on the *outer* loop):
;
; int A[1024], B[1024];
;
; void foo(int iCount, int c, int jCount)
; {
;
;   int i, j;
;
; #pragma clang loop vectorize(enable) vectorize_width(4)
;   for (i = 0; i < iCount; i++) {
;     A[i] = c;
;     for (j = 0; j < jCount; j++) {
;       A[i] += B[j] + i;
;     }
;   }
; }

@A = common global [1024 x i32] zeroinitializer, align 16
@B = common global [1024 x i32] zeroinitializer, align 16

define void @foo(i32 %iCount, i32 %c, i32 %jCount) {
; CHECK-LABEL: define void @foo(
; CHECK-SAME: i32 [[ICOUNT:%.*]], i32 [[C:%.*]], i32 [[JCOUNT:%.*]]) {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    [[CMP22:%.*]] = icmp sgt i32 [[ICOUNT]], 0
; CHECK-NEXT:    br i1 [[CMP22]], label %[[FOR_BODY_LR_PH:.*]], label %[[FOR_END11:.*]]
; CHECK:       [[FOR_BODY_LR_PH]]:
; CHECK-NEXT:    [[CMP220:%.*]] = icmp sgt i32 [[JCOUNT]], 0
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[JCOUNT]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT27:%.*]] = zext i32 [[ICOUNT]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT27]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT27]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT27]], [[N_MOD_VF]]
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[WIDE_TRIP_COUNT]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> poison, i32 [[C]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1024 x i32], ptr @A, i64 0, <4 x i64> [[VEC_IND]]
; CHECK-NEXT:    call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[BROADCAST_SPLAT2]], <4 x ptr> align 4 [[TMP0]], <4 x i1> splat (i1 true))
; CHECK-NEXT:    br i1 [[CMP220]], label %[[FOR_BODY3_LR_PH3:.*]], label %[[VECTOR_LATCH]]
; CHECK:       [[FOR_BODY3_LR_PH3]]:
; CHECK-NEXT:    [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> align 4 [[TMP0]], <4 x i1> splat (i1 true), <4 x i32> poison)
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <4 x i64> [[VEC_IND]] to <4 x i32>
; CHECK-NEXT:    br label %[[FOR_BODY34:.*]]
; CHECK:       [[FOR_BODY34]]:
; CHECK-NEXT:    [[TMP2:%.*]] = phi <4 x i64> [ zeroinitializer, %[[FOR_BODY3_LR_PH3]] ], [ [[TMP7:%.*]], %[[FOR_BODY34]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = phi <4 x i32> [ [[WIDE_MASKED_GATHER]], %[[FOR_BODY3_LR_PH3]] ], [ [[TMP6:%.*]], %[[FOR_BODY34]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1024 x i32], ptr @B, i64 0, <4 x i64> [[TMP2]]
; CHECK-NEXT:    [[WIDE_MASKED_GATHER5:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> align 4 [[TMP4]], <4 x i1> splat (i1 true), <4 x i32> poison)
; CHECK-NEXT:    [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER5]], [[TMP1]]
; CHECK-NEXT:    [[TMP6]] = add nsw <4 x i32> [[TMP5]], [[TMP3]]
; CHECK-NEXT:    [[TMP7]] = add nuw nsw <4 x i64> [[TMP2]], splat (i64 1)
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq <4 x i64> [[TMP7]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <4 x i1> [[TMP8]], i64 0
; CHECK-NEXT:    br i1 [[TMP9]], label %[[FOR_COND1_FOR_INC9_CRIT_EDGE6:.*]], label %[[FOR_BODY34]]
; CHECK:       [[FOR_COND1_FOR_INC9_CRIT_EDGE6]]:
; CHECK-NEXT:    call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[TMP6]], <4 x ptr> align 4 [[TMP0]], <4 x i1> splat (i1 true))
; CHECK-NEXT:    br label %[[VECTOR_LATCH]]
; CHECK:       [[VECTOR_LATCH]]:
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add nuw nsw <4 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT27]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label %[[FOR_END11_LOOPEXIT:.*]], label %[[SCALAR_PH]]
; CHECK:       [[SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
; CHECK:       [[FOR_BODY]]:
; CHECK-NEXT:    [[INDVARS_IV25:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT26:%.*]], %[[FOR_INC9:.*]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr @A, i64 0, i64 [[INDVARS_IV25]]
; CHECK-NEXT:    store i32 [[C]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    br i1 [[CMP220]], label %[[FOR_BODY3_LR_PH:.*]], label %[[FOR_INC9]]
; CHECK:       [[FOR_BODY3_LR_PH]]:
; CHECK-NEXT:    [[ARRAYIDX_PROMOTED:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[TMP11:%.*]] = trunc i64 [[INDVARS_IV25]] to i32
; CHECK-NEXT:    br label %[[FOR_BODY3:.*]]
; CHECK:       [[FOR_BODY3]]:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[FOR_BODY3_LR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY3]] ]
; CHECK-NEXT:    [[TMP12:%.*]] = phi i32 [ [[ARRAYIDX_PROMOTED]], %[[FOR_BODY3_LR_PH]] ], [ [[ADD8:%.*]], %[[FOR_BODY3]] ]
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP11]]
; CHECK-NEXT:    [[ADD8]] = add nsw i32 [[ADD]], [[TMP12]]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label %[[FOR_COND1_FOR_INC9_CRIT_EDGE:.*]], label %[[FOR_BODY3]]
; CHECK:       [[FOR_COND1_FOR_INC9_CRIT_EDGE]]:
; CHECK-NEXT:    [[ADD8_LCSSA:%.*]] = phi i32 [ [[ADD8]], %[[FOR_BODY3]] ]
; CHECK-NEXT:    store i32 [[ADD8_LCSSA]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    br label %[[FOR_INC9]]
; CHECK:       [[FOR_INC9]]:
; CHECK-NEXT:    [[INDVARS_IV_NEXT26]] = add nuw nsw i64 [[INDVARS_IV25]], 1
; CHECK-NEXT:    [[EXITCOND28:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT26]], [[WIDE_TRIP_COUNT27]]
; CHECK-NEXT:    br i1 [[EXITCOND28]], label %[[FOR_END11_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       [[FOR_END11_LOOPEXIT]]:
; CHECK-NEXT:    br label %[[FOR_END11]]
; CHECK:       [[FOR_END11]]:
; CHECK-NEXT:    ret void
;
entry:
  %outer.cmp = icmp sgt i32 %iCount, 0
  br i1 %outer.cmp, label %outer.ph, label %exit

outer.ph:
  %inner.cmp = icmp sgt i32 %jCount, 0
  %inner.tc = zext i32 %jCount to i64
  %outer.tc = zext i32 %iCount to i64
  br label %outer.header

outer.header:                                     ; vectorized outer loop: i
  %iv.outer = phi i64 [ 0, %outer.ph ], [ %iv.outer.next, %outer.latch ]
  %gep.A = getelementptr inbounds [1024 x i32], ptr @A, i64 0, i64 %iv.outer
  store i32 %c, ptr %gep.A, align 4
  br i1 %inner.cmp, label %inner.ph, label %outer.latch

inner.ph:
  %a.val = load i32, ptr %gep.A, align 4
  %i.trunc = trunc i64 %iv.outer to i32
  br label %inner

inner:                                            ; inner reduction loop: j
  %iv.inner = phi i64 [ 0, %inner.ph ], [ %iv.inner.next, %inner ]
  %accum = phi i32 [ %a.val, %inner.ph ], [ %accum.next, %inner ]
  %gep.B = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %iv.inner
  %b.val = load i32, ptr %gep.B, align 4
  %sum.b.i = add nsw i32 %b.val, %i.trunc
  %accum.next = add nsw i32 %sum.b.i, %accum
  %iv.inner.next = add nuw nsw i64 %iv.inner, 1
  %inner.ec = icmp eq i64 %iv.inner.next, %inner.tc
  br i1 %inner.ec, label %inner.exit, label %inner

inner.exit:
  store i32 %accum.next, ptr %gep.A, align 4
  br label %outer.latch

outer.latch:
  %iv.outer.next = add nuw nsw i64 %iv.outer, 1
  %outer.ec = icmp eq i64 %iv.outer.next, %outer.tc
  br i1 %outer.ec, label %exit, label %outer.header, !llvm.loop !1

exit:
  ret void
}

; Forced outer-loop vectorization at width 4.
!1 = distinct !{!1, !2, !3}
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.enable", i1 true}