; NOTE: Extra passes after loop-vectorize were removed from the RUN lines
; below; they made no difference to the test outcome and only increased
; testing time.
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -passes=loop-vectorize < %s | FileCheck -check-prefixes=GCN %s
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -passes=loop-vectorize -pass-remarks-analysis='loop-vectorize' < %s 2>&1 | FileCheck -check-prefixes=REMARK %s
; GCN-LABEL: @runtime_check_divergent_target(
; GCN-NOT: load <2 x half>
; GCN-NOT: store <2 x half>

; REMARK: remark: <unknown>:0:0: loop not vectorized: runtime pointer checks needed. Not enabled for divergent target
; Scalar loop: a[i] = b[i] * 3.0 for i in [0, 1024). Vectorizing would need
; runtime pointer checks (%a and %b may alias), and those checks are disabled
; for divergent targets such as AMDGPU — so the loop must remain scalar.
define amdgpu_kernel void @runtime_check_divergent_target(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture %b) {
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds half, ptr addrspace(1) %b, i64 %indvars.iv
  %load = load half, ptr addrspace(1) %arrayidx, align 4
  %mul = fmul half %load, 3.0
  %arrayidx2 = getelementptr inbounds half, ptr addrspace(1) %a, i64 %indvars.iv
  store half %mul, ptr addrspace(1) %arrayidx2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  ; Exit test uses a truncated 32-bit counter, matching the original test's
  ; latch shape (trip count 1024).
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 1024
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}