; Do not remove IMPLICIT_DEF of a physreg unless all uses have an undef flag
; added. Previously, only the first use instruction had undef flags added,
; which caused a failure in machine instruction verification. Multi-instruction
; uses are tested in AMDGPU/multi-use-implicit-def.mir and
; X86/multi-use-implicit-def.mir.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa < %s | FileCheck -check-prefixes=GCN,SDAG %s
; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa < %s | FileCheck -check-prefixes=GCN,GISEL %s

define amdgpu_kernel void @test_bitcast_return_type_noinline() #0 {
; SDAG-LABEL: test_bitcast_return_type_noinline:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_mov_b32 s32, 0
; SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
; SDAG-NEXT: s_add_i32 s12, s12, s17
; SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; SDAG-NEXT: s_add_u32 s0, s0, s17
; SDAG-NEXT: s_addc_u32 s1, s1, 0
; SDAG-NEXT: s_mov_b32 s13, s15
; SDAG-NEXT: s_mov_b32 s12, s14
; SDAG-NEXT: s_getpc_b64 s[18:19]
; SDAG-NEXT: s_add_u32 s18, s18, ret_i32_noinline@rel32@lo+4
; SDAG-NEXT: s_addc_u32 s19, s19, ret_i32_noinline@rel32@hi+12
; SDAG-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; SDAG-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; SDAG-NEXT: v_or_b32_e32 v0, v0, v1
; SDAG-NEXT: v_or_b32_e32 v31, v0, v2
; SDAG-NEXT: s_mov_b32 s14, s16
; SDAG-NEXT: s_swappc_b64 s[30:31], s[18:19]
; SDAG-NEXT: v_add_f32_e32 v0, 1.0, v0
; SDAG-NEXT: flat_store_dword v[0:1], v0
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: test_bitcast_return_type_noinline:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_mov_b32 s32, 0
; GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
; GISEL-NEXT: s_add_i32 s12, s12, s17
; GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GISEL-NEXT: s_add_u32 s0, s0, s17
; GISEL-NEXT: s_addc_u32 s1, s1, 0
; GISEL-NEXT: s_mov_b32 s13, s15
; GISEL-NEXT: s_mov_b32 s12, s14
; GISEL-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GISEL-NEXT: v_or_b32_e32 v0, v0, v1
; GISEL-NEXT: v_or_b32_e32 v31, v0, v2
; GISEL-NEXT: s_getpc_b64 s[18:19]
; GISEL-NEXT: s_add_u32 s18, s18, ret_i32_noinline@rel32@lo+4
; GISEL-NEXT: s_addc_u32 s19, s19, ret_i32_noinline@rel32@hi+12
; GISEL-NEXT: s_mov_b32 s14, s16
; GISEL-NEXT: s_swappc_b64 s[30:31], s[18:19]
; GISEL-NEXT: v_add_f32_e32 v0, 1.0, v0
; GISEL-NEXT: flat_store_dword v[0:1], v0
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
  %val = call float @ret_i32_noinline()
  %op = fadd float %val, 1.0
  store volatile float %op, ptr addrspace(1) poison
  ret void
}
define amdgpu_kernel void @test_bitcast_return_type_alwaysinline() #0 {
; SDAG-LABEL: test_bitcast_return_type_alwaysinline:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_mov_b32 s32, 0
; SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
; SDAG-NEXT: s_add_i32 s12, s12, s17
; SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; SDAG-NEXT: s_add_u32 s0, s0, s17
; SDAG-NEXT: s_addc_u32 s1, s1, 0
; SDAG-NEXT: s_mov_b32 s13, s15
; SDAG-NEXT: s_mov_b32 s12, s14
; SDAG-NEXT: s_getpc_b64 s[18:19]
; SDAG-NEXT: s_add_u32 s18, s18, ret_i32_alwaysinline@rel32@lo+4
; SDAG-NEXT: s_addc_u32 s19, s19, ret_i32_alwaysinline@rel32@hi+12
; SDAG-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; SDAG-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; SDAG-NEXT: v_or_b32_e32 v0, v0, v1
; SDAG-NEXT: v_or_b32_e32 v31, v0, v2
; SDAG-NEXT: s_mov_b32 s14, s16
; SDAG-NEXT: s_swappc_b64 s[30:31], s[18:19]
; SDAG-NEXT: v_add_f32_e32 v0, 1.0, v0
; SDAG-NEXT: flat_store_dword v[0:1], v0
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: test_bitcast_return_type_alwaysinline:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_mov_b32 s32, 0
; GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
; GISEL-NEXT: s_add_i32 s12, s12, s17
; GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GISEL-NEXT: s_add_u32 s0, s0, s17
; GISEL-NEXT: s_addc_u32 s1, s1, 0
; GISEL-NEXT: s_mov_b32 s13, s15
; GISEL-NEXT: s_mov_b32 s12, s14
; GISEL-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GISEL-NEXT: v_or_b32_e32 v0, v0, v1
; GISEL-NEXT: v_or_b32_e32 v31, v0, v2
; GISEL-NEXT: s_getpc_b64 s[18:19]
; GISEL-NEXT: s_add_u32 s18, s18, ret_i32_alwaysinline@rel32@lo+4
; GISEL-NEXT: s_addc_u32 s19, s19, ret_i32_alwaysinline@rel32@hi+12
; GISEL-NEXT: s_mov_b32 s14, s16
; GISEL-NEXT: s_swappc_b64 s[30:31], s[18:19]
; GISEL-NEXT: v_add_f32_e32 v0, 1.0, v0
; GISEL-NEXT: flat_store_dword v[0:1], v0
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
  %val = call float @ret_i32_alwaysinline()
  %op = fadd float %val, 1.0
  store volatile float %op, ptr addrspace(1) poison
  ret void
}
define amdgpu_kernel void @test_bitcast_argument_type() #0 {
; SDAG-LABEL: test_bitcast_argument_type:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_mov_b32 s32, 0
; SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
; SDAG-NEXT: s_add_i32 s12, s12, s17
; SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; SDAG-NEXT: s_add_u32 s0, s0, s17
; SDAG-NEXT: s_addc_u32 s1, s1, 0
; SDAG-NEXT: s_mov_b32 s13, s15
; SDAG-NEXT: s_mov_b32 s12, s14
; SDAG-NEXT: s_getpc_b64 s[18:19]
; SDAG-NEXT: s_add_u32 s18, s18, ident_i32@rel32@lo+4
; SDAG-NEXT: s_addc_u32 s19, s19, ident_i32@rel32@hi+12
; SDAG-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; SDAG-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; SDAG-NEXT: v_or_b32_e32 v0, v0, v1
; SDAG-NEXT: v_or_b32_e32 v31, v0, v2
; SDAG-NEXT: v_mov_b32_e32 v0, 2.0
; SDAG-NEXT: s_mov_b32 s14, s16
; SDAG-NEXT: s_swappc_b64 s[30:31], s[18:19]
; SDAG-NEXT: v_add_i32_e32 v0, vcc, 1, v0
; SDAG-NEXT: flat_store_dword v[0:1], v0
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: test_bitcast_argument_type:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_mov_b32 s32, 0
; GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
; GISEL-NEXT: s_add_i32 s12, s12, s17
; GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GISEL-NEXT: s_add_u32 s0, s0, s17
; GISEL-NEXT: s_addc_u32 s1, s1, 0
; GISEL-NEXT: s_mov_b32 s13, s15
; GISEL-NEXT: s_mov_b32 s12, s14
; GISEL-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GISEL-NEXT: s_getpc_b64 s[18:19]
; GISEL-NEXT: s_add_u32 s18, s18, ident_i32@rel32@lo+4
; GISEL-NEXT: s_addc_u32 s19, s19, ident_i32@rel32@hi+12
; GISEL-NEXT: v_or_b32_e32 v0, v0, v1
; GISEL-NEXT: v_or_b32_e32 v31, v0, v2
; GISEL-NEXT: v_mov_b32_e32 v0, 2.0
; GISEL-NEXT: s_mov_b32 s14, s16
; GISEL-NEXT: s_swappc_b64 s[30:31], s[18:19]
; GISEL-NEXT: v_add_i32_e32 v0, vcc, 1, v0
; GISEL-NEXT: flat_store_dword v[0:1], v0
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
  %val = call i32 @ident_i32(float 2.0)
  %op = add i32 %val, 1
  store volatile i32 %op, ptr addrspace(1) poison
  ret void
}
define amdgpu_kernel void @test_bitcast_argument_and_return_types() #0 {
; SDAG-LABEL: test_bitcast_argument_and_return_types:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_mov_b32 s32, 0
; SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
; SDAG-NEXT: s_add_i32 s12, s12, s17
; SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; SDAG-NEXT: s_add_u32 s0, s0, s17
; SDAG-NEXT: s_addc_u32 s1, s1, 0
; SDAG-NEXT: s_mov_b32 s13, s15
; SDAG-NEXT: s_mov_b32 s12, s14
; SDAG-NEXT: s_getpc_b64 s[18:19]
; SDAG-NEXT: s_add_u32 s18, s18, ident_i32@rel32@lo+4
; SDAG-NEXT: s_addc_u32 s19, s19, ident_i32@rel32@hi+12
; SDAG-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; SDAG-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; SDAG-NEXT: v_or_b32_e32 v0, v0, v1
; SDAG-NEXT: v_or_b32_e32 v31, v0, v2
; SDAG-NEXT: v_mov_b32_e32 v0, 2.0
; SDAG-NEXT: s_mov_b32 s14, s16
; SDAG-NEXT: s_swappc_b64 s[30:31], s[18:19]
; SDAG-NEXT: v_add_f32_e32 v0, 1.0, v0
; SDAG-NEXT: flat_store_dword v[0:1], v0
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: test_bitcast_argument_and_return_types:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_mov_b32 s32, 0
; GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
; GISEL-NEXT: s_add_i32 s12, s12, s17
; GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GISEL-NEXT: s_add_u32 s0, s0, s17
; GISEL-NEXT: s_addc_u32 s1, s1, 0
; GISEL-NEXT: s_mov_b32 s13, s15
; GISEL-NEXT: s_mov_b32 s12, s14
; GISEL-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GISEL-NEXT: s_getpc_b64 s[18:19]
; GISEL-NEXT: s_add_u32 s18, s18, ident_i32@rel32@lo+4
; GISEL-NEXT: s_addc_u32 s19, s19, ident_i32@rel32@hi+12
; GISEL-NEXT: v_or_b32_e32 v0, v0, v1
; GISEL-NEXT: v_or_b32_e32 v31, v0, v2
; GISEL-NEXT: v_mov_b32_e32 v0, 2.0
; GISEL-NEXT: s_mov_b32 s14, s16
; GISEL-NEXT: s_swappc_b64 s[30:31], s[18:19]
; GISEL-NEXT: v_add_f32_e32 v0, 1.0, v0
; GISEL-NEXT: flat_store_dword v[0:1], v0
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
  %val = call float @ident_i32(float 2.0)
  %op = fadd float %val, 1.0
  store volatile float %op, ptr addrspace(1) poison
  ret void
}
define hidden i32 @use_workitem_id_x(i32 %arg0) #3 {
; GCN-LABEL: use_workitem_id_x:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v1, 0x3ff, v31
; GCN-NEXT: v_add_i32_e32 v0, vcc, v1, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %op = add i32 %id, %arg0
  ret i32 %op
}
define amdgpu_kernel void @test_bitcast_use_workitem_id_x() #3 {
; SDAG-LABEL: test_bitcast_use_workitem_id_x:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_mov_b32 s32, 0
; SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
; SDAG-NEXT: s_add_i32 s12, s12, s17
; SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; SDAG-NEXT: s_add_u32 s0, s0, s17
; SDAG-NEXT: s_addc_u32 s1, s1, 0
; SDAG-NEXT: s_mov_b32 s13, s15
; SDAG-NEXT: s_mov_b32 s12, s14
; SDAG-NEXT: v_mov_b32_e32 v31, v0
; SDAG-NEXT: s_getpc_b64 s[18:19]
; SDAG-NEXT: s_add_u32 s18, s18, use_workitem_id_x@rel32@lo+4
; SDAG-NEXT: s_addc_u32 s19, s19, use_workitem_id_x@rel32@hi+12
; SDAG-NEXT: v_mov_b32_e32 v0, 9
; SDAG-NEXT: s_mov_b32 s14, s16
; SDAG-NEXT: s_swappc_b64 s[30:31], s[18:19]
; SDAG-NEXT: v_add_f32_e32 v0, 1.0, v0
; SDAG-NEXT: flat_store_dword v[0:1], v0
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: test_bitcast_use_workitem_id_x:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_mov_b32 s32, 0
; GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
; GISEL-NEXT: s_add_i32 s12, s12, s17
; GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GISEL-NEXT: s_add_u32 s0, s0, s17
; GISEL-NEXT: s_addc_u32 s1, s1, 0
; GISEL-NEXT: v_mov_b32_e32 v31, v0
; GISEL-NEXT: s_mov_b32 s13, s15
; GISEL-NEXT: s_mov_b32 s12, s14
; GISEL-NEXT: s_getpc_b64 s[18:19]
; GISEL-NEXT: s_add_u32 s18, s18, use_workitem_id_x@rel32@lo+4
; GISEL-NEXT: s_addc_u32 s19, s19, use_workitem_id_x@rel32@hi+12
; GISEL-NEXT: v_mov_b32_e32 v0, 9
; GISEL-NEXT: s_mov_b32 s14, s16
; GISEL-NEXT: s_swappc_b64 s[30:31], s[18:19]
; GISEL-NEXT: v_add_f32_e32 v0, 1.0, v0
; GISEL-NEXT: flat_store_dword v[0:1], v0
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
  %val = call float @use_workitem_id_x(i32 9)
  %op = fadd float %val, 1.0
  store volatile float %op, ptr addrspace(1) poison
  ret void
}
@_ZTIi = external global ptr

declare i32 @__gxx_personality_v0(...)

define amdgpu_kernel void @test_invoke() #0 personality ptr @__gxx_personality_v0 {
; SDAG-LABEL: test_invoke:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_mov_b32 s32, 0
; SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
; SDAG-NEXT: s_add_i32 s12, s12, s17
; SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; SDAG-NEXT: s_add_u32 s0, s0, s17
; SDAG-NEXT: s_addc_u32 s1, s1, 0
; SDAG-NEXT: s_mov_b32 s13, s15
; SDAG-NEXT: s_mov_b32 s12, s14
; SDAG-NEXT: s_getpc_b64 s[18:19]
; SDAG-NEXT: s_add_u32 s18, s18, ident_i32@rel32@lo+4
; SDAG-NEXT: s_addc_u32 s19, s19, ident_i32@rel32@hi+12
; SDAG-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; SDAG-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; SDAG-NEXT: v_or_b32_e32 v0, v0, v1
; SDAG-NEXT: v_or_b32_e32 v31, v0, v2
; SDAG-NEXT: v_mov_b32_e32 v0, 2.0
; SDAG-NEXT: ; implicit-def: $sgpr15
; SDAG-NEXT: s_mov_b32 s14, s16
; SDAG-NEXT: s_swappc_b64 s[30:31], s[18:19]
; SDAG-NEXT: v_add_f32_e32 v0, 1.0, v0
; SDAG-NEXT: flat_store_dword v[0:1], v0
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: test_invoke:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_mov_b32 s32, 0
; GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
; GISEL-NEXT: s_add_i32 s12, s12, s17
; GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GISEL-NEXT: s_add_u32 s0, s0, s17
; GISEL-NEXT: s_addc_u32 s1, s1, 0
; GISEL-NEXT: s_mov_b32 s13, s15
; GISEL-NEXT: s_mov_b32 s12, s14
; GISEL-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GISEL-NEXT: s_getpc_b64 s[18:19]
; GISEL-NEXT: s_add_u32 s18, s18, ident_i32@rel32@lo+4
; GISEL-NEXT: s_addc_u32 s19, s19, ident_i32@rel32@hi+12
; GISEL-NEXT: v_or_b32_e32 v0, v0, v1
; GISEL-NEXT: v_or_b32_e32 v31, v0, v2
; GISEL-NEXT: v_mov_b32_e32 v0, 2.0
; GISEL-NEXT: ; implicit-def: $sgpr15
; GISEL-NEXT: s_mov_b32 s14, s16
; GISEL-NEXT: s_swappc_b64 s[30:31], s[18:19]
; GISEL-NEXT: v_add_f32_e32 v0, 1.0, v0
; GISEL-NEXT: flat_store_dword v[0:1], v0
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_endpgm
  %val = invoke float @ident_i32(float 2.0)
          to label %continue unwind label %broken

broken:
  landingpad { ptr, i32 } catch ptr @_ZTIi
  ret void

continue:
  %op = fadd float %val, 1.0
  store volatile float %op, ptr addrspace(1) poison
  ret void
}
; Callees appear last in the source file to test that we still lower their
; arguments before we lower any calls to them.

define hidden i32 @ret_i32_noinline() #0 {
; GCN-LABEL: ret_i32_noinline:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, 4
; GCN-NEXT: s_setpc_b64 s[30:31]
  ret i32 4
}
define hidden i32 @ret_i32_alwaysinline() #1 {
; GCN-LABEL: ret_i32_alwaysinline:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, 4
; GCN-NEXT: s_setpc_b64 s[30:31]
  ret i32 4
}
define hidden i32 @ident_i32(i32 %i) #0 {
; GCN-LABEL: ident_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
  ret i32 %i
}
declare i32 @llvm.amdgcn.workitem.id.x() #2

attributes #0 = { nounwind noinline }
attributes #1 = { alwaysinline nounwind }
attributes #2 = { nounwind readnone speculatable }
attributes #3 = { nounwind noinline "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" }