Files
llvm-project/clang/test/CodeGenHLSL/ArrayAssignable.logicalptr.hlsl
Nathan Gauër a51597f35f [HLSL] Handle logical pointer for array assign (#193227)
This commit adds SPIR-V testing to an existing test (almost NFC for the
DXIL testing). It also copies the test and invokes Clang with the
experimental logical-pointer flag.
Adding this flag exposed a missing case in the frontend, which is
handled by this commit.

Due to the difference in index handling between the structured GEP and
the legacy one, the cbuffer load codegen had to be rewritten. It is a
bit more naive, as we emit one GEP per nesting level, but this will be
cleaned up by later optimizations.
2026-04-27 13:51:29 +02:00

444 lines
35 KiB
HLSL

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -finclude-default-header -emit-llvm -disable-llvm-passes -o - %s -fexperimental-logical-pointer | FileCheck %s --check-prefixes=CHECK-DXIL
// RUN: %clang_cc1 -triple spirv-pc-vulkan1.3-library -finclude-default-header -emit-llvm -disable-llvm-passes -o - %s -fexperimental-logical-pointer | FileCheck %s --check-prefixes=CHECK-SPIR
// Simple aggregate type; used by the c4 cbuffer array below to exercise
// per-field cbuffer loads in arr_assign11.
struct S {
int x;
float f;
};
// cbuffer arrays used as copy sources in arr_assign8..11: a scalar array,
// a vector array, a nested array, and a struct array. Each gets its own
// hidden external global in the cbuffer address space (see CHECK lines below).
cbuffer CBArrays : register(b0) {
float c1[2];
int4 c2[2];
int c3[2][2];
S c4[2];
}
// CHECK-DXIL: [[CBLayout:%.*]] = type <{ <{ [1 x <{ float, target("dx.Padding", 12) }>], float }>, target("dx.Padding", 12), [2 x <4 x i32>], <{ [1 x <{ <{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }>, target("dx.Padding", 12) }>], <{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }> }>, target("dx.Padding", 12), <{ [1 x <{ %S, target("dx.Padding", 8) }>], %S }> }>
// CHECK-DXIL: @CBArrays.cb = global target("dx.CBuffer", [[CBLayout]])
// CHECK-DXIL: @c1 = external hidden addrspace(2) global <{ [1 x <{ float, target("dx.Padding", 12) }>], float }>, align 4
// CHECK-DXIL: @c2 = external hidden addrspace(2) global [2 x <4 x i32>], align 4
// CHECK-DXIL: @c3 = external hidden addrspace(2) global <{ [1 x <{ <{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }>, target("dx.Padding", 12) }>], <{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }> }>, align 4
// CHECK-DXIL: @c4 = external hidden addrspace(2) global <{ [1 x <{ %S, target("dx.Padding", 8) }>], %S }>, align 1
// CHECK-SPIR: [[CBLayout:%.*]] = type <{ <{ [1 x <{ float, target("spirv.Padding", 12) }>], float }>, target("spirv.Padding", 12), [2 x <4 x i32>], <{ [1 x <{ <{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }>, target("spirv.Padding", 12) }>], <{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }> }>, target("spirv.Padding", 12), <{ [1 x <{ %S, target("spirv.Padding", 8) }>], %S }> }>
// CHECK-SPIR: @CBArrays.cb = global target("spirv.VulkanBuffer", %__cblayout_CBArrays, 2, 0) poison
// CHECK-SPIR: @c1 = external hidden addrspace(12) global <{ [1 x <{ float, target("spirv.Padding", 12) }>], float }>, align 4
// CHECK-SPIR: @c2 = external hidden addrspace(12) global [2 x <4 x i32>], align 4
// CHECK-SPIR: @c3 = external hidden addrspace(12) global <{ [1 x <{ <{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }>, target("spirv.Padding", 12) }>], <{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }> }>, align 4
// CHECK-SPIR: @c4 = external hidden addrspace(12) global <{ [1 x <{ %S, target("spirv.Padding", 8) }>], %S }>, align 1
// CHECK-DXIL-LABEL: define hidden void @_Z11arr_assign1v(
// CHECK-DXIL-SAME: ) #[[ATTR2:[0-9]+]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[ARR:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign1v.Arr, i32 8, i1 false)
// CHECK-DXIL-NEXT: [[ARR2:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memset.p0.i32(ptr align 4 [[ARR2]], i8 0, i32 8, i1 false)
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i32 8, i1 false)
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z11arr_assign1v(
// CHECK-SPIR-SAME: ) #[[ATTR2:[0-9]+]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[ARR:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign1v.Arr, i64 8, i1 false)
// CHECK-SPIR-NEXT: [[ARR2:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ARR2]], i8 0, i64 8, i1 false)
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i64 8, i1 false)
// CHECK-SPIR-NEXT: ret void
//
// Whole-array assignment between two local arrays lowers to a single memcpy.
void arr_assign1() {
int Arr[2] = {0, 1};
int Arr2[2] = {0, 0};
Arr = Arr2;
}
// CHECK-DXIL-LABEL: define hidden void @_Z11arr_assign2v(
// CHECK-DXIL-SAME: ) #[[ATTR2]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[ARR:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign2v.Arr, i32 8, i1 false)
// CHECK-DXIL-NEXT: [[ARR2:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memset.p0.i32(ptr align 4 [[ARR2]], i8 0, i32 8, i1 false)
// CHECK-DXIL-NEXT: [[ARR3:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR3]], ptr align 4 @__const._Z11arr_assign2v.Arr3, i32 8, i1 false)
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR2]], ptr align 4 [[ARR3]], i32 8, i1 false)
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i32 8, i1 false)
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z11arr_assign2v(
// CHECK-SPIR-SAME: ) #[[ATTR2]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[ARR:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign2v.Arr, i64 8, i1 false)
// CHECK-SPIR-NEXT: [[ARR2:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ARR2]], i8 0, i64 8, i1 false)
// CHECK-SPIR-NEXT: [[ARR3:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR3]], ptr align 4 @__const._Z11arr_assign2v.Arr3, i64 8, i1 false)
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR2]], ptr align 4 [[ARR3]], i64 8, i1 false)
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i64 8, i1 false)
// CHECK-SPIR-NEXT: ret void
//
// Chained array assignment: Arr3 is copied into Arr2 first, then Arr2 into
// Arr (two memcpys in that order).
void arr_assign2() {
int Arr[2] = {0, 1};
int Arr2[2] = {0, 0};
int Arr3[2] = {3, 4};
Arr = Arr2 = Arr3;
}
// CHECK-DXIL-LABEL: define hidden void @_Z11arr_assign3v(
// CHECK-DXIL-SAME: ) #[[ATTR2]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[ARR2:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR2]], ptr align 4 @__const._Z11arr_assign3v.Arr2, i32 16, i1 false)
// CHECK-DXIL-NEXT: [[ARR3:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR3]], ptr align 4 @__const._Z11arr_assign3v.Arr3, i32 16, i1 false)
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR2]], ptr align 4 [[ARR3]], i32 16, i1 false)
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z11arr_assign3v(
// CHECK-SPIR-SAME: ) #[[ATTR2]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[ARR2:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR2]], ptr align 4 @__const._Z11arr_assign3v.Arr2, i64 16, i1 false)
// CHECK-SPIR-NEXT: [[ARR3:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR3]], ptr align 4 @__const._Z11arr_assign3v.Arr3, i64 16, i1 false)
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR2]], ptr align 4 [[ARR3]], i64 16, i1 false)
// CHECK-SPIR-NEXT: ret void
//
// Whole-array assignment of a multidimensional array is still one memcpy.
void arr_assign3() {
int Arr2[2][2] = {{0, 0}, {1, 1}};
int Arr3[2][2] = {{1, 1}, {0, 0}};
Arr2 = Arr3;
}
// CHECK-DXIL-LABEL: define hidden void @_Z11arr_assign4v(
// CHECK-DXIL-SAME: ) #[[ATTR2]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[ARR:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign4v.Arr, i32 8, i1 false)
// CHECK-DXIL-NEXT: [[ARR2:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memset.p0.i32(ptr align 4 [[ARR2]], i8 0, i32 8, i1 false)
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i32 8, i1 false)
// CHECK-DXIL-NEXT: [[TMP0:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[ARR]], i32 0)
// CHECK-DXIL-NEXT: store i32 6, ptr [[TMP0]], align 4
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z11arr_assign4v(
// CHECK-SPIR-SAME: ) #[[ATTR2]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[ARR:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign4v.Arr, i64 8, i1 false)
// CHECK-SPIR-NEXT: [[ARR2:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ARR2]], i8 0, i64 8, i1 false)
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i64 8, i1 false)
// CHECK-SPIR-NEXT: [[TMP1:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[ARR]], i64 0)
// CHECK-SPIR-NEXT: store i32 6, ptr [[TMP1]], align 4
// CHECK-SPIR-NEXT: ret void
//
// The result of an array assignment is an lvalue: after the memcpy, element
// 0 of Arr is addressed via a structured GEP and stored to.
void arr_assign4() {
int Arr[2] = {0, 1};
int Arr2[2] = {0, 0};
(Arr = Arr2)[0] = 6;
}
// CHECK-DXIL-LABEL: define hidden void @_Z11arr_assign5v(
// CHECK-DXIL-SAME: ) #[[ATTR2]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[ARR:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign5v.Arr, i32 8, i1 false)
// CHECK-DXIL-NEXT: [[ARR2:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memset.p0.i32(ptr align 4 [[ARR2]], i8 0, i32 8, i1 false)
// CHECK-DXIL-NEXT: [[ARR3:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR3]], ptr align 4 @__const._Z11arr_assign5v.Arr3, i32 8, i1 false)
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR2]], ptr align 4 [[ARR3]], i32 8, i1 false)
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i32 8, i1 false)
// CHECK-DXIL-NEXT: [[TMP0:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[ARR]], i32 0)
// CHECK-DXIL-NEXT: store i32 6, ptr [[TMP0]], align 4
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z11arr_assign5v(
// CHECK-SPIR-SAME: ) #[[ATTR2]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[ARR:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign5v.Arr, i64 8, i1 false)
// CHECK-SPIR-NEXT: [[ARR2:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ARR2]], i8 0, i64 8, i1 false)
// CHECK-SPIR-NEXT: [[ARR3:%.*]] = call elementtype([2 x i32]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR3]], ptr align 4 @__const._Z11arr_assign5v.Arr3, i64 8, i1 false)
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR2]], ptr align 4 [[ARR3]], i64 8, i1 false)
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i64 8, i1 false)
// CHECK-SPIR-NEXT: [[TMP1:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[ARR]], i64 0)
// CHECK-SPIR-NEXT: store i32 6, ptr [[TMP1]], align 4
// CHECK-SPIR-NEXT: ret void
//
// Same as arr_assign4 but with a chained assignment feeding the subscript.
void arr_assign5() {
int Arr[2] = {0, 1};
int Arr2[2] = {0, 0};
int Arr3[2] = {3, 4};
(Arr = Arr2 = Arr3)[0] = 6;
}
// CHECK-DXIL-LABEL: define hidden void @_Z11arr_assign6v(
// CHECK-DXIL-SAME: ) #[[ATTR2]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[ARR:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign6v.Arr, i32 16, i1 false)
// CHECK-DXIL-NEXT: [[ARR2:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR2]], ptr align 4 @__const._Z11arr_assign6v.Arr2, i32 16, i1 false)
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i32 16, i1 false)
// CHECK-DXIL-NEXT: [[TMP0:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [2 x i32]]) [[ARR]], i32 0)
// CHECK-DXIL-NEXT: [[TMP1:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP0]], i32 0)
// CHECK-DXIL-NEXT: store i32 6, ptr [[TMP1]], align 4
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z11arr_assign6v(
// CHECK-SPIR-SAME: ) #[[ATTR2]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[ARR:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign6v.Arr, i64 16, i1 false)
// CHECK-SPIR-NEXT: [[ARR2:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR2]], ptr align 4 @__const._Z11arr_assign6v.Arr2, i64 16, i1 false)
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i64 16, i1 false)
// CHECK-SPIR-NEXT: [[TMP1:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [2 x i32]]) [[ARR]], i64 0)
// CHECK-SPIR-NEXT: [[TMP2:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP1]], i64 0)
// CHECK-SPIR-NEXT: store i32 6, ptr [[TMP2]], align 4
// CHECK-SPIR-NEXT: ret void
//
// Indexing into a 2D assignment result emits one structured GEP per array
// level before the final store.
void arr_assign6() {
int Arr[2][2] = {{0, 0}, {1, 1}};
int Arr2[2][2] = {{1, 1}, {0, 0}};
(Arr = Arr2)[0][0] = 6;
}
// CHECK-DXIL-LABEL: define hidden void @_Z11arr_assign7v(
// CHECK-DXIL-SAME: ) #[[ATTR2]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[ARR:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign7v.Arr, i32 16, i1 false)
// CHECK-DXIL-NEXT: [[ARR2:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR2]], ptr align 4 @__const._Z11arr_assign7v.Arr2, i32 16, i1 false)
// CHECK-DXIL-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i32 16, i1 false)
// CHECK-DXIL-NEXT: [[TMP0:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [2 x i32]]) [[ARR]], i32 0)
// CHECK-DXIL-NEXT: store i32 6, ptr [[TMP0]], align 4
// CHECK-DXIL-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP0]], i32 1)
// CHECK-DXIL-NEXT: store i32 6, ptr [[ARRAYINIT_ELEMENT]], align 4
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z11arr_assign7v(
// CHECK-SPIR-SAME: ) #[[ATTR2]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[ARR:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 @__const._Z11arr_assign7v.Arr, i64 16, i1 false)
// CHECK-SPIR-NEXT: [[ARR2:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR2]], ptr align 4 @__const._Z11arr_assign7v.Arr2, i64 16, i1 false)
// CHECK-SPIR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARR]], ptr align 4 [[ARR2]], i64 16, i1 false)
// CHECK-SPIR-NEXT: [[TMP1:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [2 x i32]]) [[ARR]], i64 0)
// CHECK-SPIR-NEXT: store i32 6, ptr [[TMP1]], align 4
// CHECK-SPIR-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP1]], i64 1)
// CHECK-SPIR-NEXT: store i32 6, ptr [[ARRAYINIT_ELEMENT]], align 4
// CHECK-SPIR-NEXT: ret void
//
// Assigning an initializer list to one row of the assignment result stores
// each element individually through structured GEPs.
void arr_assign7() {
int Arr[2][2] = {{0, 1}, {2, 3}};
int Arr2[2][2] = {{0, 0}, {1, 1}};
(Arr = Arr2)[0] = {6, 6};
}
// Verify you can assign from a cbuffer array
// CHECK-DXIL-LABEL: define hidden void @_Z11arr_assign8v(
// CHECK-DXIL-SAME: ) #[[ATTR2]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[C:%.*]] = call elementtype([2 x float]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: [[TMP0:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype(<{ [1 x <{ float, target("dx.Padding", 12) }>], float }>) @c1, i32 0, i32 0, i32 0)
// CHECK-DXIL-NEXT: [[TMP1:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x float]) [[C]], i32 0)
// CHECK-DXIL-NEXT: [[CBUF_LOAD:%.*]] = load float, ptr addrspace(2) [[TMP0]], align 4
// CHECK-DXIL-NEXT: store float [[CBUF_LOAD]], ptr [[TMP1]], align 4
// CHECK-DXIL-NEXT: [[TMP2:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype(<{ [1 x <{ float, target("dx.Padding", 12) }>], float }>) @c1, i32 1)
// CHECK-DXIL-NEXT: [[TMP3:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x float]) [[C]], i32 1)
// CHECK-DXIL-NEXT: [[CBUF_LOAD1:%.*]] = load float, ptr addrspace(2) [[TMP2]], align 4
// CHECK-DXIL-NEXT: store float [[CBUF_LOAD1]], ptr [[TMP3]], align 4
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z11arr_assign8v(
// CHECK-SPIR-SAME: ) #[[ATTR2]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[C:%.*]] = call elementtype([2 x float]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: [[TMP1:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype(<{ [1 x <{ float, target("spirv.Padding", 12) }>], float }>) @c1, i32 0, i32 0, i32 0)
// CHECK-SPIR-NEXT: [[TMP2:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x float]) [[C]], i32 0)
// CHECK-SPIR-NEXT: [[CBUF_LOAD:%.*]] = load float, ptr addrspace(12) [[TMP1]], align 4
// CHECK-SPIR-NEXT: store float [[CBUF_LOAD]], ptr [[TMP2]], align 4
// CHECK-SPIR-NEXT: [[TMP3:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype(<{ [1 x <{ float, target("spirv.Padding", 12) }>], float }>) @c1, i32 1)
// CHECK-SPIR-NEXT: [[TMP4:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x float]) [[C]], i32 1)
// CHECK-SPIR-NEXT: [[CBUF_LOAD1:%.*]] = load float, ptr addrspace(12) [[TMP3]], align 4
// CHECK-SPIR-NEXT: store float [[CBUF_LOAD1]], ptr [[TMP4]], align 4
// CHECK-SPIR-NEXT: ret void
//
void arr_assign8() {
// Copy from a padded cbuffer float array: one load/store pair per element,
// with GEPs stepping over the target("…Padding") fields on the cbuffer side.
float C[2];
C = c1;
}
// CHECK-DXIL-LABEL: define hidden void @_Z11arr_assign9v(
// CHECK-DXIL-SAME: ) #[[ATTR2]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[C:%.*]] = call elementtype([2 x <4 x i32>]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: [[TMP0:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype([2 x <4 x i32>]) @c2, i32 0)
// CHECK-DXIL-NEXT: [[TMP1:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x <4 x i32>]) [[C]], i32 0)
// CHECK-DXIL-NEXT: [[CBUF_LOAD:%.*]] = load <4 x i32>, ptr addrspace(2) [[TMP0]], align 4
// CHECK-DXIL-NEXT: store <4 x i32> [[CBUF_LOAD]], ptr [[TMP1]], align 4
// CHECK-DXIL-NEXT: [[TMP2:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype([2 x <4 x i32>]) @c2, i32 1)
// CHECK-DXIL-NEXT: [[TMP3:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x <4 x i32>]) [[C]], i32 1)
// CHECK-DXIL-NEXT: [[CBUF_LOAD1:%.*]] = load <4 x i32>, ptr addrspace(2) [[TMP2]], align 4
// CHECK-DXIL-NEXT: store <4 x i32> [[CBUF_LOAD1]], ptr [[TMP3]], align 4
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z11arr_assign9v(
// CHECK-SPIR-SAME: ) #[[ATTR2]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[C:%.*]] = call elementtype([2 x <4 x i32>]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: [[TMP1:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype([2 x <4 x i32>]) @c2, i32 0)
// CHECK-SPIR-NEXT: [[TMP2:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x <4 x i32>]) [[C]], i32 0)
// CHECK-SPIR-NEXT: [[CBUF_LOAD:%.*]] = load <4 x i32>, ptr addrspace(12) [[TMP1]], align 4
// CHECK-SPIR-NEXT: store <4 x i32> [[CBUF_LOAD]], ptr [[TMP2]], align 4
// CHECK-SPIR-NEXT: [[TMP3:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype([2 x <4 x i32>]) @c2, i32 1)
// CHECK-SPIR-NEXT: [[TMP4:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x <4 x i32>]) [[C]], i32 1)
// CHECK-SPIR-NEXT: [[CBUF_LOAD1:%.*]] = load <4 x i32>, ptr addrspace(12) [[TMP3]], align 4
// CHECK-SPIR-NEXT: store <4 x i32> [[CBUF_LOAD1]], ptr [[TMP4]], align 4
// CHECK-SPIR-NEXT: ret void
//
void arr_assign9() {
// TODO: We should be able to just memcpy here.
// See https://github.com/llvm/wg-hlsl/issues/351
// Copy from an unpadded cbuffer vector array: element-wise <4 x i32>
// load/store pairs rather than a single memcpy.
int4 C[2];
C = c2;
}
// CHECK-DXIL-LABEL: define hidden void @_Z12arr_assign10v(
// CHECK-DXIL-SAME: ) #[[ATTR2]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[C:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: [[TMP0:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype(<{ [1 x <{ <{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }>, target("dx.Padding", 12) }>], <{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }> }>) @c3, i32 0, i32 0, i32 0)
// CHECK-DXIL-NEXT: [[TMP1:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [2 x i32]]) [[C]], i32 0)
// CHECK-DXIL-NEXT: [[TMP2:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype(<{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }>) [[TMP0]], i32 0, i32 0, i32 0)
// CHECK-DXIL-NEXT: [[TMP3:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP1]], i32 0)
// CHECK-DXIL-NEXT: [[CBUF_LOAD:%.*]] = load i32, ptr addrspace(2) [[TMP2]], align 4
// CHECK-DXIL-NEXT: store i32 [[CBUF_LOAD]], ptr [[TMP3]], align 4
// CHECK-DXIL-NEXT: [[TMP4:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype(<{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }>) [[TMP0]], i32 1)
// CHECK-DXIL-NEXT: [[TMP5:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP1]], i32 1)
// CHECK-DXIL-NEXT: [[CBUF_LOAD1:%.*]] = load i32, ptr addrspace(2) [[TMP4]], align 4
// CHECK-DXIL-NEXT: store i32 [[CBUF_LOAD1]], ptr [[TMP5]], align 4
// CHECK-DXIL-NEXT: [[TMP6:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype(<{ [1 x <{ <{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }>, target("dx.Padding", 12) }>], <{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }> }>) @c3, i32 1)
// CHECK-DXIL-NEXT: [[TMP7:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [2 x i32]]) [[C]], i32 1)
// CHECK-DXIL-NEXT: [[TMP8:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype(<{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }>) [[TMP6]], i32 0, i32 0, i32 0)
// CHECK-DXIL-NEXT: [[TMP9:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP7]], i32 0)
// CHECK-DXIL-NEXT: [[CBUF_LOAD2:%.*]] = load i32, ptr addrspace(2) [[TMP8]], align 4
// CHECK-DXIL-NEXT: store i32 [[CBUF_LOAD2]], ptr [[TMP9]], align 4
// CHECK-DXIL-NEXT: [[TMP10:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype(<{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }>) [[TMP6]], i32 1)
// CHECK-DXIL-NEXT: [[TMP11:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP7]], i32 1)
// CHECK-DXIL-NEXT: [[CBUF_LOAD3:%.*]] = load i32, ptr addrspace(2) [[TMP10]], align 4
// CHECK-DXIL-NEXT: store i32 [[CBUF_LOAD3]], ptr [[TMP11]], align 4
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z12arr_assign10v(
// CHECK-SPIR-SAME: ) #[[ATTR2]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[C:%.*]] = call elementtype([2 x [2 x i32]]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: [[TMP1:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype(<{ [1 x <{ <{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }>, target("spirv.Padding", 12) }>], <{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }> }>) @c3, i32 0, i32 0, i32 0)
// CHECK-SPIR-NEXT: [[TMP2:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [2 x i32]]) [[C]], i32 0)
// CHECK-SPIR-NEXT: [[TMP3:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype(<{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }>) [[TMP1]], i32 0, i32 0, i32 0)
// CHECK-SPIR-NEXT: [[TMP4:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP2]], i32 0)
// CHECK-SPIR-NEXT: [[CBUF_LOAD:%.*]] = load i32, ptr addrspace(12) [[TMP3]], align 4
// CHECK-SPIR-NEXT: store i32 [[CBUF_LOAD]], ptr [[TMP4]], align 4
// CHECK-SPIR-NEXT: [[TMP5:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype(<{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }>) [[TMP1]], i32 1)
// CHECK-SPIR-NEXT: [[TMP6:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP2]], i32 1)
// CHECK-SPIR-NEXT: [[CBUF_LOAD1:%.*]] = load i32, ptr addrspace(12) [[TMP5]], align 4
// CHECK-SPIR-NEXT: store i32 [[CBUF_LOAD1]], ptr [[TMP6]], align 4
// CHECK-SPIR-NEXT: [[TMP7:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype(<{ [1 x <{ <{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }>, target("spirv.Padding", 12) }>], <{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }> }>) @c3, i32 1)
// CHECK-SPIR-NEXT: [[TMP8:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [2 x i32]]) [[C]], i32 1)
// CHECK-SPIR-NEXT: [[TMP9:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype(<{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }>) [[TMP7]], i32 0, i32 0, i32 0)
// CHECK-SPIR-NEXT: [[TMP10:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP8]], i32 0)
// CHECK-SPIR-NEXT: [[CBUF_LOAD2:%.*]] = load i32, ptr addrspace(12) [[TMP9]], align 4
// CHECK-SPIR-NEXT: store i32 [[CBUF_LOAD2]], ptr [[TMP10]], align 4
// CHECK-SPIR-NEXT: [[TMP11:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype(<{ [1 x <{ i32, target("spirv.Padding", 12) }>], i32 }>) [[TMP7]], i32 1)
// CHECK-SPIR-NEXT: [[TMP12:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x i32]) [[TMP8]], i32 1)
// CHECK-SPIR-NEXT: [[CBUF_LOAD3:%.*]] = load i32, ptr addrspace(12) [[TMP11]], align 4
// CHECK-SPIR-NEXT: store i32 [[CBUF_LOAD3]], ptr [[TMP12]], align 4
// CHECK-SPIR-NEXT: ret void
//
void arr_assign10() {
// Copy from a nested (2x2) cbuffer array: one structured GEP per nesting
// level, with scalar i32 loads/stores at the leaves.
int C[2][2];
C = c3;
}
// CHECK-DXIL-LABEL: define hidden void @_Z12arr_assign11v(
// CHECK-DXIL-SAME: ) #[[ATTR2]] {
// CHECK-DXIL-NEXT: [[ENTRY:.*:]]
// CHECK-DXIL-NEXT: [[C:%.*]] = call elementtype([2 x [[STRUCT_S:%.*]]]) ptr @llvm.structured.alloca.p0()
// CHECK-DXIL-NEXT: [[TMP0:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype(<{ [1 x <{ [[S:%.*]], target("dx.Padding", 8) }>], [[S]] }>) @c4, i32 0, i32 0, i32 0)
// CHECK-DXIL-NEXT: [[TMP1:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [[STRUCT_S]]]) [[C]], i32 0)
// CHECK-DXIL-NEXT: [[TMP2:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype([[S]]) [[TMP0]], i32 0)
// CHECK-DXIL-NEXT: [[TMP3:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([[STRUCT_S]]) [[TMP1]], i32 0)
// CHECK-DXIL-NEXT: [[CBUF_LOAD:%.*]] = load i32, ptr addrspace(2) [[TMP2]], align 4
// CHECK-DXIL-NEXT: store i32 [[CBUF_LOAD]], ptr [[TMP3]], align 4
// CHECK-DXIL-NEXT: [[TMP4:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype([[S]]) [[TMP0]], i32 1)
// CHECK-DXIL-NEXT: [[TMP5:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([[STRUCT_S]]) [[TMP1]], i32 1)
// CHECK-DXIL-NEXT: [[CBUF_LOAD1:%.*]] = load float, ptr addrspace(2) [[TMP4]], align 4
// CHECK-DXIL-NEXT: store float [[CBUF_LOAD1]], ptr [[TMP5]], align 4
// CHECK-DXIL-NEXT: [[TMP6:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype(<{ [1 x <{ [[S]], target("dx.Padding", 8) }>], [[S]] }>) @c4, i32 1)
// CHECK-DXIL-NEXT: [[TMP7:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [[STRUCT_S]]]) [[C]], i32 1)
// CHECK-DXIL-NEXT: [[TMP8:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype([[S]]) [[TMP6]], i32 0)
// CHECK-DXIL-NEXT: [[TMP9:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([[STRUCT_S]]) [[TMP7]], i32 0)
// CHECK-DXIL-NEXT: [[CBUF_LOAD2:%.*]] = load i32, ptr addrspace(2) [[TMP8]], align 4
// CHECK-DXIL-NEXT: store i32 [[CBUF_LOAD2]], ptr [[TMP9]], align 4
// CHECK-DXIL-NEXT: [[TMP10:%.*]] = call ptr addrspace(2) (ptr addrspace(2), ...) @llvm.structured.gep.p2(ptr addrspace(2) elementtype([[S]]) [[TMP6]], i32 1)
// CHECK-DXIL-NEXT: [[TMP11:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([[STRUCT_S]]) [[TMP7]], i32 1)
// CHECK-DXIL-NEXT: [[CBUF_LOAD3:%.*]] = load float, ptr addrspace(2) [[TMP10]], align 4
// CHECK-DXIL-NEXT: store float [[CBUF_LOAD3]], ptr [[TMP11]], align 4
// CHECK-DXIL-NEXT: ret void
//
// CHECK-SPIR-LABEL: define hidden spir_func void @_Z12arr_assign11v(
// CHECK-SPIR-SAME: ) #[[ATTR2]] {
// CHECK-SPIR-NEXT: [[ENTRY:.*:]]
// CHECK-SPIR-NEXT: [[TMP0:%.*]] = call token @llvm.experimental.convergence.entry()
// CHECK-SPIR-NEXT: [[C:%.*]] = call elementtype([2 x [[STRUCT_S:%.*]]]) ptr @llvm.structured.alloca.p0()
// CHECK-SPIR-NEXT: [[TMP1:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype(<{ [1 x <{ [[S:%.*]], target("spirv.Padding", 8) }>], [[S]] }>) @c4, i32 0, i32 0, i32 0)
// CHECK-SPIR-NEXT: [[TMP2:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [[STRUCT_S]]]) [[C]], i32 0)
// CHECK-SPIR-NEXT: [[TMP3:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype([[S]]) [[TMP1]], i32 0)
// CHECK-SPIR-NEXT: [[TMP4:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([[STRUCT_S]]) [[TMP2]], i32 0)
// CHECK-SPIR-NEXT: [[CBUF_LOAD:%.*]] = load i32, ptr addrspace(12) [[TMP3]], align 4
// CHECK-SPIR-NEXT: store i32 [[CBUF_LOAD]], ptr [[TMP4]], align 4
// CHECK-SPIR-NEXT: [[TMP5:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype([[S]]) [[TMP1]], i32 1)
// CHECK-SPIR-NEXT: [[TMP6:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([[STRUCT_S]]) [[TMP2]], i32 1)
// CHECK-SPIR-NEXT: [[CBUF_LOAD1:%.*]] = load float, ptr addrspace(12) [[TMP5]], align 4
// CHECK-SPIR-NEXT: store float [[CBUF_LOAD1]], ptr [[TMP6]], align 4
// CHECK-SPIR-NEXT: [[TMP7:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype(<{ [1 x <{ [[S]], target("spirv.Padding", 8) }>], [[S]] }>) @c4, i32 1)
// CHECK-SPIR-NEXT: [[TMP8:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([2 x [[STRUCT_S]]]) [[C]], i32 1)
// CHECK-SPIR-NEXT: [[TMP9:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype([[S]]) [[TMP7]], i32 0)
// CHECK-SPIR-NEXT: [[TMP10:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([[STRUCT_S]]) [[TMP8]], i32 0)
// CHECK-SPIR-NEXT: [[CBUF_LOAD2:%.*]] = load i32, ptr addrspace(12) [[TMP9]], align 4
// CHECK-SPIR-NEXT: store i32 [[CBUF_LOAD2]], ptr [[TMP10]], align 4
// CHECK-SPIR-NEXT: [[TMP11:%.*]] = call ptr addrspace(12) (ptr addrspace(12), ...) @llvm.structured.gep.p12(ptr addrspace(12) elementtype([[S]]) [[TMP7]], i32 1)
// CHECK-SPIR-NEXT: [[TMP12:%.*]] = call ptr (ptr, ...) @llvm.structured.gep.p0(ptr elementtype([[STRUCT_S]]) [[TMP8]], i32 1)
// CHECK-SPIR-NEXT: [[CBUF_LOAD3:%.*]] = load float, ptr addrspace(12) [[TMP11]], align 4
// CHECK-SPIR-NEXT: store float [[CBUF_LOAD3]], ptr [[TMP12]], align 4
// CHECK-SPIR-NEXT: ret void
//
void arr_assign11() {
// Copy from a cbuffer array of structs: each struct field (int x, float f)
// is loaded from the cbuffer and stored to the local copy individually.
S C[2];
C = c4;
}