AMDGPU uses metadata to guide atomic-related optimisations. SPIR-V was not handling it, which led to significant, spurious performance differences. This patch fixes the oversight by encoding the metadata as UserSemantic string decorations applied to the atomic instructions.
269 lines · 15 KiB · C
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -fnative-half-arguments-and-returns -triple amdgcn-amd-amdhsa-gnu -target-cpu gfx900 -emit-llvm -o - %s | FileCheck -check-prefixes=CHECK,SAFE %s
// RUN: %clang_cc1 -fnative-half-arguments-and-returns -triple amdgcn-amd-amdhsa-gnu -target-cpu gfx900 -emit-llvm -munsafe-fp-atomics -o - %s | FileCheck -check-prefixes=CHECK,UNSAFE %s
// RUN: %clang_cc1 -fnative-half-arguments-and-returns -triple spirv64-amd-amdhsa -emit-llvm -o - %s | FileCheck -check-prefixes=CHECK-SPIRV,SAFE-SPIRV %s
// RUN: %clang_cc1 -fnative-half-arguments-and-returns -triple spirv64-amd-amdhsa -emit-llvm -munsafe-fp-atomics -o - %s | FileCheck -check-prefixes=CHECK-SPIRV,UNSAFE-SPIRV %s
// SAFE-LABEL: define dso_local float @test_float_post_inc(
|
|
// SAFE-SAME: ) #[[ATTR0:[0-9]+]] {
|
|
// SAFE-NEXT: [[ENTRY:.*:]]
|
|
// SAFE-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_post_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2:![0-9]+]], !amdgpu.no.remote.memory [[META2]]
|
|
// SAFE-NEXT: ret float [[TMP0]]
|
|
//
|
|
// UNSAFE-LABEL: define dso_local float @test_float_post_inc(
|
|
// UNSAFE-SAME: ) #[[ATTR0:[0-9]+]] {
|
|
// UNSAFE-NEXT: [[ENTRY:.*:]]
|
|
// UNSAFE-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_post_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2:![0-9]+]], !amdgpu.no.remote.memory [[META2]], !amdgpu.ignore.denormal.mode [[META2]]
|
|
// UNSAFE-NEXT: ret float [[TMP0]]
|
|
//
|
|
// SAFE-SPIRV-LABEL: define spir_func float @test_float_post_inc(
|
|
// SAFE-SPIRV-SAME: ) addrspace(4) #[[ATTR0:[0-9]+]] {
|
|
// SAFE-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// SAFE-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspace(4) addrspacecast (ptr addrspace(1) @test_float_post_inc.n to ptr addrspace(4)), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2:![0-9]+]], !amdgpu.no.remote.memory [[META2]]
|
|
// SAFE-SPIRV-NEXT: ret float [[TMP0]]
|
|
//
|
|
// UNSAFE-SPIRV-LABEL: define spir_func float @test_float_post_inc(
|
|
// UNSAFE-SPIRV-SAME: ) addrspace(4) #[[ATTR0:[0-9]+]] {
|
|
// UNSAFE-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// UNSAFE-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspace(4) addrspacecast (ptr addrspace(1) @test_float_post_inc.n to ptr addrspace(4)), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2:![0-9]+]], !amdgpu.no.remote.memory [[META2]], !amdgpu.ignore.denormal.mode [[META2]]
|
|
// UNSAFE-SPIRV-NEXT: ret float [[TMP0]]
|
|
//
|
|
float test_float_post_inc()
|
|
{
|
|
static _Atomic float n;
|
|
return n++;
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local float @test_float_post_dc(
|
|
// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_post_dc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2:![0-9]+]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-NEXT: ret float [[TMP0]]
|
|
//
|
|
// CHECK-SPIRV-LABEL: define spir_func float @test_float_post_dc(
|
|
// CHECK-SPIRV-SAME: ) addrspace(4) #[[ATTR0:[0-9]+]] {
|
|
// CHECK-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspace(4) addrspacecast (ptr addrspace(1) @test_float_post_dc.n to ptr addrspace(4)), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2:![0-9]+]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-SPIRV-NEXT: ret float [[TMP0]]
|
|
//
|
|
float test_float_post_dc()
|
|
{
|
|
static _Atomic float n;
|
|
return n--;
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local float @test_float_pre_dc(
|
|
// CHECK-SAME: ) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_pre_dc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = fsub float [[TMP0]], 1.000000e+00
|
|
// CHECK-NEXT: ret float [[TMP1]]
|
|
//
|
|
// CHECK-SPIRV-LABEL: define spir_func float @test_float_pre_dc(
|
|
// CHECK-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// CHECK-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspace(4) addrspacecast (ptr addrspace(1) @test_float_pre_dc.n to ptr addrspace(4)), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-SPIRV-NEXT: [[TMP1:%.*]] = fsub float [[TMP0]], 1.000000e+00
|
|
// CHECK-SPIRV-NEXT: ret float [[TMP1]]
|
|
//
|
|
float test_float_pre_dc()
|
|
{
|
|
static _Atomic float n;
|
|
return --n;
|
|
}
|
|
|
|
// SAFE-LABEL: define dso_local float @test_float_pre_inc(
|
|
// SAFE-SAME: ) #[[ATTR0]] {
|
|
// SAFE-NEXT: [[ENTRY:.*:]]
|
|
// SAFE-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_pre_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// SAFE-NEXT: [[TMP1:%.*]] = fadd float [[TMP0]], 1.000000e+00
|
|
// SAFE-NEXT: ret float [[TMP1]]
|
|
//
|
|
// UNSAFE-LABEL: define dso_local float @test_float_pre_inc(
|
|
// UNSAFE-SAME: ) #[[ATTR0]] {
|
|
// UNSAFE-NEXT: [[ENTRY:.*:]]
|
|
// UNSAFE-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_pre_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]], !amdgpu.ignore.denormal.mode [[META2]]
|
|
// UNSAFE-NEXT: [[TMP1:%.*]] = fadd float [[TMP0]], 1.000000e+00
|
|
// UNSAFE-NEXT: ret float [[TMP1]]
|
|
//
|
|
// SAFE-SPIRV-LABEL: define spir_func float @test_float_pre_inc(
|
|
// SAFE-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// SAFE-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// SAFE-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspace(4) addrspacecast (ptr addrspace(1) @test_float_pre_inc.n to ptr addrspace(4)), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// SAFE-SPIRV-NEXT: [[TMP1:%.*]] = fadd float [[TMP0]], 1.000000e+00
|
|
// SAFE-SPIRV-NEXT: ret float [[TMP1]]
|
|
//
|
|
// UNSAFE-SPIRV-LABEL: define spir_func float @test_float_pre_inc(
|
|
// UNSAFE-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// UNSAFE-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// UNSAFE-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspace(4) addrspacecast (ptr addrspace(1) @test_float_pre_inc.n to ptr addrspace(4)), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]], !amdgpu.ignore.denormal.mode [[META2]]
|
|
// UNSAFE-SPIRV-NEXT: [[TMP1:%.*]] = fadd float [[TMP0]], 1.000000e+00
|
|
// UNSAFE-SPIRV-NEXT: ret float [[TMP1]]
|
|
//
|
|
float test_float_pre_inc()
|
|
{
|
|
static _Atomic float n;
|
|
return ++n;
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local double @test_double_post_inc(
|
|
// CHECK-SAME: ) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_post_inc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-NEXT: ret double [[TMP0]]
|
|
//
|
|
// CHECK-SPIRV-LABEL: define spir_func double @test_double_post_inc(
|
|
// CHECK-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// CHECK-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspace(4) addrspacecast (ptr addrspace(1) @test_double_post_inc.n to ptr addrspace(4)), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-SPIRV-NEXT: ret double [[TMP0]]
|
|
//
|
|
double test_double_post_inc()
|
|
{
|
|
static _Atomic double n;
|
|
return n++;
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local double @test_double_post_dc(
|
|
// CHECK-SAME: ) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_post_dc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-NEXT: ret double [[TMP0]]
|
|
//
|
|
// CHECK-SPIRV-LABEL: define spir_func double @test_double_post_dc(
|
|
// CHECK-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// CHECK-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspace(4) addrspacecast (ptr addrspace(1) @test_double_post_dc.n to ptr addrspace(4)), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-SPIRV-NEXT: ret double [[TMP0]]
|
|
//
|
|
double test_double_post_dc()
|
|
{
|
|
static _Atomic double n;
|
|
return n--;
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local double @test_double_pre_dc(
|
|
// CHECK-SAME: ) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_pre_dc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
|
|
// CHECK-NEXT: ret double [[TMP1]]
|
|
//
|
|
// CHECK-SPIRV-LABEL: define spir_func double @test_double_pre_dc(
|
|
// CHECK-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// CHECK-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspace(4) addrspacecast (ptr addrspace(1) @test_double_pre_dc.n to ptr addrspace(4)), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-SPIRV-NEXT: [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
|
|
// CHECK-SPIRV-NEXT: ret double [[TMP1]]
|
|
//
|
|
double test_double_pre_dc()
|
|
{
|
|
static _Atomic double n;
|
|
return --n;
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local double @test_double_pre_inc(
|
|
// CHECK-SAME: ) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_pre_inc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
|
|
// CHECK-NEXT: ret double [[TMP1]]
|
|
//
|
|
// CHECK-SPIRV-LABEL: define spir_func double @test_double_pre_inc(
|
|
// CHECK-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// CHECK-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspace(4) addrspacecast (ptr addrspace(1) @test_double_pre_inc.n to ptr addrspace(4)), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-SPIRV-NEXT: [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
|
|
// CHECK-SPIRV-NEXT: ret double [[TMP1]]
|
|
//
|
|
double test_double_pre_inc()
|
|
{
|
|
static _Atomic double n;
|
|
return ++n;
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local half @test__Float16_post_inc(
|
|
// CHECK-SAME: ) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-NEXT: ret half [[TMP0]]
|
|
//
|
|
// CHECK-SPIRV-LABEL: define spir_func half @test__Float16_post_inc(
|
|
// CHECK-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// CHECK-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspace(4) addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr addrspace(4)), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-SPIRV-NEXT: ret half [[TMP0]]
|
|
//
|
|
_Float16 test__Float16_post_inc()
|
|
{
|
|
static _Atomic _Float16 n;
|
|
return n++;
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local half @test__Float16_post_dc(
|
|
// CHECK-SAME: ) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-NEXT: ret half [[TMP0]]
|
|
//
|
|
// CHECK-SPIRV-LABEL: define spir_func half @test__Float16_post_dc(
|
|
// CHECK-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// CHECK-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspace(4) addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr addrspace(4)), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-SPIRV-NEXT: ret half [[TMP0]]
|
|
//
|
|
_Float16 test__Float16_post_dc()
|
|
{
|
|
static _Atomic _Float16 n;
|
|
return n--;
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local half @test__Float16_pre_dc(
|
|
// CHECK-SAME: ) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_dc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = fsub half [[TMP0]], 0xH3C00
|
|
// CHECK-NEXT: ret half [[TMP1]]
|
|
//
|
|
// CHECK-SPIRV-LABEL: define spir_func half @test__Float16_pre_dc(
|
|
// CHECK-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// CHECK-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspace(4) addrspacecast (ptr addrspace(1) @test__Float16_pre_dc.n to ptr addrspace(4)), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-SPIRV-NEXT: [[TMP1:%.*]] = fsub half [[TMP0]], 0xH3C00
|
|
// CHECK-SPIRV-NEXT: ret half [[TMP1]]
|
|
//
|
|
_Float16 test__Float16_pre_dc()
|
|
{
|
|
static _Atomic _Float16 n;
|
|
return --n;
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local half @test__Float16_pre_inc(
|
|
// CHECK-SAME: ) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_inc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = fadd half [[TMP0]], 0xH3C00
|
|
// CHECK-NEXT: ret half [[TMP1]]
|
|
//
|
|
// CHECK-SPIRV-LABEL: define spir_func half @test__Float16_pre_inc(
|
|
// CHECK-SPIRV-SAME: ) addrspace(4) #[[ATTR0]] {
|
|
// CHECK-SPIRV-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspace(4) addrspacecast (ptr addrspace(1) @test__Float16_pre_inc.n to ptr addrspace(4)), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META2]], !amdgpu.no.remote.memory [[META2]]
|
|
// CHECK-SPIRV-NEXT: [[TMP1:%.*]] = fadd half [[TMP0]], 0xH3C00
|
|
// CHECK-SPIRV-NEXT: ret half [[TMP1]]
|
|
//
|
|
_Float16 test__Float16_pre_inc()
|
|
{
|
|
static _Atomic _Float16 n;
|
|
return ++n;
|
|
}
|
|
//.
// SAFE: [[META2]] = !{}
//.
// UNSAFE: [[META2]] = !{}
//.
// SAFE-SPIRV: [[META2]] = !{}
//.
// UNSAFE-SPIRV: [[META2]] = !{}
//.