Files
llvm-project/offload/test/offloading/default_thread_limit.c
Nick Sarnie 26b777444b [offload][lit] XFAIL all failing tests on the Level Zero plugin (#174804)
We finally got our buildbot added (to staging, at least) so we want to
start running L0 tests in CI.
We need `check-offload` to pass though, so XFAIL everything failing.
There are a couple of `UNSUPPORTED` as well; those are for sporadically failing tests.

Also set the `gpu` and `intelgpu` LIT variables when testing the
`spirv64-intel` triple.

We have no DeviceRTL yet so basically everything fails, but we manage to
get

```
Total Discovered Tests: 432
Unsupported      : 169 (39.12%)
Passed           :  67 (15.51%)
Expectedly Failed: 196 (45.37%)
```

We still don't build the level zero plugin by default and these tests
don't run unless the plugin was built, so this has no effect on most
builds.

---------

Signed-off-by: Nick Sarnie <nick.sarnie@intel.com>
2026-01-07 19:20:30 +00:00

102 lines
3.1 KiB
C

// clang-format off
// RUN: %libomptarget-compile-generic
// RUN: env LIBOMPTARGET_INFO=16 \
// RUN: %libomptarget-run-generic 2>&1 | %fcheck-generic --check-prefix=DEFAULT
// RUN: %libomptarget-compile-generic -g
// RUN: env LIBOMPTARGET_INFO=16 \
// RUN: %libomptarget-run-generic 2>&1 | %fcheck-generic --check-prefix=DEFAULT
// REQUIRES: amdgpu
// XFAIL: intelgpu
// Trivial opaque helper: the optnone attribute keeps the optimizer from
// folding calls away, so values derived from it (loop bounds, clause
// arguments) stay runtime values instead of compile-time constants.
// Fix: use a proper C prototype `(void)` instead of the K&R-style empty
// parameter list `()`, which declares unspecified parameters in pre-C23 C.
__attribute__((optnone)) int optnone(void) { return 1; }
// Exercises how the offload runtime derives the kernel thread limit from
// thread_limit/num_threads clauses and the amdgpu_flat_work_group_size
// ompx_attribute. Each `// DEFAULT:` line is a FileCheck directive matched
// against the LIBOMPTARGET_INFO=16 kernel-launch output (see RUN lines above);
// directives and pragmas must stay in this exact order.
int main() {
// optnone() forces N to be a runtime value so the compiler cannot specialize
// the kernels on a known trip count.
int N = optnone() * 4098 * 32;
// No clauses: runtime picks the default thread count (128 or 256 depending on
// the target), which also becomes the flat-work-group-size bound.
// DEFAULT: [[NT:(128|256)]] (MaxFlatWorkGroupSize: [[NT]]
#pragma omp target teams distribute parallel for simd
for (int i = 0; i < N; ++i) {
optnone();
}
// DEFAULT: [[NT:(128|256)]] (MaxFlatWorkGroupSize: [[NT]]
#pragma omp target teams distribute parallel for simd
for (int i = 0; i < N; ++i) {
optnone();
}
// DEFAULT: [[NT:(128|256)]] (MaxFlatWorkGroupSize: [[NT]]
#pragma omp target teams distribute parallel for simd
for (int i = 0; i < N; ++i) {
optnone();
}
// Split combined construct (target + teams): same default as above.
// DEFAULT: [[NT:(128|256)]] (MaxFlatWorkGroupSize: [[NT]]
#pragma omp target
#pragma omp teams distribute parallel for
for (int i = 0; i < N; ++i) {
optnone();
}
// Runtime thread_limit value (opaque to the compiler): the launch uses 42
// threads, but the compiled kernel keeps the default upper bound of 1024.
// DEFAULT: 42 (MaxFlatWorkGroupSize: 1024
#pragma omp target thread_limit(optnone() * 42)
#pragma omp teams distribute parallel for
for (int i = 0; i < N; ++i) {
optnone();
}
// Same runtime thread_limit, but the ompx_attribute pins the kernel's
// flat-work-group-size to exactly 42 as well.
// DEFAULT: 42 (MaxFlatWorkGroupSize: 42
#pragma omp target thread_limit(optnone() * 42) ompx_attribute(__attribute__((amdgpu_flat_work_group_size(42, 42))))
#pragma omp teams distribute parallel for
for (int i = 0; i < N; ++i) {
optnone();
}
// Attribute alone (no thread_limit clause): the 42-wide bound still applies.
// DEFAULT: 42 (MaxFlatWorkGroupSize: 42
#pragma omp target ompx_attribute(__attribute__((amdgpu_flat_work_group_size(42, 42))))
#pragma omp teams distribute parallel for
for (int i = 0; i < N; ++i) {
optnone();
}
// Runtime num_threads/thread_limit values the compiler cannot see: only the
// default 1024 bound is checked (launch width is not pinned in the output).
// DEFAULT: MaxFlatWorkGroupSize: 1024
#pragma omp target
#pragma omp teams distribute parallel for num_threads(optnone() * 42)
for (int i = 0; i < N; ++i) {
optnone();
}
// DEFAULT: MaxFlatWorkGroupSize: 1024
#pragma omp target teams distribute parallel for thread_limit(optnone() * 42)
for (int i = 0; i < N; ++i) {
optnone();
}
// DEFAULT: MaxFlatWorkGroupSize: 1024
#pragma omp target teams distribute parallel for num_threads(optnone() * 42)
for (int i = 0; i < N; ++i) {
optnone();
}
// Compile-time constant clause arguments: the constant propagates into both
// the launch width and the kernel's flat-work-group-size bound.
// DEFAULT: 9 (MaxFlatWorkGroupSize: 9
#pragma omp target
#pragma omp teams distribute parallel for num_threads(9)
for (int i = 0; i < N; ++i) {
optnone();
}
// DEFAULT: 4 (MaxFlatWorkGroupSize: 4
#pragma omp target thread_limit(4)
#pragma omp teams distribute parallel for
for (int i = 0; i < N; ++i) {
optnone();
}
// DEFAULT: 4 (MaxFlatWorkGroupSize: 4
#pragma omp target
#pragma omp teams distribute parallel for thread_limit(4)
for (int i = 0; i < N; ++i) {
optnone();
}
// DEFAULT: 9 (MaxFlatWorkGroupSize: 9
#pragma omp target teams distribute parallel for num_threads(9)
for (int i = 0; i < N; ++i) {
optnone();
}
// DEFAULT: 4 (MaxFlatWorkGroupSize: 4
#pragma omp target teams distribute parallel for simd thread_limit(4)
for (int i = 0; i < N; ++i) {
optnone();
}
}