Files
Twice b919d62eae [MLIR][Python] Forward the name of MLIR types to Python side (#174700)
In this PR, I added a C API for each (upstream) MLIR type to retrieve
its type name (for example, `IntegerType` -> `mlirIntegerTypeGetName()`
-> `"builtin.integer"`), and exposed a corresponding `type_name` class
attribute in the Python bindings (e.g., `IntegerType.type_name` ->
`"builtin.integer"`). This can be used in various places to avoid
hard-coded strings, such as eliminating the manual string in
`irdl.base("!builtin.integer")`.

Note that parts of this PR (mainly mechanical changes) were produced via
GitHub Copilot and GPT-5.2. I have manually reviewed the changes and
verified them with tests to ensure correctness.
2026-01-07 16:27:31 +08:00

36 lines
1.4 KiB
C++

//===- NVGPU.cpp - C Interface for NVGPU dialect ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "mlir-c/Dialect/NVGPU.h"
#include "mlir/CAPI/Registration.h"
#include "mlir/Dialect/NVGPU/IR/NVGPUDialect.h"
#include "mlir/IR/BuiltinTypes.h"
using namespace mlir;
using namespace mlir::nvgpu;
MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(NVGPU, nvgpu, mlir::nvgpu::NVGPUDialect)
/// Checks whether the wrapped MLIR type is an NVGPU TensorMapDescriptorType.
bool mlirTypeIsANVGPUTensorMapDescriptorType(MlirType type) {
  Type unwrapped = unwrap(type);
  return isa<nvgpu::TensorMapDescriptorType>(unwrapped);
}
/// Builds an NVGPU TensorMapDescriptorType in the given context.
/// The integer arguments are raw values of the corresponding NVGPU enum kinds
/// (swizzle, L2 promotion, out-of-bounds fill, interleave); the tensor type
/// argument must wrap a MemRefType.
MlirType mlirNVGPUTensorMapDescriptorTypeGet(MlirContext ctx,
                                             MlirType tensorMemrefType,
                                             int swizzle, int l2promo,
                                             int oobFill, int interleave) {
  auto memref = cast<MemRefType>(unwrap(tensorMemrefType));
  auto descriptor = nvgpu::TensorMapDescriptorType::get(
      unwrap(ctx), memref, TensorMapSwizzleKind(swizzle),
      TensorMapL2PromoKind(l2promo), TensorMapOOBKind(oobFill),
      TensorMapInterleaveKind(interleave));
  return wrap(descriptor);
}
/// Returns the registered type name of NVGPU's TensorMapDescriptorType as a
/// string reference, for use by bindings that need the name without
/// hard-coding it.
MlirStringRef mlirNVGPUTensorMapDescriptorTypeGetName(void) {
  auto typeName = nvgpu::TensorMapDescriptorType::name;
  return wrap(typeName);
}