[mlir][NFC] update include create APIs (3/n) #149687
Conversation
Force-pushed from c4d4d7a to 1863ce9.
✅ With the latest revision this PR passed the C/C++ code formatter.
Force-pushed from 8a51a76 to d44fb73.
Force-pushed from d44fb73 to b34390b.
@llvm/pr-subscribers-mlir @llvm/pr-subscribers-mlir-bufferization @llvm/pr-subscribers-mlir-sparse @llvm/pr-subscribers-mlir-linalg @llvm/pr-subscribers-mlir-tosa

Author: Maksim Levental (makslevental)

Changes: See #147168 for more info.

Full diff: https://github.com/llvm/llvm-project/pull/149687.diff

14 Files Affected:
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
index 6245f88db3d19..f3b34f9fded7f 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -651,7 +651,8 @@ void replaceOpWithBufferizedValues(RewriterBase &rewriter, Operation *op,
template <typename OpTy, typename... Args>
OpTy replaceOpWithNewBufferizedOp(RewriterBase &rewriter, Operation *op,
Args &&...args) {
- auto newOp = rewriter.create<OpTy>(op->getLoc(), std::forward<Args>(args)...);
+ auto newOp =
+ OpTy::create(rewriter, op->getLoc(), std::forward<Args>(args)...);
replaceOpWithBufferizedValues(rewriter, op, newOp->getResults());
return newOp;
}
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
index 596c470ef6d23..a2409f2796b94 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
@@ -148,14 +148,14 @@ struct BufferResultsToOutParamsOpts {
/// Default memref.alloc is used
AllocationFn allocationFn = [](OpBuilder &builder, Location loc,
MemRefType type) {
- return builder.create<memref::AllocOp>(loc, type).getResult();
+ return memref::AllocOp::create(builder, loc, type).getResult();
};
/// Memcpy function; used to create a copy between two memrefs.
/// Default memref.copy is used.
MemCpyFn memCpyFn = [](OpBuilder &builder, Location loc, Value from,
Value to) {
- builder.create<memref::CopyOp>(loc, from, to);
+ memref::CopyOp::create(builder, loc, from, to);
return success();
};
diff --git a/mlir/include/mlir/Dialect/LLVMIR/BasicPtxBuilderInterface.td b/mlir/include/mlir/Dialect/LLVMIR/BasicPtxBuilderInterface.td
index df5a2448bd779..e98b94b5b3052 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/BasicPtxBuilderInterface.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/BasicPtxBuilderInterface.td
@@ -114,7 +114,7 @@ def BasicPtxBuilderOpInterface : OpInterface<"BasicPtxBuilderInterface"> {
/*methodBody=*/"",
/*defaultImpl=*/ [{
mlir::Operation* op = $_op;
- return rewriter.create<LLVM::ConstantOp>(
+ return LLVM::ConstantOp::create(rewriter,
op->getLoc(), rewriter.getIntegerType(32), val);
}]
>,
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 7bbc56f549c0b..ca0cc03acd7ad 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -454,7 +454,7 @@ def TransposeOp : LinalgStructuredBase_Op<"transpose", [
static void regionBuilder(mlir::ImplicitLocOpBuilder &b, mlir::Block &block,
mlir::ArrayRef<mlir::NamedAttribute>, function_ref<InFlightDiagnostic()> emitError) {
OpBuilder::InsertionGuard guard(b);
- b.create<linalg::YieldOp>(b.getLoc(), block.getArgument(0));
+ linalg::YieldOp::create(b, b.getLoc(), block.getArgument(0));
}
static std::function<void(mlir::ImplicitLocOpBuilder &, mlir::Block &,
@@ -528,7 +528,7 @@ def BroadcastOp : LinalgStructuredBase_Op<"broadcast", [
mlir::ArrayRef<mlir::NamedAttribute>,
function_ref<InFlightDiagnostic()> emitError) {
OpBuilder::InsertionGuard guard(b);
- b.create<linalg::YieldOp>(b.getLoc(), block.getArgument(0));
+ linalg::YieldOp::create(b, b.getLoc(), block.getArgument(0));
}
static std::function<void(mlir::ImplicitLocOpBuilder &, mlir::Block &,
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
index 620fd7c63146d..a6d904df78f0d 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
@@ -131,7 +131,7 @@ class SparseTensorType {
/// ambiguity whenever passing a `SparseTensorType` directly to a
/// function which is overloaded to accept either `Type` or `TypeRange`.
/// In particular, this includes `RewriterBase::replaceOpWithNewOp<OpTy>`
- /// and `OpBuilder::create<OpTy>` whenever the `OpTy::build` is overloaded
+ /// and `OpTy::create` whenever the `OpTy::build` is overloaded
/// thus. This happens because the `TypeRange<T>(T&&)` ctor is implicit
/// as well, and there's no SFINAE we can add to this method that would
/// block subsequent application of that ctor. The only way to fix the
diff --git a/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h b/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h
index 6f3b0916a7a60..cdcd099ec7d22 100644
--- a/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h
+++ b/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h
@@ -70,7 +70,7 @@ checkHasDynamicBatchDims(PatternRewriter &rewriter, Op op,
}
dynamicDims.push_back(
- rewriter.create<tensor::DimOp>(op->getLoc(), params[0], 0));
+ tensor::DimOp::create(rewriter, op->getLoc(), params[0], 0));
return dynamicDims;
}
@@ -91,7 +91,7 @@ namespace {
template <typename TosaOp, typename... Args>
TosaOp createOpAndInferShape(ImplicitLocOpBuilder &builder, Type resultTy,
Args &&...args) {
- auto op = builder.create<TosaOp>(resultTy, args...);
+ auto op = TosaOp::create(builder, resultTy, args...);
InferShapedTypeOpInterface shapeInterface =
dyn_cast<InferShapedTypeOpInterface>(op.getOperation());
diff --git a/mlir/include/mlir/Dialect/Tosa/Utils/QuantUtils.h b/mlir/include/mlir/Dialect/Tosa/Utils/QuantUtils.h
index bdd8713037eea..9d9a934cdfd5e 100644
--- a/mlir/include/mlir/Dialect/Tosa/Utils/QuantUtils.h
+++ b/mlir/include/mlir/Dialect/Tosa/Utils/QuantUtils.h
@@ -47,7 +47,7 @@ Value getConstTensorInt(OpBuilder &builder, Location loc,
mlir::RankedTensorType const_type =
RankedTensorType::get({count}, element_type);
mlir::DenseElementsAttr const_attr = DenseElementsAttr::get(const_type, vec);
- auto const_op = builder.create<tosa::ConstOp>(loc, const_type, const_attr);
+ auto const_op = tosa::ConstOp::create(builder, loc, const_type, const_attr);
return const_op.getResult();
}
diff --git a/mlir/include/mlir/Dialect/Utils/ReshapeOpsUtils.h b/mlir/include/mlir/Dialect/Utils/ReshapeOpsUtils.h
index 704e39e908841..6d4ea5b5136de 100644
--- a/mlir/include/mlir/Dialect/Utils/ReshapeOpsUtils.h
+++ b/mlir/include/mlir/Dialect/Utils/ReshapeOpsUtils.h
@@ -332,11 +332,11 @@ struct ComposeCollapseOfExpandOp : public OpRewritePattern<CollapseOpTy> {
// the first dynamic size.
Value result = dynamicSizes[0];
for (Value v : llvm::drop_begin(dynamicSizes))
- result = rewriter.create<arith::MulIOp>(loc, result, v);
+ result = arith::MulIOp::create(rewriter, loc, result, v);
if (numStaticElems != 1) {
- result = rewriter.create<arith::MulIOp>(
- loc, result,
- rewriter.create<arith::ConstantIndexOp>(loc, numStaticElems));
+ result = arith::MulIOp::create(
+ rewriter, loc, result,
+ arith::ConstantIndexOp::create(rewriter, loc, numStaticElems));
}
newOutputShape.push_back(result);
}
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index cc8421b23a074..d68138acec0db 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -118,9 +118,10 @@ inline auto makeVscaleConstantBuilder(PatternRewriter &rewriter, Location loc) {
Value vscale = nullptr;
return [loc, vscale, &rewriter](int64_t multiplier) mutable {
if (!vscale)
- vscale = rewriter.create<vector::VectorScaleOp>(loc);
- return rewriter.create<arith::MulIOp>(
- loc, vscale, rewriter.create<arith::ConstantIndexOp>(loc, multiplier));
+ vscale = vector::VectorScaleOp::create(rewriter, loc);
+ return arith::MulIOp::create(
+ rewriter, loc, vscale,
+ arith::ConstantIndexOp::create(rewriter, loc, multiplier));
};
}
diff --git a/mlir/include/mlir/IR/Builders.h b/mlir/include/mlir/IR/Builders.h
index 5a2520b48a7b3..2e356dec1981f 100644
--- a/mlir/include/mlir/IR/Builders.h
+++ b/mlir/include/mlir/IR/Builders.h
@@ -552,7 +552,7 @@ class OpBuilder : public Builder {
template <typename OpTy, typename... Args>
std::enable_if_t<OpTy::template hasTrait<OpTrait::ZeroResults>(), OpTy>
createOrFold(Location location, Args &&...args) {
- auto op = create<OpTy>(location, std::forward<Args>(args)...);
+ auto op = OpTy::create(*this, location, std::forward<Args>(args)...);
SmallVector<Value, 0> unused;
(void)tryFold(op.getOperation(), unused);
@@ -662,7 +662,7 @@ class ImplicitLocOpBuilder : public mlir::OpBuilder {
/// location.
template <typename OpTy, typename... Args>
OpTy create(Args &&...args) {
- return OpBuilder::create<OpTy>(curLoc, std::forward<Args>(args)...);
+ return OpTy::create(*this, curLoc, std::forward<Args>(args)...);
}
/// Create an operation of specific op type at the current insertion point,
diff --git a/mlir/include/mlir/IR/PatternMatch.h b/mlir/include/mlir/IR/PatternMatch.h
index 3a2dbd136b438..b3608b4394f45 100644
--- a/mlir/include/mlir/IR/PatternMatch.h
+++ b/mlir/include/mlir/IR/PatternMatch.h
@@ -517,7 +517,9 @@ class RewriterBase : public OpBuilder {
/// ops must match. The original op is erased.
template <typename OpTy, typename... Args>
OpTy replaceOpWithNewOp(Operation *op, Args &&...args) {
- auto newOp = create<OpTy>(op->getLoc(), std::forward<Args>(args)...);
+ auto builder = static_cast<OpBuilder *>(this);
+ auto newOp =
+ OpTy::create(*builder, op->getLoc(), std::forward<Args>(args)...);
replaceOp(op, newOp.getOperation());
return newOp;
}
diff --git a/mlir/include/mlir/Interfaces/ViewLikeInterface.h b/mlir/include/mlir/Interfaces/ViewLikeInterface.h
index 14427a97a5502..db9c37fc3dc99 100644
--- a/mlir/include/mlir/Interfaces/ViewLikeInterface.h
+++ b/mlir/include/mlir/Interfaces/ViewLikeInterface.h
@@ -109,8 +109,8 @@ class OpWithOffsetSizesAndStridesConstantArgumentFolder final
// Create the new op in canonical form.
auto newOp =
- rewriter.create<OpType>(op.getLoc(), resultType, op.getSource(),
- mixedOffsets, mixedSizes, mixedStrides);
+ OpType::create(rewriter, op.getLoc(), resultType, op.getSource(),
+ mixedOffsets, mixedSizes, mixedStrides);
CastOpFunc()(rewriter, op, newOp);
return success();
diff --git a/mlir/include/mlir/Parser/Parser.h b/mlir/include/mlir/Parser/Parser.h
index 828760fcbefa9..8a8cfb9090143 100644
--- a/mlir/include/mlir/Parser/Parser.h
+++ b/mlir/include/mlir/Parser/Parser.h
@@ -64,7 +64,7 @@ inline OwningOpRef<ContainerOpT> constructContainerOpForParserIfNecessary(
"block that has an implicit terminator or does not require one");
OpBuilder builder(context);
- ContainerOpT op = builder.create<ContainerOpT>(sourceFileLoc);
+ ContainerOpT op = ContainerOpT::create(builder, sourceFileLoc);
OwningOpRef<ContainerOpT> opRef(op);
assert(op->getNumRegions() == 1 &&
llvm::hasSingleElement(op->getRegion(0)) &&
diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp b/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp
index bda614a97ab42..7468fbe5401ce 100644
--- a/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp
+++ b/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp
@@ -43,8 +43,9 @@ struct TestOpConversion : public OpConversionPattern<test_irdl_to_cpp::BeefOp> {
assert(adaptor.getStructuredOperands(0).size() == 1);
assert(adaptor.getStructuredOperands(1).size() == 1);
- auto bar = rewriter.replaceOpWithNewOp<test_irdl_to_cpp::BarOp>(
- op, op->getResultTypes().front());
+ auto bar = rewriter.create<test_irdl_to_cpp::BarOp>(
+ op->getLoc(), op->getResultTypes().front());
+ rewriter.replaceOp(op, bar.getOperation());
rewriter.setInsertionPointAfter(bar);
rewriter.create<test_irdl_to_cpp::HashOp>(
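The change applied throughout the diff is mechanical: calls to the OpBuilder::create<OpTy> member template are replaced with the op's static OpTy::create method, which takes the builder as its first argument (see #147168 for the motivation). A minimal before/after sketch, using a placeholder op name FooOp that is not one of the ops touched by this PR:

// Before: op built through the builder's member template.
auto op = rewriter.create<FooOp>(loc, resultType, operand);

// After: op built through the generated static create, builder passed explicitly.
auto op = FooOp::create(rewriter, loc, resultType, operand);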
See llvm#147168 for more info.
Force-pushed from b34390b to 81abf9c.
stream << "\n";
stream << llvm::formatv(
    R"(static {0} create(::mlir::OpBuilder &opBuilder, ::mlir::Location location, {1} {2} ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {{});)",
    opStrings.opCppName, resultParams, operandParams);
stream << "\n";
stream << llvm::formatv(
    R"(static {0} create(::mlir::ImplicitLocOpBuilder &opBuilder, {1} {2} ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {{});)",
    opStrings.opCppName, resultParams, operandParams);
stream << "\n";
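For context, the two formatv templates above emit a pair of static create declarations on each op class generated by irdl-to-cpp: one taking an OpBuilder plus a Location, the other an ImplicitLocOpBuilder. For a hypothetical generated op class FooOp with a single result type and a single operand (placeholder names and parameters, not taken from this PR), the emitted declarations would look roughly like:

// Hypothetical generator output; resultParams/operandParams expand to the op's actual signature.
static FooOp create(::mlir::OpBuilder &opBuilder, ::mlir::Location location, ::mlir::Type result, ::mlir::Value operand, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
static FooOp create(::mlir::ImplicitLocOpBuilder &opBuilder, ::mlir::Type result, ::mlir::Value operand, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});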
@Moxinilian @math-fehr can you review the changes here and below (enabling create support for irdl-to-cpp).
See #147168 for more info.