
Commit

Reorganize pass folders
hanchenye committed Jan 15, 2024
1 parent 82ee7c4 commit 23302be
Showing 11 changed files with 160 additions and 87 deletions.
3 changes: 3 additions & 0 deletions include/scalehls/Dialect/HLS/Transforms/Passes.h
@@ -23,6 +23,9 @@ namespace mlir {
namespace scalehls {
namespace hls {

std::unique_ptr<Pass> createConvertEmptyTensorToAllocTensorPass();
std::unique_ptr<Pass> createConvertDataflowToFuncPass();
std::unique_ptr<Pass> createCreateDataflowPass();
std::unique_ptr<Pass> createLowerDataflowPass();

#define GEN_PASS_CLASSES
19 changes: 19 additions & 0 deletions include/scalehls/Dialect/HLS/Transforms/Passes.td
@@ -9,6 +9,25 @@

include "mlir/Pass/PassBase.td"

def ConvertEmptyTensorToAllocTensor :
Pass<"scalehls-convert-empty-tensor-to-alloc-tensor", "func::FuncOp"> {
let summary = "Convert tensor.empty to hls.fdf.alloc_tensor";
let constructor = [{
mlir::scalehls::hls::createConvertEmptyTensorToAllocTensorPass()
}];
}

def ConvertDataflowToFunc :
Pass<"scalehls-convert-dataflow-to-func", "mlir::ModuleOp"> {
let summary = "Convert structural dataflow to function for C++ emission";
let constructor = "mlir::scalehls::hls::createConvertDataflowToFuncPass()";
}

def CreateDataflow : Pass<"scalehls-create-dataflow", "func::FuncOp"> {
let summary = "Convert linalg to functional dataflow";
let constructor = "mlir::scalehls::hls::createCreateDataflowPass()";
}

def LowerDataflow : Pass<"scalehls-lower-dataflow", "func::FuncOp"> {
let summary = "Convert functional to structural dataflow";
let constructor = "mlir::scalehls::hls::createLowerDataflowPass()";
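
For orientation, the four relocated HLS passes can be chained through the constructors now declared in include/scalehls/Dialect/HLS/Transforms/Passes.h. Below is a minimal sketch; the helper function is hypothetical (not part of this commit), while the pass anchors (func::FuncOp vs. mlir::ModuleOp) follow the definitions above.

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "scalehls/Dialect/HLS/Transforms/Passes.h"

// Hypothetical helper chaining the reorganized HLS passes in the order the
// pipelines in lib/Transforms/Pipelines.cpp appear to use them.
static void addHLSDataflowPasses(mlir::OpPassManager &pm) {
  using namespace mlir::scalehls;
  // tensor.empty -> hls.fdf.alloc_tensor, anchored on func::FuncOp.
  pm.addNestedPass<mlir::func::FuncOp>(
      hls::createConvertEmptyTensorToAllocTensorPass());
  // linalg -> functional dataflow tasks, anchored on func::FuncOp.
  pm.addNestedPass<mlir::func::FuncOp>(hls::createCreateDataflowPass());
  // Functional -> structural dataflow, anchored on func::FuncOp.
  pm.addNestedPass<mlir::func::FuncOp>(hls::createLowerDataflowPass());
  // Structural dataflow -> func for C++ emission, anchored on the module.
  pm.addPass(hls::createConvertDataflowToFuncPass());
}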
11 changes: 3 additions & 8 deletions include/scalehls/Transforms/Passes.h
@@ -22,20 +22,15 @@ class FuncOp;
namespace mlir {
namespace scalehls {

//===----------------------------------------------------------------------===//
// Conversion Passes
//===----------------------------------------------------------------------===//

std::unique_ptr<Pass> createConvertLinalgToDataflowPass();
std::unique_ptr<Pass> createConvertDataflowToFuncPass();
std::unique_ptr<Pass> createRaiseSCFToAffinePass();

//===----------------------------------------------------------------------===//
// Transform Passes
//===----------------------------------------------------------------------===//

using namespace bufferization;

std::unique_ptr<Pass> createApplyTransformPatternPass();
std::unique_ptr<Pass> createRaiseSCFToAffinePass();

std::unique_ptr<Pass> createComprehensiveBufferizePass(
std::optional<BufferizationOptions::AllocationFn> allocationFn =
std::nullopt,
20 changes: 5 additions & 15 deletions include/scalehls/Transforms/Passes.td
@@ -10,30 +10,20 @@
include "mlir/Pass/PassBase.td"

//===----------------------------------------------------------------------===//
// Conversion Passes
// Transform Passes
//===----------------------------------------------------------------------===//

def ConvertLinalgToDataflow :
Pass<"scalehls-convert-linalg-to-dataflow", "func::FuncOp"> {
let summary = "Convert linalg to functional dataflow";
let constructor = "mlir::scalehls::createConvertLinalgToDataflowPass()";
}

def ConvertDataflowToFunc :
Pass<"scalehls-convert-dataflow-to-func", "mlir::ModuleOp"> {
let summary = "Convert structural dataflow to function for C++ emission";
let constructor = "mlir::scalehls::createConvertDataflowToFuncPass()";
def ApplyTransformPattern :
Pass<"scalehls-apply-transform-pattern", "func::FuncOp"> {
let summary = "A test pass to apply transformation patterns";
let constructor = "mlir::scalehls::createApplyTransformPatternPass()";
}

def RaiseSCFToAffine : Pass<"scalehls-raise-scf-to-affine"> {
let summary = "Raise SCF to affine";
let constructor = "mlir::scalehls::createRaiseSCFToAffinePass()";
}

//===----------------------------------------------------------------------===//
// Transform Passes
//===----------------------------------------------------------------------===//

def ComprehensiveBufferize :
Pass<"scalehls-comprehensive-bufferize", "mlir::ModuleOp"> {
let summary = "Comprehensively bufferize the program";
3 changes: 3 additions & 0 deletions lib/Dialect/HLS/Transforms/CMakeLists.txt
@@ -1,5 +1,8 @@
add_mlir_dialect_library(MLIRScaleHLSHLSTransforms
BufferizableOpInterfaceImpl.cpp
CreateDataflow.cpp
ConvertEmptyTensorToAllocTensor.cpp
ConvertDataflowToFunc.cpp
LowerDataflow.cpp

DEPENDS
lib/Transforms/ConvertDataflowToFunc.cpp → lib/Dialect/HLS/Transforms/ConvertDataflowToFunc.cpp
@@ -7,8 +7,8 @@
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "scalehls/Dialect/HLS/Transforms/Passes.h"
#include "scalehls/Dialect/HLS/Utils/Utils.h"
#include "scalehls/Transforms/Passes.h"
#include "scalehls/Utils/Utils.h"

using namespace mlir;
@@ -108,6 +108,6 @@ struct ConvertDataflowToFunc
};
} // namespace

std::unique_ptr<Pass> scalehls::createConvertDataflowToFuncPass() {
std::unique_ptr<Pass> scalehls::hls::createConvertDataflowToFuncPass() {
return std::make_unique<ConvertDataflowToFunc>();
}
lib/Transforms/ConvertLinalgToDataflow.cpp → lib/Dialect/HLS/Transforms/ConvertEmptyTensorToAllocTensor.cpp
@@ -7,8 +7,8 @@
#include "mlir/IR/Dominance.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "scalehls/Dialect/HLS/Transforms/Passes.h"
#include "scalehls/Dialect/HLS/Utils/Utils.h"
#include "scalehls/Transforms/Passes.h"

using namespace mlir;
using namespace scalehls;
@@ -41,60 +41,13 @@ struct ConvertLinalgFillOp : public OpRewritePattern<linalg::FillOp> {
} // namespace

namespace {
// TODO: For now, we also dispatch most tensor ops into separate tasks. We
// should come up with a better way to handle them.
struct DispatchFuncOp : public OpRewritePattern<func::FuncOp> {
using OpRewritePattern<func::FuncOp>::OpRewritePattern;

LogicalResult matchAndRewrite(func::FuncOp func,
PatternRewriter &rewriter) const override {
auto dispatch = dispatchBlock(&func.front(), rewriter);
if (!dispatch)
return failure();

// Ensure each AllocTensorOp is only used once.
for (auto allocTensor :
llvm::make_early_inc_range(dispatch.getOps<hls::AllocTensorOp>())) {
for (auto &use : llvm::make_early_inc_range(allocTensor->getUses())) {
rewriter.setInsertionPoint(use.getOwner());
auto newAllocTensor =
cast<hls::AllocTensorOp>(rewriter.clone(*allocTensor));
use.set(newAllocTensor);
}
}

unsigned taskId = 0;
for (auto &op : llvm::make_early_inc_range(dispatch.getOps())) {
if (auto linalgOp = dyn_cast<linalg::LinalgOp>(op)) {
if (linalgOp.hasDynamicShape())
return linalgOp.emitOpError("cannot handle dynamic shape yet");
auto task = fuseOpsIntoTask({linalgOp}, rewriter);

std::string taskName =
func.getName().str() + "_" + std::to_string(taskId++);
linalgOp.getOperation()->setAttr(taskName, rewriter.getUnitAttr());
task->setAttr(taskName, rewriter.getUnitAttr());

} else if (isa<tensor::TensorDialect>(op.getDialect())) {
auto task = fuseOpsIntoTask({&op}, rewriter);

std::string taskName =
func.getName().str() + "_" + std::to_string(taskId++);
op.setAttr(taskName, rewriter.getUnitAttr());
task->setAttr(taskName, rewriter.getUnitAttr());
}
}
return success();
}
};
} // namespace

namespace {
struct ConvertLinalgToDataflow
: public ConvertLinalgToDataflowBase<ConvertLinalgToDataflow> {
struct ConvertEmptyTensorToAllocTensor
: public ConvertEmptyTensorToAllocTensorBase<
ConvertEmptyTensorToAllocTensor> {
void runOnOperation() override {
auto func = getOperation();
auto context = func.getContext();
auto builder = OpBuilder(context);

// Convert linalg ops to FDF ops.
ConversionTarget target(*context);
@@ -108,14 +61,21 @@ struct ConvertLinalgToDataflow
if (failed(applyPartialConversion(func, target, std::move(patterns))))
return signalPassFailure();

// Dispatch the current function to create the dataflow hierarchy.
patterns.clear();
patterns.add<DispatchFuncOp>(context);
(void)applyOpPatternsAndFold({func}, std::move(patterns));
// Ensure each AllocTensorOp is only used once.
for (auto allocTensor :
llvm::make_early_inc_range(func.getOps<hls::AllocTensorOp>())) {
for (auto &use : llvm::make_early_inc_range(allocTensor->getUses())) {
builder.setInsertionPoint(use.getOwner());
auto newAllocTensor =
cast<hls::AllocTensorOp>(builder.clone(*allocTensor));
use.set(newAllocTensor);
}
}
}
};
} // namespace

std::unique_ptr<Pass> scalehls::createConvertLinalgToDataflowPass() {
return std::make_unique<ConvertLinalgToDataflow>();
std::unique_ptr<Pass>
scalehls::hls::createConvertEmptyTensorToAllocTensorPass() {
return std::make_unique<ConvertEmptyTensorToAllocTensor>();
}
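
The hunk elided above sets up the conversion target and patterns for this pass; a hypothetical sketch of the kind of rewrite involved is shown below, replacing tensor.empty with the HLS alloc-tensor op. The builder signature of hls::AllocTensorOp and the include paths are assumptions for illustration, not taken from this commit.

#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/DialectConversion.h"
#include "scalehls/Dialect/HLS/Utils/Utils.h" // assumed to pull in the HLS ops

namespace {
// Hypothetical pattern: rewrite tensor.empty into an hls::AllocTensorOp of the
// same tensor type, so later bufferization sees an explicit allocation.
struct ConvertTensorEmptyOp
    : public mlir::OpConversionPattern<mlir::tensor::EmptyOp> {
  using OpConversionPattern::OpConversionPattern;
  mlir::LogicalResult
  matchAndRewrite(mlir::tensor::EmptyOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // AllocTensorOp builder signature assumed: result type only.
    rewriter.replaceOpWithNewOp<mlir::scalehls::hls::AllocTensorOp>(
        op, op.getType());
    return mlir::success();
  }
};
} // namespace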
71 changes: 71 additions & 0 deletions lib/Dialect/HLS/Transforms/CreateDataflow.cpp
@@ -0,0 +1,71 @@
//===----------------------------------------------------------------------===//
//
// Copyright 2020-2021 The ScaleHLS Authors.
//
//===----------------------------------------------------------------------===//

#include "mlir/IR/Dominance.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "scalehls/Dialect/HLS/Transforms/Passes.h"
#include "scalehls/Dialect/HLS/Utils/Utils.h"

using namespace mlir;
using namespace scalehls;
using namespace hls;

namespace {
// TODO: For now, we also dispatch most tensor ops into separate tasks. We
// should come up with a better way to handle them.
struct DispatchFuncOp : public OpRewritePattern<func::FuncOp> {
using OpRewritePattern<func::FuncOp>::OpRewritePattern;

LogicalResult matchAndRewrite(func::FuncOp func,
PatternRewriter &rewriter) const override {
auto dispatch = dispatchBlock(&func.front(), rewriter);
if (!dispatch)
return failure();

unsigned taskId = 0;
for (auto &op : llvm::make_early_inc_range(dispatch.getOps())) {
if (auto linalgOp = dyn_cast<linalg::LinalgOp>(op)) {
if (linalgOp.hasDynamicShape())
return linalgOp.emitOpError("cannot handle dynamic shape yet");
auto task = fuseOpsIntoTask({linalgOp}, rewriter);

std::string taskName =
func.getName().str() + "_" + std::to_string(taskId++);
linalgOp.getOperation()->setAttr(taskName, rewriter.getUnitAttr());
task->setAttr(taskName, rewriter.getUnitAttr());

} else if (isa<tensor::TensorDialect>(op.getDialect())) {
auto task = fuseOpsIntoTask({&op}, rewriter);

std::string taskName =
func.getName().str() + "_" + std::to_string(taskId++);
op.setAttr(taskName, rewriter.getUnitAttr());
task->setAttr(taskName, rewriter.getUnitAttr());
}
}
return success();
}
};
} // namespace

namespace {
struct CreateDataflow : public CreateDataflowBase<CreateDataflow> {
void runOnOperation() override {
auto func = getOperation();
auto context = func.getContext();

// Dispatch the current function to create the dataflow hierarchy.
mlir::RewritePatternSet patterns(context);
patterns.add<DispatchFuncOp>(context);
(void)applyOpPatternsAndFold({func}, std::move(patterns));
}
};
} // namespace

std::unique_ptr<Pass> scalehls::hls::createCreateDataflowPass() {
return std::make_unique<CreateDataflow>();
}
32 changes: 32 additions & 0 deletions lib/Transforms/ApplyTransformPattern.cpp
@@ -0,0 +1,32 @@
//===----------------------------------------------------------------------===//
//
// Copyright 2020-2021 The ScaleHLS Authors.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "scalehls/Transforms/Passes.h"

using namespace mlir;
using namespace scalehls;
using namespace hls;

namespace {
struct ApplyTransformPattern
: public ApplyTransformPatternBase<ApplyTransformPattern> {
void runOnOperation() override {
auto op = getOperation();
auto context = op->getContext();

// Merge consecutive tensor insert/extract slice ops.
mlir::RewritePatternSet patterns(context);
tensor::populateMergeConsecutiveInsertExtractSlicePatterns(patterns);
(void)applyPatternsAndFoldGreedily(op, std::move(patterns));
}
};
} // namespace

std::unique_ptr<Pass> scalehls::createApplyTransformPatternPass() {
return std::make_unique<ApplyTransformPattern>();
}
3 changes: 1 addition & 2 deletions lib/Transforms/CMakeLists.txt
@@ -1,6 +1,5 @@
add_mlir_library(MLIRScaleHLSTransforms
ConvertLinalgToDataflow.cpp
ConvertDataflowToFunc.cpp
ApplyTransformPattern.cpp
ComprehensiveBufferize.cpp
GenerateRuntimeFunc.cpp
LowerCopyToAffineLoops.cpp
5 changes: 3 additions & 2 deletions lib/Transforms/Pipelines.cpp
@@ -28,10 +28,11 @@ void scalehls::addLinalgTransformPasses(OpPassManager &pm) {
pm.addPass(mlir::createLinalgElementwiseOpFusionPass());
pm.addPass(bufferization::createEmptyTensorEliminationPass());
pm.addPass(mlir::createCanonicalizerPass());
pm.addPass(hls::createConvertEmptyTensorToAllocTensorPass());
}

void scalehls::addConvertLinalgToDataflowPasses(OpPassManager &pm) {
pm.addNestedPass<func::FuncOp>(scalehls::createConvertLinalgToDataflowPass());
pm.addNestedPass<func::FuncOp>(hls::createCreateDataflowPass());
// pm.addPass(mlir::createLinalgGeneralizationPass());
pm.addPass(mlir::createCanonicalizerPass());
}
@@ -53,7 +54,7 @@ void scalehls::addLowerDataflowPasses(OpPassManager &pm) {
}

void scalehls::addConvertDataflowToFuncPasses(OpPassManager &pm) {
pm.addPass(scalehls::createConvertDataflowToFuncPass());
pm.addPass(hls::createConvertDataflowToFuncPass());
pm.addPass(scalehls::createGenerateRuntimeFuncPass());
// Lower linalg to affine loops.
pm.addNestedPass<func::FuncOp>(mlir::createConvertLinalgToAffineLoopsPass());
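
Taken together, the pipeline entry points touched above could be composed roughly as follows. The wrapper function and the include path for the pipeline declarations are assumptions; the ordering is inferred from the pipeline names and is not spelled out in this diff.

#include "mlir/Pass/PassManager.h"
#include "scalehls/Transforms/Passes.h" // assumed to declare the pipeline helpers

// Hypothetical end-to-end wiring of the pipeline helpers updated above.
static void buildScaleHLSPipeline(mlir::OpPassManager &pm) {
  using namespace mlir::scalehls;
  addLinalgTransformPasses(pm);         // now ends with ConvertEmptyTensorToAllocTensor
  addConvertLinalgToDataflowPasses(pm); // now nests hls::createCreateDataflowPass
  addLowerDataflowPasses(pm);           // functional -> structural dataflow
  addConvertDataflowToFuncPasses(pm);   // now uses hls::createConvertDataflowToFuncPass
}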
