summaryrefslogtreecommitdiff
path: root/include/linalg3
diff options
context:
space:
mode:
Diffstat (limited to 'include/linalg3')
-rw-r--r--include/linalg3/Analysis.h37
-rw-r--r--include/linalg3/ConvertToLLVMDialect.h29
-rw-r--r--include/linalg3/Intrinsics.h31
-rw-r--r--include/linalg3/LoadStoreOps.h89
-rw-r--r--include/linalg3/Ops.h25
-rw-r--r--include/linalg3/TensorOps-inl.h145
-rw-r--r--include/linalg3/TensorOps.h54
-rw-r--r--include/linalg3/Transforms.h80
8 files changed, 490 insertions, 0 deletions
diff --git a/include/linalg3/Analysis.h b/include/linalg3/Analysis.h
new file mode 100644
index 0000000..813fc37
--- /dev/null
+++ b/include/linalg3/Analysis.h
@@ -0,0 +1,37 @@
+//===- Analysis.h - Linalg dialect Analysis function definitions ----------===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#ifndef LINALG3_ANALYSIS_H_
+#define LINALG3_ANALYSIS_H_
+
+#include "linalg2/Analysis.h"
+
+namespace mlir {
+class AffineMap;
+} // namespace mlir
+
+namespace linalg {
+
+/// Given a `map` specification and a subset of its results
+/// `[beginResult, endResult)`, returns the inverse map that maps result
+/// positions to dim positions.
+mlir::AffineMap inverseSubMap(mlir::AffineMap map, unsigned beginResult = 0,
+ unsigned endResult = 0);
+
+} // namespace linalg
+
+#endif // LINALG3_ANALYSIS_H_
diff --git a/include/linalg3/ConvertToLLVMDialect.h b/include/linalg3/ConvertToLLVMDialect.h
new file mode 100644
index 0000000..8f122e0
--- /dev/null
+++ b/include/linalg3/ConvertToLLVMDialect.h
@@ -0,0 +1,29 @@
+//===- ConvertToLLVMDialect.h - conversion from Linalg to LLVM --*- C++ -*-===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#ifndef LINALG3_CONVERTTOLLVMDIALECT_H_
+#define LINALG3_CONVERTTOLLVMDIALECT_H_
+
+namespace mlir {
+class Module;
+} // end namespace mlir
+
+namespace linalg {
+void convertLinalg3ToLLVM(mlir::Module &module);
+} // end namespace linalg
+
+#endif // LINALG3_CONVERTTOLLVMDIALECT_H_
diff --git a/include/linalg3/Intrinsics.h b/include/linalg3/Intrinsics.h
new file mode 100644
index 0000000..75a0417
--- /dev/null
+++ b/include/linalg3/Intrinsics.h
@@ -0,0 +1,31 @@
+//===- Intrinsics.h - Linalg intrinsics definitions -----------------------===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#ifndef LINALG3_INTRINSICS_H_
+#define LINALG3_INTRINSICS_H_
+
+#include "linalg2/Intrinsics.h"
+#include "linalg3/Ops.h"
+
+namespace linalg {
+namespace intrinsics {
+using load = mlir::edsc::intrinsics::ValueBuilder<LoadOp>;
+using store = mlir::edsc::intrinsics::OperationBuilder<StoreOp>;
+} // namespace intrinsics
+} // namespace linalg
+
+#endif // LINALG3_INTRINSICS_H_
diff --git a/include/linalg3/LoadStoreOps.h b/include/linalg3/LoadStoreOps.h
new file mode 100644
index 0000000..b77e702
--- /dev/null
+++ b/include/linalg3/LoadStoreOps.h
@@ -0,0 +1,89 @@
+//===- LoadStoreOps.h - Linalg dialect Load/Store operation definitions ---===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#ifndef LINALG3_LOADSTOREOP_H_
+#define LINALG3_LOADSTOREOP_H_
+
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/Support/LLVM.h"
+
+namespace linalg {
+
+class ViewType;
+
+/// A linalg.LoadOp is the counterpart of affine.load but operating on ViewType
+/// instead of MemRefType.
+class LoadOp : public mlir::Op<LoadOp, mlir::OpTrait::VariadicOperands,
+ mlir::OpTrait::OneResult> {
+public:
+ using Op::Op;
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Hooks to customize the behavior of this op.
+ //////////////////////////////////////////////////////////////////////////////
+ static llvm::StringRef getOperationName() { return "linalg.load"; }
+ static void build(mlir::Builder *b, mlir::OperationState *result,
+ mlir::Value *view,
+ mlir::ArrayRef<mlir::Value *> indices = {});
+ mlir::LogicalResult verify();
+ static bool parse(mlir::OpAsmParser *parser, mlir::OperationState *result);
+ void print(mlir::OpAsmPrinter *p);
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Op-specific functionality.
+ //////////////////////////////////////////////////////////////////////////////
+ unsigned getRank();
+ ViewType getViewType();
+ mlir::Value *getView() { return getOperand(0); }
+ mlir::Operation::operand_range getIndices() {
+ return {operand_begin() + 1, operand_end()};
+ }
+};
+
+/// A linalg.StoreOp is the counterpart of affine.store but operating on
+/// ViewType instead of MemRefType.
+class StoreOp : public mlir::Op<StoreOp, mlir::OpTrait::VariadicOperands,
+ mlir::OpTrait::ZeroResult> {
+public:
+ using Op::Op;
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Hooks to customize the behavior of this op.
+ //////////////////////////////////////////////////////////////////////////////
+ static llvm::StringRef getOperationName() { return "linalg.store"; }
+ static void build(mlir::Builder *b, mlir::OperationState *result,
+ mlir::Value *valueToStore, mlir::Value *view,
+ mlir::ArrayRef<mlir::Value *> indices = {});
+ mlir::LogicalResult verify();
+ static bool parse(mlir::OpAsmParser *parser, mlir::OperationState *result);
+ void print(mlir::OpAsmPrinter *p);
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Op-specific functionality.
+ //////////////////////////////////////////////////////////////////////////////
+ unsigned getRank();
+ ViewType getViewType();
+ mlir::Value *getValueToStore() { return getOperand(0); }
+ mlir::Value *getView() { return getOperand(1); }
+ mlir::Operation::operand_range getIndices() {
+ return {operand_begin() + 2, operand_end()};
+ }
+};
+
+} // namespace linalg
+
+#endif // LINALG3_LOADSTOREOP_H_
diff --git a/include/linalg3/Ops.h b/include/linalg3/Ops.h
new file mode 100644
index 0000000..813cbff
--- /dev/null
+++ b/include/linalg3/Ops.h
@@ -0,0 +1,25 @@
+//===- Ops.h - Linalg Ops single entry point ------------------------------===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#ifndef LINALG3_OPS_H_
+#define LINALG3_OPS_H_
+
+#include "linalg2/Ops.h"
+#include "linalg3/LoadStoreOps.h"
+#include "linalg3/TensorOps.h"
+
+#endif // LINALG3_OPS_H_
diff --git a/include/linalg3/TensorOps-inl.h b/include/linalg3/TensorOps-inl.h
new file mode 100644
index 0000000..b651053
--- /dev/null
+++ b/include/linalg3/TensorOps-inl.h
@@ -0,0 +1,145 @@
+//===- TensorOps-inl.h - Linalg dialect TensorOps operation implementation ===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+/// The TensorOps-inl.h inclusion pattern is chosen to allow gradual extension of
+/// TensorOps by adding implementations as they are needed in the appropriate
+/// step in the tutorial.
+#ifndef LINALG3_TENSOROPS_INL_H_
+#define LINALG3_TENSOROPS_INL_H_
+
+#include "linalg1/Common.h"
+#include "linalg1/Utils.h"
+#include "linalg2/TensorOps.h"
+#include "linalg3/Analysis.h"
+#include "linalg3/Ops.h"
+
+template <class ConcreteOp>
+mlir::Value *
+linalg::TensorContractionBase<ConcreteOp>::getInputView(unsigned viewIndex) {
+ return *(getInputs().begin() + viewIndex);
+}
+
+template <class ConcreteOp>
+mlir::Value *
+linalg::TensorContractionBase<ConcreteOp>::getOutputView(unsigned viewIndex) {
+ return *(getOutputs().begin() + viewIndex);
+}
+
+template <class ConcreteOp>
+llvm::SmallVector<mlir::AffineMap, 8>
+linalg::TensorContractionBase<ConcreteOp>::loopsToOperandRangeMaps() {
+ return static_cast<ConcreteOp *>(this)->loopsToOperandRangeMaps();
+}
+
+template <class ConcreteOp>
+void linalg::TensorContractionBase<ConcreteOp>::emitScalarImplementation(
+ llvm::ArrayRef<mlir::Value *> parallelIvs,
+ llvm::ArrayRef<mlir::Value *> reductionIvs) {
+ static_cast<ConcreteOp *>(this)->emitScalarImplementation(parallelIvs,
+ reductionIvs);
+}
+
+template <class ConcreteOp>
+mlir::AffineMap linalg::operandRangesToLoopsMap(
+ linalg::TensorContractionBase<ConcreteOp> &tensorContraction) {
+ mlir::AffineMap current;
+ // Individual submaps may not be invertible but their union must be invertible
+ // by construction.
+ for (auto m : tensorContraction.loopsToOperandRangeMaps()) {
+ if (!m)
+ continue;
+ if (!current) {
+ current = m;
+ continue;
+ }
+ llvm::SmallVector<mlir::AffineExpr, 8> results(current.getResults().begin(),
+ current.getResults().end());
+ results.append(m.getResults().begin(), m.getResults().end());
+ current = mlir::AffineMap::get(
+ std::max(current.getNumDims(), m.getNumDims()),
+ current.getNumSymbols() + m.getNumSymbols(), results, {});
+ }
+ return inverseSubMap(current);
+}
+
+// Extract the ranges from a given ViewOp or SliceOp.
+//
+// In the case of a ViewOp, things are simple: just traverse the indexings and
+// get all the ranges (i.e. drop the indices).
+//
+// In the case of a SliceOp, things are trickier because we need to handle a
+// potential rank-reduction:
+// 1. Examine the indexing to determine if it is rank-reducing.
+// 2. If it is rank-reducing, an offset of 1 is added to the dimensions such
+// that `d >= slicingDim`. This is to account for the rank reduction.
+// `getViewRootIndexing` is then called on the **parent** view.
+static llvm::SmallVector<mlir::Value *, 8>
+extractRangesFromViewOrSliceOp(mlir::Value *view) {
+ // This expects a viewType which must come from either ViewOp or SliceOp.
+ assert(view->getType().isa<linalg::ViewType>() && "expected ViewType");
+ if (auto viewOp = view->getDefiningOp()->dyn_cast<linalg::ViewOp>())
+ return viewOp.getRanges();
+
+ auto sliceOp = view->getDefiningOp()->cast<linalg::SliceOp>();
+ unsigned slicingDim = sliceOp.getSlicingDim();
+ auto *indexing = *(sliceOp.getIndexings().begin());
+ bool isRankReducing = indexing->getType().isa<mlir::IndexType>();
+ unsigned offset = 0;
+ llvm::SmallVector<mlir::Value *, 8> res;
+ res.reserve(sliceOp.getRank());
+ for (unsigned d = 0, e = sliceOp.getRank(); d < e; ++d) {
+ if (d == slicingDim && isRankReducing)
+ offset = 1;
+ auto *parentView = sliceOp.getParentView();
+ auto indexingPosPair = linalg::getViewRootIndexing(parentView, d + offset);
+ res.push_back(indexingPosPair.first);
+ }
+ return res;
+}
+
+template <class ConcreteOp>
+static llvm::SmallVector<mlir::Value *, 8>
+getInputRanges(linalg::TensorContractionBase<ConcreteOp> &tensorContraction) {
+ llvm::SmallVector<mlir::Value *, 8> res;
+ for (auto *in : tensorContraction.getInputs()) {
+ auto subres = extractRangesFromViewOrSliceOp(in);
+ res.append(subres.begin(), subres.end());
+ }
+ return res;
+}
+
+template <class ConcreteOp>
+static llvm::SmallVector<mlir::Value *, 8>
+getOutputRanges(linalg::TensorContractionBase<ConcreteOp> &tensorContraction) {
+ llvm::SmallVector<mlir::Value *, 8> res;
+ for (auto *out : tensorContraction.getOutputs()) {
+ auto subres = extractRangesFromViewOrSliceOp(out);
+ res.append(subres.begin(), subres.end());
+ }
+ return res;
+}
+
+template <class ConcreteOp>
+llvm::SmallVector<mlir::Value *, 8> linalg::getRanges(
+ linalg::TensorContractionBase<ConcreteOp> &tensorContraction) {
+ llvm::SmallVector<mlir::Value *, 8> res = getInputRanges(tensorContraction);
+ llvm::SmallVector<mlir::Value *, 8> tmp = getOutputRanges(tensorContraction);
+ res.append(tmp.begin(), tmp.end());
+ return res;
+}
+
+#endif // LINALG3_TENSOROPS_INL_H_
diff --git a/include/linalg3/TensorOps.h b/include/linalg3/TensorOps.h
new file mode 100644
index 0000000..bf5a377
--- /dev/null
+++ b/include/linalg3/TensorOps.h
@@ -0,0 +1,54 @@
+//===- TensorOps.h - Linalg dialect TensorOps operation definition --------===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#ifndef LINALG3_TENSOROPS_H_
+#define LINALG3_TENSOROPS_H_
+
+#include "linalg2/TensorOps.h"
+
+namespace linalg {
+
+///
+/// Ideally all these functions would go in an Analysis but as long as
+/// TensorContractionBase is templated, they need to remain close enough.
+///
+
+/// Takes a `tensorContraction` and returns an AffineMap that can be used to
+/// map ranges to enclosing loops for all the operands' ranges.
+template <class ConcreteOp>
+mlir::AffineMap operandRangesToLoopsMap(
+ linalg::TensorContractionBase<ConcreteOp> &tensorContraction);
+
+/// Takes a `tensorContraction` and returns the ranges of all its operands.
+/// When an operand comes from a ViewOp, things are simple:
+/// just traverse the indexings and get all the ranges
+/// (i.e. drop the rank-reducing indices).
+/// In the case of a SliceOp, things are more involved because we need to handle
+/// potential rank-reductions.
+/// This function abstracts this complexity away and returns all the ranges.
+template <class ConcreteOp>
+llvm::SmallVector<mlir::Value *, 8>
+getRanges(linalg::TensorContractionBase<ConcreteOp> &tensorContraction);
+
+} // namespace linalg
+
+/// The TensorOps-inl.h inclusion pattern is chosen to allow gradual extension of
+/// TensorOps by adding implementations as they are needed in the appropriate
+/// step in the tutorial.
+#include "linalg3/TensorOps-inl.h"
+
+#endif // LINALG3_TENSOROPS_H_
diff --git a/include/linalg3/Transforms.h b/include/linalg3/Transforms.h
new file mode 100644
index 0000000..9af528e
--- /dev/null
+++ b/include/linalg3/Transforms.h
@@ -0,0 +1,80 @@
+//===- Transforms.h - Linalg dialect Transformations definition -----------===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#ifndef LINALG3_TRANSFORMS_H_
+#define LINALG3_TRANSFORMS_H_
+
+#include "linalg2/Transforms.h"
+#include "mlir/Support/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+
+namespace mlir {
+class AffineForOp;
+class AffineMap;
+class Function;
+class FunctionPassBase;
+class Operation;
+class Value;
+} // namespace mlir
+
+namespace linalg {
+
+struct RangeParts {
+ explicit RangeParts(unsigned reserved);
+ RangeParts(llvm::ArrayRef<mlir::Value *> ranges);
+ llvm::SmallVector<mlir::Value *, 4> makeRanges();
+
+ llvm::SmallVector<mlir::Value *, 4> mins;
+ llvm::SmallVector<mlir::Value *, 4> maxes;
+ llvm::SmallVector<mlir::Value *, 4> steps;
+};
+
+mlir::Value *
+makeFoldedComposedAffineApply(mlir::AffineMap map,
+ llvm::ArrayRef<mlir::Value *> operandsRef);
+
+llvm::SmallVector<mlir::Value *, 4>
+makeGenericLoopRanges(mlir::AffineMap operandRangesToLoopMaps,
+ llvm::ArrayRef<mlir::Value *> ranges,
+ llvm::ArrayRef<mlir::Value *> tileSizes = {});
+
+/// Traverses `f` and rewrites linalg.slice, and the operations it depends on,
+/// to only use linalg.view operations.
+void composeSliceOps(mlir::Function *f);
+
+/// Traverses `f` and rewrites linalg.matmul(resp. linalg.matvec)
+/// as linalg.matvec(resp. linalg.dot).
+void lowerToFinerGrainedTensorContraction(mlir::Function *f);
+
+/// Operation-wise writing of linalg operations to loop form.
+/// It is the caller's responsibility to erase the `op` if necessary.
+/// This returns the enclosing loops around the body of `op` for further
+/// composition of transformations.
+llvm::Optional<llvm::SmallVector<mlir::AffineForOp, 4>>
+writeAsLoops(mlir::Operation *op);
+
+/// Traverses `f` and rewrites linalg operations in loop form.
+void lowerToLoops(mlir::Function *f);
+
+/// Creates a pass that rewrites linalg.load and linalg.store to affine.load and
+/// affine.store operations.
+mlir::FunctionPassBase *createLowerLinalgLoadStorePass();
+
+} // namespace linalg
+
+#endif // LINALG3_TRANSFORMS_H_