Skip to content

Commit

Permalink
Merge branch 'main' into zos-time
Browse files Browse the repository at this point in the history
  • Loading branch information
cjvolzka authored Apr 5, 2024
2 parents 2b47a7e + b081bf8 commit 003d142
Show file tree
Hide file tree
Showing 11 changed files with 772 additions and 40 deletions.
698 changes: 677 additions & 21 deletions src/Accelerators/NNPA/Conversion/ZHighToZLow/ZHighToZLow.cpp

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions src/Conversion/ONNXToKrnl/ML/CategoryMapper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -150,15 +150,15 @@ struct ONNXCategoryMapperOpLowering
Value inputElem =
loadElement(X, loopInd, elementType, rank, createKrnl);
if (emitPrintStmts)
create.krnl.printf("inputElem: ", inputElem, elementType);
create.krnl.printf("inputElem: ", inputElem);

Value index, isIndexValid;
std::tie(index, isIndexValid) =
emitFindIndex(inputElem, elementType, perfectHashTable,
constantForCatsInt64s, constantForCatsStrings, create);

if (emitPrintStmts)
create.krnl.printf("index: ", index, index.getType());
create.krnl.printf("index: ", index);

// Store the final result.
scf::IfOp ifOp = rewriter.create<scf::IfOp>(
Expand Down
14 changes: 5 additions & 9 deletions src/Conversion/ONNXToKrnl/Math/Elementwise.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1251,14 +1251,11 @@ Value emitScalarOpFor<ONNXModOp>(ConversionPatternRewriter &rewriter,
create.math.select(needAdjust, adjustedRemainder, mathRemainder);

#ifdef DEBUG_ONNX_MOD
create.krnl.printf("XXXX emitScalarOpFor<ONNXModOp>: dividend=", dividend,
dividend.getType());
create.krnl.printf(", divisor=", divisor, divisor.getType());
create.krnl.printf(
", mathReminder=", mathRemainder, mathRemainder.getType());
create.krnl.printf(
", adjustedReminder=", adjustedRemainder, adjustedRemainder.getType());
create.krnl.printf(", Answer=", answer, answer.getType());
create.krnl.printf("XXXX emitScalarOpFor<ONNXModOp>: dividend=", dividend);
create.krnl.printf(", divisor=", divisor);
create.krnl.printf(", mathReminder=", mathRemainder);
create.krnl.printf(", adjustedReminder=", adjustedRemainder);
create.krnl.printf(", Answer=", answer);
create.krnl.printf("\n");
#endif

Expand Down Expand Up @@ -1504,7 +1501,6 @@ static LogicalResult getPartiallyFlattenedSimdCode(
IndexExprScope allocScope(create.vec, shapeHelper->getScope());
DimsExpr outputDims;
getIndexExprList<SymbolIndexExpr>(shapeHelper->getOutputDims(), outputDims);

// Alloc memory with padding for SIMD.
// For the moment, its ok to go here; if we truly have partial flattening of
// the simd code, then we only do it with static memref size that are
Expand Down
6 changes: 3 additions & 3 deletions src/Conversion/ONNXToKrnl/Tensor/GatherND.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ struct ONNXGatherNDOpLowering : public OpConversionPattern<ONNXGatherNDOp> {
int64_t n = (int64_t)indices.size();
for (int64_t i = 0; i < n; ++i) {
Value val = indices[i].getValue();
createKrnl.printf(val, val.getType());
createKrnl.printf(" ", val);
}
createKrnl.printf(")\n");
}
Expand Down Expand Up @@ -173,7 +173,7 @@ struct ONNXGatherNDOpLowering : public OpConversionPattern<ONNXGatherNDOp> {
reshapedIndicesAccessFct.pop_back();

if (emitPrintStmts) {
createKrnl.printf("index = ", indexVal, indexVal.getType());
createKrnl.printf("index = ", indexVal);
createKrnl.printf("\n");
}

Expand Down Expand Up @@ -231,7 +231,7 @@ struct ONNXGatherNDOpLowering : public OpConversionPattern<ONNXGatherNDOp> {
reshapedDataAccessFct.pop_back();

if (emitPrintStmts) {
createKrnl.printf("val = ", val, val.getType());
createKrnl.printf("val = ", val);
createKrnl.printf("\n");
}

Expand Down
11 changes: 8 additions & 3 deletions src/Dialect/Krnl/DialectBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -301,9 +301,14 @@ void KrnlBuilder::printf(
b().create<KrnlPrintOp>(loc(), newFormat, input);
}

void KrnlBuilder::printf(Value input, Type inputType) const {
StringRef format = getFormat(inputType);
b().create<KrnlPrintOp>(loc(), format, input);
// Print "msg" followed by the runtime value of "input". The element type used
// for formatting is derived from the value itself; delegates to the
// (msg, value, type, endsWithNewLine) overload.
void KrnlBuilder::printf(
    StringRef msg, Value input, bool endsWithNewLine) const {
  Type derivedType = input.getType();
  this->printf(msg, input, derivedType, endsWithNewLine);
}

// Print "msg" followed by an index expression: the IndexExpr is first
// materialized into an mlir::Value, then printed via the overload above.
void KrnlBuilder::printf(
    StringRef msg, IndexExpr input, bool endsWithNewLine) const {
  Value materialized = input.getValue();
  this->printf(msg, materialized, endsWithNewLine);
}

// =============================================================================
Expand Down
8 changes: 6 additions & 2 deletions src/Dialect/Krnl/DialectBuilder.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -151,17 +151,21 @@ struct KrnlBuilder : public DialectBuilder {
mlir::Value strncmp(
mlir::Value str1, mlir::Value str2, mlir::Value len) const;
mlir::Value strlen(mlir::Value str) const;
// Debug: print messages, values, and tensors at runtime.
void printf(mlir::StringRef msg) const;
void printf(mlir::StringRef msg, mlir::Value input, /* type from input */
bool endsWithNewLine = false) const;
void printf(
mlir::StringRef msg, IndexExpr input, bool endsWithNewLine = false) const;
void printf(mlir::StringRef msg, mlir::Value input, mlir::Type inputType,
bool endsWithNewLine = false) const;
void printf(mlir::Value input, mlir::Type inputType) const;
void printTensor(mlir::StringRef msg, mlir::Value input) const;

// Onnx-mlir runtime functions.
void randomNormal(mlir::Value alloc, mlir::Value numberOfRandomValues,
mlir::Value mean, mlir::Value scale, mlir::Value seed) const;
mlir::Value findIndex(
mlir::Value input, mlir::Value G, mlir::Value V, mlir::Value len) const;
void printTensor(mlir::StringRef msg, mlir::Value input) const;
};

//====--- Support for Affine Builder with Krnl Mem Ops ------------------===//
Expand Down
17 changes: 17 additions & 0 deletions src/Dialect/Mlir/DialectBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1391,6 +1391,23 @@ Value MemRefBuilder::dim(Value val, Value index) const {
return Value(b().createOrFold<memref::DimOp>(loc(), val, index));
}

//===----------------------------------------------------------------------===//
// Prefetch.

// Emit a `memref.prefetch` hint for `memref` at the given index values.
// `isWrite` marks the prefetch as a write (vs. read) hint, `locality` is the
// cache-locality hint (0 = no locality, 3 = extremely local, per the memref
// dialect op definition), and `isData` selects the data (vs. instruction)
// cache.
void MemRefBuilder::prefetch(Value memref, ValueRange indices, bool isWrite,
    unsigned locality, bool isData) {
  b().create<memref::PrefetchOp>(loc(), memref, indices, isWrite, locality,
      isData);
}

// Same as prefetch() above, but with the indices given as IndexExprs. Each
// IndexExpr is materialized into an mlir::Value before delegating.
void MemRefBuilder::prefetchIE(Value memref,
    llvm::SmallVectorImpl<IndexExpr> &indices, bool isWrite, unsigned locality,
    bool isData) {
  // Lower the index expressions to concrete values, then emit the hint.
  SmallVector<Value, 4> loweredIndices;
  IndexExpr::getValues(indices, loweredIndices);
  prefetch(memref, loweredIndices, isWrite, locality, isData);
}

//===----------------------------------------------------------------------===//
// Structured Control Flow (SCF).
//===----------------------------------------------------------------------===//
Expand Down
11 changes: 11 additions & 0 deletions src/Dialect/Mlir/DialectBuilder.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -371,6 +371,11 @@ struct MemRefBuilder final : DialectBuilder {
mlir::Value dim(mlir::Value val, int64_t index) const;
mlir::Value dim(mlir::Value val, mlir::Value index) const;

void prefetchIE(mlir::Value memref, llvm::SmallVectorImpl<IndexExpr> &indices,
bool isWrite, unsigned locality, bool isData = true);
void prefetch(mlir::Value memref, mlir::ValueRange indices, bool isWrite,
unsigned locality, bool isData = true);

private:
mlir::IntegerAttr computeAlignment(int64_t alignment) const;
void computeDynSymbols(
Expand Down Expand Up @@ -519,6 +524,12 @@ struct GenericAffineBuilder final : DialectBuilder {
void storeIE(mlir::Value val, mlir::Value memref,
llvm::ArrayRef<IndexExpr> indices, mlir::ValueRange offsets) const;

void prefetch(mlir::Value memref, mlir::AffineMap map,
llvm::ArrayRef<mlir::Value> operands, bool isWrite, unsigned localityHint,
bool isDataCache = true) const;
void prefetchIE(mlir::Value memref, llvm::ArrayRef<IndexExpr> indices,
bool isWrite, unsigned localityHint, bool isDataCache = true) const;

void forIE(IndexExpr lb, IndexExpr ub, int64_t step,
mlir::function_ref<void(GenericAffineBuilder &, mlir::Value)> builderFn)
const;
Expand Down
18 changes: 18 additions & 0 deletions src/Dialect/Mlir/DialectBuilder.hpp.inc
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,24 @@ inline void GenericAffineBuilder<LOAD_OP, STORE_OP>::forIE(IndexExpr lb,
});
}

// Emit an `affine.prefetch` of `memref` at the location computed by applying
// the affine `map` to `operands`. `isWrite` selects a write (vs. read)
// prefetch, `localityHint` is the cache-locality hint (0 = no locality,
// 3 = extremely local, per the affine dialect op definition), and
// `isDataCache` selects the data (vs. instruction) cache.
// Marked `inline` for consistency with the sibling template definitions in
// this include file (forIE, prefetchIE).
template <class LOAD_OP, class STORE_OP>
inline void GenericAffineBuilder<LOAD_OP, STORE_OP>::prefetch(
    mlir::Value memref, mlir::AffineMap map,
    llvm::ArrayRef<mlir::Value> operands, bool isWrite, unsigned localityHint,
    bool isDataCache) const {
  b().template create<mlir::affine::AffinePrefetchOp>(
      loc(), memref, map, operands, isWrite, localityHint, isDataCache);
}

// Same as prefetch() above, but with the access location given as IndexExprs:
// they are first lowered to a single affine map plus its operand values.
template <class LOAD_OP, class STORE_OP>
inline void GenericAffineBuilder<LOAD_OP, STORE_OP>::prefetchIE(
    mlir::Value memref, llvm::ArrayRef<IndexExpr> indices, bool isWrite,
    unsigned localityHint, bool isDataCache) const {
  mlir::AffineMap accessMap;
  llvm::SmallVector<mlir::Value, 8> mapOperands;
  IndexExpr::getAffineMapAndOperands(indices, accessMap, mapOperands);
  prefetch(memref, accessMap, mapOperands, isWrite, localityHint, isDataCache);
}

template <class LOAD_OP, class STORE_OP>
inline void GenericAffineBuilder<LOAD_OP, STORE_OP>::forIE(
llvm::SmallVectorImpl<IndexExpr> &lbs,
Expand Down
23 changes: 23 additions & 0 deletions src/Dialect/Mlir/IndexExpr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -568,6 +568,29 @@ void IndexExpr::debugPrint(
valueList.emplace_back(expr.getValue());
}

// Combine the affine maps of several IndexExprs into a single AffineMap whose
// results are the concatenation of each expression's map results, and return
// the operand values the map is applied to.
/* static */ void IndexExpr::getAffineMapAndOperands(
    ArrayRef<IndexExpr> indexExprArray, AffineMap &map,
    SmallVectorImpl<mlir::Value> &operands) {
  assert(indexExprArray.size() > 0 && "expected at least one index expr");
  SmallVector<AffineExpr, 8> affineExprList;
  for (IndexExpr expr : indexExprArray) {
    AffineMap tmpMap;
    SmallVector<Value, 8> tmpOperands;
    expr.getAffineMapAndOperands(tmpMap, tmpOperands);
    // NOTE(review): each iteration overwrites `operands`, keeping only the
    // last expression's operand list. This presumably relies on every
    // IndexExpr in the array sharing one scope and thus producing the same
    // dim/symbol operands — verify this invariant holds for all callers.
    operands = tmpOperands;
    // Enqueue the affine expressions defined by this temp map.
    for (AffineExpr affineExpr : tmpMap.getResults()) {
      affineExprList.emplace_back(affineExpr);
    }
  }

  // Now can generate a common map with all the results.
  // Dim/symbol counts are taken from the first expression's scope; this
  // assumes all expressions were built in that same scope (see note above).
  unsigned dimCount = indexExprArray[0].getScope().getNumDims();
  unsigned symCount = indexExprArray[0].getScope().getNumSymbols();
  map = AffineMap::get(dimCount, symCount, affineExprList,
      indexExprArray[0].getRewriter().getContext());
}

//===----------------------------------------------------------------------===//
// IndexExpr Op Support.
//===----------------------------------------------------------------------===//
Expand Down
2 changes: 2 additions & 0 deletions src/Dialect/Mlir/IndexExpr.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -498,6 +498,8 @@ class IndexExpr {
llvm::SmallVectorImpl<mlir::Value> &valueList);
static void getOpOrFoldResults(mlir::ArrayRef<IndexExpr> indexExprArray,
llvm::SmallVectorImpl<mlir::OpFoldResult> &resList);
static void getAffineMapAndOperands(mlir::ArrayRef<IndexExpr> indexExprArray,
mlir::AffineMap &map, llvm::SmallVectorImpl<mlir::Value> &operands);

// Possibly Affine Operations. Return a new IndexExpr
IndexExpr operator+(IndexExpr const b) const;
Expand Down

0 comments on commit 003d142

Please sign in to comment.