Advance llvm-project and stablehlo. by stellaraccident · Pull Request #2619 · llvm/torch-mlir
Merged: 13 commits, Dec 8, 2023
2 changes: 1 addition & 1 deletion externals/llvm-project
Submodule llvm-project updated 3620 files
2 changes: 1 addition & 1 deletion externals/stablehlo
Submodule stablehlo updated 311 files
@@ -51,7 +51,7 @@ Value promoteType(PatternRewriter &rewriter, Location loc, Value input,
 Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
                           TensorType outType);

-SmallVector<size_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);
+SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);

 // Get the dimension sizes of the input tensor, given the dimension axes
 FailureOr<SmallVector<Value, 4>> getDimSizesOfTensor(PatternRewriter &rewriter,
15 changes: 3 additions & 12 deletions lib/Conversion/TorchToStablehlo/Basic.cpp
@@ -615,12 +615,8 @@ class ConvertAtenTransposeIntOp
     SmallVector<int64_t> permValues(inputRank);
     std::iota(std::begin(permValues), std::end(permValues), 0);
     std::swap(permValues[dim0], permValues[dim1]);
-    DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
-        RankedTensorType::get({static_cast<long int>(permValues.size())},
-                              rewriter.getI64Type()),
-        permValues);
     rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
-                                                        permutation);
+                                                        permValues);
     return success();
   }
 };
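The deleted boilerplate above is the recurring theme of this PR: with the newer StableHLO, `stablehlo::TransposeOp` takes its permutation directly as `ArrayRef<int64_t>` rather than as a tensor-typed `DenseIntElementsAttr`. A minimal before/after sketch of the call-site change, assuming the post-bump builders; `swapDims` is a hypothetical helper for illustration, not code from this PR:

```cpp
#include <numeric>
#include <utility>

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/PatternMatch.h"
#include "stablehlo/dialect/StablehloOps.h"

using namespace mlir;

// Transpose `input` by swapping dim0 and dim1, in the post-bump style.
static Value swapDims(PatternRewriter &rewriter, Location loc, Value input,
                      int64_t dim0, int64_t dim1) {
  auto inputTy = cast<RankedTensorType>(input.getType());
  SmallVector<int64_t> perm(inputTy.getRank());
  std::iota(perm.begin(), perm.end(), 0);
  std::swap(perm[dim0], perm[dim1]);
  // Old style (pre-bump): materialize the permutation as an attribute.
  //   auto attrTy = RankedTensorType::get(
  //       {static_cast<int64_t>(perm.size())}, rewriter.getI64Type());
  //   auto permAttr = DenseIntElementsAttr::get(attrTy, perm);
  //   return rewriter.create<stablehlo::TransposeOp>(loc, input, permAttr);
  // New style: pass the dims directly; the result type is inferred.
  return rewriter.create<stablehlo::TransposeOp>(loc, input, perm);
}
```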
@@ -793,12 +789,8 @@ LogicalResult ConvertAtenOp<AtenPermuteOp>::matchAndRewrite(
     return op.emitError("not all dims are valid");
   }

-  DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
-      RankedTensorType::get({static_cast<long int>(permValues.size())},
-                            rewriter.getI64Type()),
-      permValues);
   rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
-                                                      permutation);
+                                                      permValues);
   return success();
 }

@@ -1755,8 +1747,7 @@ LogicalResult ConvertAtenOp<AtenFlipOp>::matchAndRewrite(
     }
   }

-  rewriter.replaceOpWithNewOp<stablehlo::ReverseOp>(
-      op, outType, self, rewriter.getI64TensorAttr(dims));
+  rewriter.replaceOpWithNewOp<stablehlo::ReverseOp>(op, outType, self, dims);
   return success();
 }
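`stablehlo::ReverseOp` received the same API change: `rewriter.getI64TensorAttr(dims)` is no longer needed, and the dimension list is passed as `ArrayRef<int64_t>`. A one-liner sketch under the same assumptions (`flipDims` is illustrative, not from the PR):

```cpp
// Flip (reverse) the given dimensions of `input`; result type is inferred.
static Value flipDims(PatternRewriter &rewriter, Location loc, Value input,
                      ArrayRef<int64_t> dims) {
  return rewriter.create<stablehlo::ReverseOp>(loc, input, dims);
}

// e.g. reverse the two leading spatial dims:
//   Value flipped = flipDims(rewriter, loc, self, {0, 1});
```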

16 changes: 5 additions & 11 deletions lib/Conversion/TorchToStablehlo/Linear.cpp
@@ -62,13 +62,9 @@ Value getPermutedTensor(PatternRewriter &rewriter, Operation *op, Value input,
     newShape.push_back(inpShape[d]);
   }

-  auto attrTy = RankedTensorType::get({static_cast<int64_t>(transDims.size())},
-                                      rewriter.getIntegerType(64));
-  auto permuteAttr = DenseIntElementsAttr::get(attrTy, transDims);
-
   auto outTy = RankedTensorType::get(newShape, inputTy.getElementType());
   auto result = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), outTy,
-                                                        input, permuteAttr);
+                                                        input, transDims);
   return result.getResult();
 }

@@ -500,8 +496,8 @@ class ConvertAtenConvolutionOp : public ConvertAtenOp<AtenConvolutionOp> {
     for (int64_t i = 0; i <= rank; i++)
       transposeDims[i] = i;
     std::swap(transposeDims[rank - 1], transposeDims[rank - 2]);
-    weight = rewriter.create<stablehlo::TransposeOp>(
-        op->getLoc(), weight, rewriter.getI64TensorAttr(transposeDims));
+    weight = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), weight,
+                                                     transposeDims);

     // 3. [H, W, ..., G, OC, IC//G] => [H, W, ..., G*OC, IC//G]
     weightShapeInt.erase(weightShapeInt.end() - 2);
@@ -546,12 +542,10 @@ class ConvertAtenConvolutionOp : public ConvertAtenOp<AtenConvolutionOp> {
     }
     auto transposeTy =
         RankedTensorType::get(transposeShape, weightTy.getElementType());
-    DenseIntElementsAttr permAttr = DenseIntElementsAttr::get(
-        RankedTensorType::get({nDims}, rewriter.getI64Type()), perm);
     auto transposeOp = rewriter.create<stablehlo::TransposeOp>(
-        op->getLoc(), transposeTy, weight, permAttr);
+        op->getLoc(), transposeTy, weight, perm);
     auto reverseOp = rewriter.create<stablehlo::ReverseOp>(
-        op->getLoc(), transposeOp, rewriter.getI64TensorAttr({0, 1}));
+        op->getLoc(), transposeOp, ArrayRef<int64_t>{0, 1});

     // Prepare for transposed convolution
     SmallVector<int64_t> stablehloStrideVec(nSpatialDims, 1);
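The transposed-convolution path above chains the two new builders: permute the weight's channel dims, then reverse the spatial dims. A rough standalone sketch of that sequence (2-D case; the helper name and the assumption that the spatial dims are {0, 1} at this point are illustrative, mirroring the diff's `ReverseOp` operands):

```cpp
// Prepare a conv weight for transposed convolution: swap the last two
// (channel) dims, then flip the two leading spatial dims, as in the
// transpose + reverse pair in the hunk above.
static Value prepareTransposedConvWeight(PatternRewriter &rewriter,
                                         Location loc, Value weight) {
  int64_t rank = cast<RankedTensorType>(weight.getType()).getRank();
  SmallVector<int64_t> perm(rank);
  std::iota(perm.begin(), perm.end(), 0);
  std::swap(perm[rank - 1], perm[rank - 2]);
  Value transposed =
      rewriter.create<stablehlo::TransposeOp>(loc, weight, perm);
  return rewriter.create<stablehlo::ReverseOp>(loc, transposed,
                                               ArrayRef<int64_t>{0, 1});
}
```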
16 changes: 8 additions & 8 deletions lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp
@@ -250,12 +250,12 @@ Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
   return bcast_op.getResult();
 }

-SmallVector<size_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
-  SmallVector<size_t> posDims;
+SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
+  SmallVector<int64_t> posDims;
   posDims.reserve(rank);
   std::transform(
       dims.begin(), dims.end(), std::back_inserter(posDims),
-      [rank](int64_t d) -> size_t { return toPositiveDim(d, rank); });
+      [rank](int64_t d) -> int64_t { return toPositiveDim(d, rank); });
   return posDims;
 }
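Returning `SmallVector<int64_t>` keeps `toPositiveDims` consistent with MLIR's signed `int64_t` shape conventions and removes signed/unsigned friction at call sites such as `unsqueezeTensor` below. A quick illustration of the contract (values are examples, not from the PR):

```cpp
#include <cassert>

// Negative dims index from the back, PyTorch-style: for rank 4,
// -1 maps to 3 and -2 maps to 2.
void toPositiveDimsExample() {
  SmallVector<int64_t> pos = toPositiveDims({0, -1, -2}, /*rank=*/4);
  assert(pos[0] == 0 && pos[1] == 3 && pos[2] == 2);
}
```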

@@ -316,10 +316,10 @@ FailureOr<Value> unsqueezeTensor(PatternRewriter &rewriter, Operation *op,
         op, "failed to get dimension sizes of the input");

   auto dimSizes = *dimSizesInfo;
-  auto rank = dimSizes.size();
-  size_t newRank = rank + inputUnsqzDims.size();
+  int64_t rank = dimSizes.size();
+  int64_t newRank = rank + inputUnsqzDims.size();
   auto unsqzDims = toPositiveDims(inputUnsqzDims, newRank);
-  for (size_t k = 0, sz = unsqzDims.size(); k < sz; ++k)
+  for (int64_t k = 0, sz = unsqzDims.size(); k < sz; ++k)
     if (k > 1 && unsqzDims[k] <= unsqzDims[k - 1])
       return rewriter.notifyMatchFailure(
           op, "unsqueeze dimensions must be specified in order");
@@ -335,8 +335,8 @@
   std::vector<int64_t> newShape;
   newDimSizes.reserve(newRank);
   newShape.reserve(newRank);
-  for (size_t k = 0, i = 0, j = 0; k < newRank; ++k) {
-    if (j < unsqzDims.size() && unsqzDims[j] == k) {
+  for (int64_t k = 0, i = 0, j = 0; k < newRank; ++k) {
+    if (j < static_cast<int64_t>(unsqzDims.size()) && unsqzDims[j] == k) {
       newDimSizes.push_back(one);
       newShape.push_back(1);
       j++;
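With everything `int64_t`, the interleaving loop above no longer mixes signednesses. As a standalone model of what it computes (hypothetical plain-C++ function, not the torch-mlir code itself): positions in `unsqzDims` receive new size-1 dims, and the remaining output slots take the original dims in order.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Model of unsqueezeTensor's shape computation: `unsqzDims` holds sorted
// positions in the *output* rank where size-1 dims are inserted.
static std::vector<int64_t>
unsqueezedShape(const std::vector<int64_t> &shape,
                const std::vector<int64_t> &unsqzDims) {
  int64_t newRank = static_cast<int64_t>(shape.size() + unsqzDims.size());
  std::vector<int64_t> newShape;
  newShape.reserve(newRank);
  for (int64_t k = 0, i = 0, j = 0; k < newRank; ++k) {
    if (j < static_cast<int64_t>(unsqzDims.size()) && unsqzDims[j] == k) {
      newShape.push_back(1); // inserted unit dim
      ++j;
    } else {
      newShape.push_back(shape[i++]); // next original dim
    }
  }
  return newShape;
}

int main() {
  // Unsqueezing a {5, 7} tensor at output positions 0 and 2 -> {1, 5, 1, 7}.
  assert(unsqueezedShape({5, 7}, {0, 2}) == (std::vector<int64_t>{1, 5, 1, 7}));
  return 0;
}
```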
41 changes: 24 additions & 17 deletions projects/pt1/e2e_testing/xfail_sets.py
@@ -13,6 +13,8 @@
 from torch_mlir_e2e_test.test_suite import COMMON_TORCH_MLIR_LOWERING_XFAILS
 from torch_mlir._version import torch_version_for_comparison, version

+print(f"TORCH_VERSION_FOR_COMPARISON =", torch_version_for_comparison())
+
 LINALG_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS | {
     # Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR failed
     # 'linalg.depthwise_conv_2d_nchw_chw' op inferred input/output operand #1 has shape's dimension #0 to be 4, but found 8
@@ -21,6 +23,14 @@
     "IscloseStaticModuleTrue_basic"
 }

+if torch_version_for_comparison() >= version.parse("2.2.0.dev20231204"):
+    LINALG_XFAIL_SET |= {
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped",
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
+        "ConvolutionModule2DGroups_basic",
+    }
+
+
 TORCHDYNAMO_XFAIL_SET = {
     #### General TorchDynamo/PyTorch errors

@@ -306,10 +316,11 @@
     "ArangeStartOutViewModule_basic",
 }

-if torch_version_for_comparison() < version.parse("2.1.0.dev"):
-    TORCHDYNAMO_XFAIL_SET -= {
-        "ScaledDotProductAttentionSameModule_basic",
-        "ScaledDotProductAttentionDifferentModule_basic",
+if torch_version_for_comparison() >= version.parse("2.2.0.dev20231204"):
+    TORCHDYNAMO_XFAIL_SET |= {
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped",
+        "Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
+        "ConvolutionModule2DGroups_basic",
     }

 TORCHDYNAMO_CRASHING_SET = {
@@ -1305,6 +1316,10 @@
     "MeanModule_basic",
     "ArangeStartOutModule_basic",
     "ArangeStartOutViewModule_basic",
+    "Conv2dBiasNoPaddingModule_basic",
+    "Conv2dNoPaddingModule_basic",
+    "Conv2dWithPaddingDilationStrideModule_basic",
+    "Conv2dWithPaddingModule_basic",
 }

 MAKE_FX_TOSA_PASS_SET = (TOSA_PASS_SET | {
@@ -1335,20 +1350,12 @@
     # failed to legalize operation 'torch.aten.to.dtype' that was explicitly marked illegal
     "AtenEyeModuleInt2D_basic",
     "AtenEyeMModuleInt2D_basic",
-}
-
-if torch_version_for_comparison() < version.parse("2.1.0.dev"):
-    MAKE_FX_TOSA_PASS_SET -= {
-        # 'tensor.expand_shape' op expected rank expansion, but found source rank 1 >= result rank 1
-        "ReshapeCollapseModule_basic",
-
-        # failed to lower torch.aten.empty.memory_format
-        "BatchNorm1DModule_basic",
-        "BatchNorm1DWith2DInputModule_basic",
-        "BatchNorm2DModule_basic",
-        "BatchNorm3DModule_basic",
-        "BatchNorm1DStaticShapeModule_basic",
-    }
+    "Conv2dBiasNoPaddingModule_basic",
+    "Conv2dNoPaddingModule_basic",
+    "Conv2dWithPaddingDilationStrideModule_basic",
+    "Conv2dWithPaddingModule_basic",
+}

 LTC_CRASHING_SET = {
     # TODO: update test to move all inputs to the lazy device. Otherwise test fails with: