author     Aart Bik <ajcbik@google.com>    2021-10-27 20:35:52 -0700
committer  Aart Bik <ajcbik@google.com>    2021-10-28 09:03:26 -0700
commit     947e14be98796b5e680cc7bcae262d4256c8e05c (patch)
tree       e0fb6fa3333c32357f6cef64dcde6f7f39d72a01
parent     f362aea42d29da2c8620c6eb8902f31d01613edc (diff)
download   llvm-947e14be98796b5e680cc7bcae262d4256c8e05c.tar.gz
[mlir][sparse] move conversion test back to original CHECK testing
Rationale:
The silent exit(1) gives few clues on where the error occurs on failure and
may even be confusing at first. The CHECK testing of all computed values and
indices may be a little more elaborate, but it directly pinpoints where
errors happen if they occur. This style is also consistent with the other
tests, which I prefer.

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D112688
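For context, a minimal sketch of the CHECK style this commit moves to (not
part of the patch; the function name @dump_example and the expected values
are illustrative for a 3-element buffer holding 1, 2, 3): the utility reads
the buffer into a fixed-size vector padded with -1, prints it, and FileCheck
matches the printed line, so a failure reports the exact mismatching values
rather than a bare exit(1).

// CHECK: ( 1, 2, 3, -1 )
func @dump_example(%arg0: memref<?xf64>) {
  %c0 = arith.constant 0 : index
  %d0 = arith.constant -1.0 : f64
  // Read up to 4 elements, padding with -1 past the buffer's length,
  // then print; FileCheck matches the printed vector against CHECK above.
  %0 = vector.transfer_read %arg0[%c0], %d0 : memref<?xf64>, vector<4xf64>
  vector.print %0 : vector<4xf64>
  return
}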
-rw-r--r--  mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir  |  210
1 file changed, 109 insertions(+), 101 deletions(-)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
index 7fe6d00594d3..97458f8db862 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
@@ -28,62 +28,21 @@
// Integration test that tests conversions between sparse tensors.
//
module {
- func private @exit(index) -> ()
-
//
- // Verify utilities.
+ // Output utilities.
//
- func @checkf64(%arg0: memref<?xf64>, %arg1: memref<?xf64>) {
+ func @dumpf64(%arg0: memref<?xf64>) {
%c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- // Same lengths?
- %0 = memref.dim %arg0, %c0 : memref<?xf64>
- %1 = memref.dim %arg1, %c0 : memref<?xf64>
- %2 = arith.cmpi ne, %0, %1 : index
- scf.if %2 {
- call @exit(%c1) : (index) -> ()
- }
- // Same content?
- scf.for %i = %c0 to %0 step %c1 {
- %a = memref.load %arg0[%i] : memref<?xf64>
- %b = memref.load %arg1[%i] : memref<?xf64>
- %c = arith.cmpf une, %a, %b : f64
- scf.if %c {
- call @exit(%c1) : (index) -> ()
- }
- }
+ %d0 = arith.constant -1.0 : f64
+ %0 = vector.transfer_read %arg0[%c0], %d0: memref<?xf64>, vector<25xf64>
+ vector.print %0 : vector<25xf64>
return
}
- func @check(%arg0: memref<?xindex>, %arg1: memref<?xindex>) {
+ func @dumpidx(%arg0: memref<?xindex>) {
%c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- // Same lengths?
- %0 = memref.dim %arg0, %c0 : memref<?xindex>
- %1 = memref.dim %arg1, %c0 : memref<?xindex>
- %2 = arith.cmpi ne, %0, %1 : index
- scf.if %2 {
- call @exit(%c1) : (index) -> ()
- }
- // Same content?
- scf.for %i = %c0 to %0 step %c1 {
- %a = memref.load %arg0[%i] : memref<?xindex>
- %b = memref.load %arg1[%i] : memref<?xindex>
- %c = arith.cmpi ne, %a, %b : index
- scf.if %c {
- call @exit(%c1) : (index) -> ()
- }
- }
- return
- }
-
- //
- // Output utility.
- //
- func @dumpf64(%arg0: memref<?xf64>) {
- %c0 = arith.constant 0 : index
- %d0 = arith.constant 0.0 : f64
- %0 = vector.transfer_read %arg0[%c0], %d0: memref<?xf64>, vector<24xf64>
- vector.print %0 : vector<24xf64>
+ %d0 = arith.constant 0 : index
+ %0 = vector.transfer_read %arg0[%c0], %d0: memref<?xindex>, vector<25xindex>
+ vector.print %0 : vector<25xindex>
return
}
@@ -133,13 +92,24 @@ module {
%i = sparse_tensor.convert %3 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64, #Tensor3>
//
- // Check values equality.
+ // Check values.
+ //
+ // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, -1 )
+ // CHECK-NEXT: ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24, -1 )
+ // CHECK-NEXT: ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24, -1 )
+ // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, -1 )
+ // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, -1 )
+ // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, -1 )
+ // CHECK-NEXT: ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24, -1 )
+ // CHECK-NEXT: ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24, -1 )
+ // CHECK-NEXT: ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24, -1 )
+ // CHECK-NEXT: ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24, -1 )
+ // CHECK-NEXT: ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24, -1 )
+ // CHECK-NEXT: ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24, -1 )
//
-
%v1 = sparse_tensor.values %1 : tensor<2x3x4xf64, #Tensor1> to memref<?xf64>
%v2 = sparse_tensor.values %2 : tensor<2x3x4xf64, #Tensor2> to memref<?xf64>
%v3 = sparse_tensor.values %3 : tensor<2x3x4xf64, #Tensor3> to memref<?xf64>
-
%av = sparse_tensor.values %a : tensor<2x3x4xf64, #Tensor1> to memref<?xf64>
%bv = sparse_tensor.values %b : tensor<2x3x4xf64, #Tensor1> to memref<?xf64>
%cv = sparse_tensor.values %c : tensor<2x3x4xf64, #Tensor1> to memref<?xf64>
@@ -150,20 +120,59 @@ module {
%hv = sparse_tensor.values %h : tensor<2x3x4xf64, #Tensor3> to memref<?xf64>
%iv = sparse_tensor.values %i : tensor<2x3x4xf64, #Tensor3> to memref<?xf64>
- call @checkf64(%v1, %av) : (memref<?xf64>, memref<?xf64>) -> ()
- call @checkf64(%v1, %bv) : (memref<?xf64>, memref<?xf64>) -> ()
- call @checkf64(%v1, %cv) : (memref<?xf64>, memref<?xf64>) -> ()
- call @checkf64(%v2, %dv) : (memref<?xf64>, memref<?xf64>) -> ()
- call @checkf64(%v2, %ev) : (memref<?xf64>, memref<?xf64>) -> ()
- call @checkf64(%v2, %fv) : (memref<?xf64>, memref<?xf64>) -> ()
- call @checkf64(%v3, %gv) : (memref<?xf64>, memref<?xf64>) -> ()
- call @checkf64(%v3, %hv) : (memref<?xf64>, memref<?xf64>) -> ()
- call @checkf64(%v3, %iv) : (memref<?xf64>, memref<?xf64>) -> ()
+ call @dumpf64(%v1) : (memref<?xf64>) -> ()
+ call @dumpf64(%v2) : (memref<?xf64>) -> ()
+ call @dumpf64(%v3) : (memref<?xf64>) -> ()
+ call @dumpf64(%av) : (memref<?xf64>) -> ()
+ call @dumpf64(%bv) : (memref<?xf64>) -> ()
+ call @dumpf64(%cv) : (memref<?xf64>) -> ()
+ call @dumpf64(%dv) : (memref<?xf64>) -> ()
+ call @dumpf64(%ev) : (memref<?xf64>) -> ()
+ call @dumpf64(%fv) : (memref<?xf64>) -> ()
+ call @dumpf64(%gv) : (memref<?xf64>) -> ()
+ call @dumpf64(%hv) : (memref<?xf64>) -> ()
+ call @dumpf64(%iv) : (memref<?xf64>) -> ()
//
- // Check index equality.
+ // Check indices.
+ //
+ // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+ // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 )
//
-
%v10 = sparse_tensor.indices %1, %c0 : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
%v11 = sparse_tensor.indices %1, %c1 : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
%v12 = sparse_tensor.indices %1, %c2 : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
@@ -204,46 +213,45 @@ module {
%i31 = sparse_tensor.indices %i, %c1 : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
%i32 = sparse_tensor.indices %i, %c2 : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
- call @check(%v10, %a10) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v11, %a11) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v12, %a12) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v10, %b10) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v11, %b11) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v12, %b12) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v10, %c10) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v11, %c11) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v12, %c12) : (memref<?xindex>, memref<?xindex>) -> ()
+ call @dumpidx(%v10) : (memref<?xindex>) -> ()
+ call @dumpidx(%v11) : (memref<?xindex>) -> ()
+ call @dumpidx(%v12) : (memref<?xindex>) -> ()
+ call @dumpidx(%v10) : (memref<?xindex>) -> ()
+ call @dumpidx(%v11) : (memref<?xindex>) -> ()
+ call @dumpidx(%v12) : (memref<?xindex>) -> ()
+ call @dumpidx(%v10) : (memref<?xindex>) -> ()
+ call @dumpidx(%v11) : (memref<?xindex>) -> ()
+ call @dumpidx(%v12) : (memref<?xindex>) -> ()
- call @check(%v20, %d20) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v21, %d21) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v22, %d22) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v20, %e20) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v21, %e21) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v22, %e22) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v20, %f20) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v21, %f21) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v22, %f22) : (memref<?xindex>, memref<?xindex>) -> ()
+ call @dumpidx(%a10) : (memref<?xindex>) -> ()
+ call @dumpidx(%a11) : (memref<?xindex>) -> ()
+ call @dumpidx(%a12) : (memref<?xindex>) -> ()
+ call @dumpidx(%b10) : (memref<?xindex>) -> ()
+ call @dumpidx(%b11) : (memref<?xindex>) -> ()
+ call @dumpidx(%b12) : (memref<?xindex>) -> ()
+ call @dumpidx(%c10) : (memref<?xindex>) -> ()
+ call @dumpidx(%c11) : (memref<?xindex>) -> ()
+ call @dumpidx(%c12) : (memref<?xindex>) -> ()
- call @check(%v30, %g30) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v31, %g31) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v32, %g32) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v30, %h30) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v31, %h31) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v32, %h32) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v30, %i30) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v31, %i31) : (memref<?xindex>, memref<?xindex>) -> ()
- call @check(%v32, %i32) : (memref<?xindex>, memref<?xindex>) -> ()
+ call @dumpidx(%d20) : (memref<?xindex>) -> ()
+ call @dumpidx(%d21) : (memref<?xindex>) -> ()
+ call @dumpidx(%d22) : (memref<?xindex>) -> ()
+ call @dumpidx(%e20) : (memref<?xindex>) -> ()
+ call @dumpidx(%e21) : (memref<?xindex>) -> ()
+ call @dumpidx(%e22) : (memref<?xindex>) -> ()
+ call @dumpidx(%f20) : (memref<?xindex>) -> ()
+ call @dumpidx(%f21) : (memref<?xindex>) -> ()
+ call @dumpidx(%f22) : (memref<?xindex>) -> ()
- //
- // Sanity check direct results.
- //
- // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 )
- // CHECK-NEXT: ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24 )
- // CHECK-NEXT: ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24 )
- //
- call @dumpf64(%v1) : (memref<?xf64>) -> ()
- call @dumpf64(%v2) : (memref<?xf64>) -> ()
- call @dumpf64(%v3) : (memref<?xf64>) -> ()
+ call @dumpidx(%g30) : (memref<?xindex>) -> ()
+ call @dumpidx(%g31) : (memref<?xindex>) -> ()
+ call @dumpidx(%g32) : (memref<?xindex>) -> ()
+ call @dumpidx(%h30) : (memref<?xindex>) -> ()
+ call @dumpidx(%h31) : (memref<?xindex>) -> ()
+ call @dumpidx(%h32) : (memref<?xindex>) -> ()
+ call @dumpidx(%i30) : (memref<?xindex>) -> ()
+ call @dumpidx(%i31) : (memref<?xindex>) -> ()
+ call @dumpidx(%i32) : (memref<?xindex>) -> ()
// Release the resources.
sparse_tensor.release %1 : tensor<2x3x4xf64, #Tensor1>