path: root/mlir/lib/ExecutionEngine/SparseTensor/NNZ.cpp
//===- NNZ.cpp - NNZ-statistics for direct sparse2sparse conversion -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains method definitions for `SparseTensorNNZ`.
//
// This file is part of the lightweight runtime support library for sparse
// tensor manipulations.  The functionality of the support library is meant
// to simplify benchmarking, testing, and debugging MLIR code operating on
// sparse tensors.  However, the provided functionality is **not** part of
// core MLIR itself.
//
//===----------------------------------------------------------------------===//
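//
// A minimal usage sketch (illustrative only; the `DimLevelType` enumerator
// spellings and the exact `NNZConsumer` callable type are assumptions taken
// from the declarations in Storage.h and Enums.h, not from this file):
//
//   // Gather nonzero counts for a 3x4 tensor stored dense-then-compressed
//   // (CSR-like).
//   SparseTensorNNZ stats({3, 4},
//                         {DimLevelType::Dense, DimLevelType::Compressed});
//   stats.add({0, 1});
//   stats.add({0, 3});
//   stats.add({2, 2});
//   // Visit the per-row counts at the compressed level (level 1);
//   // yields 2, 0, 1 for rows 0, 1, 2.
//   stats.forallCoords(1, [](uint64_t n) { /* consume the count `n` */ });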

#include "mlir/ExecutionEngine/SparseTensor/Storage.h"

using namespace mlir::sparse_tensor;

//===----------------------------------------------------------------------===//
SparseTensorNNZ::SparseTensorNNZ(const std::vector<uint64_t> &lvlSizes,
                                 const std::vector<DimLevelType> &lvlTypes)
    : lvlSizes(lvlSizes), lvlTypes(lvlTypes), nnz(getLvlRank()) {
  assert(lvlSizes.size() == lvlTypes.size() && "Rank mismatch");
  bool alreadyCompressed = false;
  (void)alreadyCompressed;
  uint64_t sz = 1; // The product of the `lvlSizes` of all levels strictly less than `l`.
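  // Hedged illustration (not from the header docs): with lvlSizes = {10, 20,
  // 30} and level types (dense, compressed, singleton), `sz` takes the values
  // 1, 10, and 200 at levels 0, 1, and 2, so only nnz[1] is allocated below,
  // holding 10 zero-initialized counters, one per level-0 coordinate.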
  for (uint64_t l = 0, lvlrank = getLvlRank(); l < lvlrank; ++l) {
    const DimLevelType dlt = lvlTypes[l];
    if (isCompressedDLT(dlt)) {
      if (alreadyCompressed)
        MLIR_SPARSETENSOR_FATAL(
            "Multiple compressed levels not currently supported");
      alreadyCompressed = true;
      nnz[l].resize(sz, 0); // Both allocate and zero-initialize.
    } else if (isDenseDLT(dlt)) {
      if (alreadyCompressed)
        MLIR_SPARSETENSOR_FATAL(
            "Dense after compressed not currently supported");
    } else if (isSingletonDLT(dlt)) {
      // Singleton after compressed causes no problems, either for
      // allocating `nnz` or for the yieldPos loop.  This remains true
      // even when adding support for multiple compressed dimensions or
      // for dense-after-compressed.
    } else {
      MLIR_SPARSETENSOR_FATAL("unsupported level type: %d\n",
                              static_cast<uint8_t>(dlt));
    }
    sz = detail::checkedMul(sz, lvlSizes[l]);
  }
}

void SparseTensorNNZ::forallCoords(uint64_t stopLvl,
                                   SparseTensorNNZ::NNZConsumer yield) const {
  assert(stopLvl < getLvlRank() && "Level out of bounds");
  assert(isCompressedDLT(lvlTypes[stopLvl]) &&
         "Cannot look up non-compressed levels");
  forallCoords(yield, stopLvl, 0, 0);
}

void SparseTensorNNZ::add(const std::vector<uint64_t> &lvlCoords) {
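  // Hedged illustration: with lvlSizes = {3, 4} and only level 1 compressed,
  // `add({2, 1})` skips the dense level 0, carries parentPos = 0 * 3 + 2 == 2
  // into level 1, and increments nnz[1][2], the running count for row 2.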
  uint64_t parentPos = 0;
  for (uint64_t l = 0, lvlrank = getLvlRank(); l < lvlrank; ++l) {
    if (isCompressedDLT(lvlTypes[l]))
      nnz[l][parentPos]++;
    parentPos = parentPos * lvlSizes[l] + lvlCoords[l];
  }
}

void SparseTensorNNZ::forallCoords(SparseTensorNNZ::NNZConsumer yield,
                                   uint64_t stopLvl, uint64_t parentPos,
                                   uint64_t l) const {
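  // Recursively enumerate every parent position below `stopLvl`, i.e., every
  // linearized prefix coordinate in [0, lvlSizes[0] * ... * lvlSizes[stopLvl-1]),
  // and yield the count stored for each.  Hedged illustration: with
  // lvlSizes = {3, 4} and stopLvl == 1, this yields nnz[1][0], nnz[1][1],
  // and nnz[1][2], in that order.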
  assert(l <= stopLvl);
  if (l == stopLvl) {
    assert(parentPos < nnz[l].size() && "Cursor is out of range");
    yield(nnz[l][parentPos]);
  } else {
    const uint64_t sz = lvlSizes[l];
    const uint64_t pstart = parentPos * sz;
    for (uint64_t i = 0; i < sz; ++i)
      forallCoords(yield, stopLvl, pstart + i, l + 1);
  }
}