/*!
 *  Copyright (c) 2016 by Contributors
 * \file plan_memory.cc
 * \brief Assign memory tag to each of the data entries.
 */
#include <nnvm/graph.h>
#include <nnvm/pass.h>
#include <nnvm/graph_attr_types.h>
#include <nnvm/op_attr_types.h>
#include <memory>
#include "./graph_algorithm.h"

namespace nnvm {
namespace pass {
namespace {

// simple graph based allocator.
class GraphAllocator {
 public:
  // storage id equals integer.
  using StorageID = int;
  // bad storage id
  static const StorageID kBadStorageID = -1;
  // external storage id
  static const StorageID kExternalStorageID = -2;
  // dynamic storage id
  static const StorageID kDynamicStorageID = -3;

  // request a free storage
  StorageID Request(int dev_id, int dtype, TShape shape, uint32_t node_id) {
    if (shape.ndim() == 0) return kBadStorageID;
    // search memory block in [size / match_range_, size * match_range_)
    // TODO(tqchen) add size of the dtype, assume 4 bytes for now
    size_t size = shape.Size() * 4;
    if (match_range_ == 0) return this->Alloc(dev_id, size);
    auto begin = free_.lower_bound(size / match_range_);
    auto mid = free_.lower_bound(size);
    auto end = free_.upper_bound(size * match_range_);
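    // e.g. (illustrative) with the default match_range_ of 16, a
    // 1024-byte request scans free blocks whose size lies in [64, 16384)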
    // search for memory blocks larger than requested
    for (auto it = mid; it != end; ++it) {
      StorageEntry *e = it->second;
      if (e->device_id != dev_id) continue;
      if (node_color_.size() != 0 &&
          node_color_[e->released_by_node] != node_color_[node_id]) continue;
      // Use exact matching strategy
      e->max_bytes = std::max(size, e->max_bytes);
      // find an exact match, erase from map and return
      free_.erase(it);
      return e->id;
    }
    // then search for memory blocks smaller than requested space
    for (auto it = mid; it != begin;) {
      --it;
      StorageEntry *e = it->second;
      if (e->device_id != dev_id) continue;
      if (node_color_.size() != 0 &&
          node_color_[e->released_by_node] != node_color_[node_id]) continue;
      // Use exact matching strategy
      e->max_bytes = std::max(size, e->max_bytes);
      // erase from map and return
      free_.erase(it);
      return e->id;
    }
    // cannot find anything, allocate a new one.
    return this->Alloc(dev_id, size);
  }
  // release a memory space.
  void Release(StorageID id, uint32_t node_id) {
    CHECK_NE(id, kBadStorageID);
    if (id == kExternalStorageID || id == kDynamicStorageID) return;
    StorageEntry *e = data_[id].get();
    e->released_by_node = node_id;
    free_.insert({e->max_bytes, e});
  }

  // total number of bytes allocated
  size_t TotalAllocBytes() const {
    size_t total = 0;
    for (auto &p : data_) {
      total += p->max_bytes;
    }
    return total;
  }

  // constructor
  explicit GraphAllocator(const IndexedGraph* idx, const size_t match_range) : idx_(idx) {
    this->Init(match_range, dmlc::GetEnv("NNVM_EXEC_NUM_TEMP", 1));
  }

 private:
  // initialize the graph allocator
  void Init(const size_t match_range, const uint32_t num_match_color) {
    match_range_ = match_range;
    num_match_color_ = num_match_color;
    if (num_match_color_ > 1) {
      std::vector<uint32_t> importance(idx_->num_nodes(), 0);
      for (uint32_t nid = 0; nid < idx_->num_nodes(); ++nid) {
        if ((*idx_)[nid].source->is_variable()) continue;
        importance[nid] = 1;
      }
      num_match_color_ = pass::ColorNodeGroup(
          *idx_, importance, num_match_color_, &node_color_);
    }
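    // Note (illustrative): coloring restricts reuse. In Request, a free
    // block released by a node of one color is only handed back to nodes
    // of the same color, keeping sharing local to one color group.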
  }

  StorageID Alloc(int dev_id, size_t size) {
    StorageID id = static_cast<StorageID>(data_.size());
    std::unique_ptr<StorageEntry> ptr(new StorageEntry());
    ptr->id = id;
    ptr->device_id = dev_id;
    ptr->max_bytes = size;
    data_.emplace_back(std::move(ptr));
    return id;
  }
  // internal storage entry
  struct StorageEntry {
    // the id of the entry.
    StorageID id;
    // the device id of the storage.
    int device_id;
    // maximum size of storage requested.
    size_t max_bytes{0};
    // node index that released it last time
    uint32_t released_by_node{0};
  };
  // scale used for rough match
  size_t match_range_;
  // number of colors used by the color-based match algorithm
  uint32_t num_match_color_{1};
  // the size of each dtype
  std::vector<size_t> dtype_size_dict_;
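  // (note: currently unused; Request assumes 4 bytes per element, see
  // the TODO there)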
  // free list of storage entry
  std::multimap<size_t, StorageEntry*> free_;
  // all the storage resources available
  std::vector<std::unique_ptr<StorageEntry> > data_;
  // color of nodes in the graph, used for auxiliary policy making.
  std::vector<uint32_t> node_color_;
  // internal indexed graph
  const IndexedGraph* idx_;
};
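// A minimal usage sketch of GraphAllocator (illustrative only; assumes
// an IndexedGraph `idx` is at hand and elements are 4 bytes wide):
//   GraphAllocator alloc(&idx, /*match_range=*/16);
//   GraphAllocator::StorageID sid =
//       alloc.Request(/*dev_id=*/0, /*dtype=*/0, TShape({16, 16}), /*node_id=*/0);
//   alloc.Release(sid, /*node_id=*/1);       // block becomes reusable
//   size_t bytes = alloc.TotalAllocBytes();  // 16 * 16 * 4 = 1024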

/*
 * Internal method to perform the memory allocation for a graph
 */
size_t AllocMemory(const Graph& ret, const IndexedGraph& idx,
                   const std::pair<uint32_t, uint32_t>& node_range,
                   StorageVector* storage_ptr,
                   std::vector<int>* storage_inplace_index_ptr,
                   const std::vector<uint32_t>& entry_ref_count,
                   GraphAllocator* allocator) {
  static auto& finplace_option = Op::GetAttr<FInplaceOption>("FInplaceOption");
  static auto& finplace_identity = Op::GetAttr<FInplaceIdentity>("FInplaceIdentity");
  static auto& fignore_inputs = Op::GetAttr<FIgnoreInputs>("FIgnoreInputs");

  // Get references
  auto &storage = *storage_ptr;
  auto &storage_inplace_index = *storage_inplace_index_ptr;

  // Get attributes from the graph
  const ShapeVector& shape_vec = ret.GetAttr<ShapeVector>("shape");
  const DTypeVector& dtype_vec = ret.GetAttr<DTypeVector>("dtype");
  const DeviceVector* device_vec = nullptr;

  if (ret.attrs.count("device") != 0) {
    device_vec = &(ret.GetAttr<DeviceVector>("device"));
  }
  size_t num_not_allocated = 0;
  std::vector<GraphAllocator::StorageID> storage_ref_count(idx.num_node_entries(), 0);

  for (uint32_t nid = node_range.first; nid < node_range.second; ++nid) {
    const auto& inode = idx[nid];
    if (inode.source->is_variable()) continue;
    // check inplace option
    if (finplace_option.count(inode.source->op()) != 0) {
      auto inplace_pairs = finplace_option[inode.source->op()](inode.source->attrs);
      std::vector<bool> identity;
      if (finplace_identity.count(inode.source->op()) != 0) {
        identity = finplace_identity[inode.source->op()](inode.source->attrs);
        CHECK_EQ(identity.size(), inplace_pairs.size())
            << "FInplaceOption and FInplaceIdentity returned vectors of different "
            << "size for operator " << inode.source->op()->name;
      } else {
        identity = std::vector<bool>(inplace_pairs.size(), false);
      }
      std::vector<bool> taken(inode.inputs.size(), false);
      for (size_t ipair = 0; ipair < inplace_pairs.size(); ++ipair) {
        const auto& kv = inplace_pairs[ipair];
        uint32_t eid_out = idx.entry_id(nid, kv.second);
        uint32_t eid_in = idx.entry_id(inode.inputs[kv.first]);
        auto sid_out = storage[eid_out];
        auto sid_in = storage[eid_in];
        bool ignore_all_inputs = (fignore_inputs.count(inode.source->op()) != 0 &&
                                  fignore_inputs[inode.source->op()](
                                      inode.source->attrs).size() == inode.source->num_inputs());
        if (taken[kv.first] == false &&
            sid_out == GraphAllocator::kBadStorageID &&
            sid_in >= 0 &&
            ((storage_ref_count[sid_in] == 1 && !ignore_all_inputs) || identity[ipair]) &&
            entry_ref_count[eid_out] > 0 &&
            shape_vec[eid_out].Size() == shape_vec[eid_in].Size() &&
            dtype_vec[eid_out] == dtype_vec[eid_in]) {
          // inplace optimization
          taken[kv.first] = true;
          storage[eid_out] = sid_in;
          // Reuse the input's storage for the output and add the
          // output's ref count to it. This will get subtracted later
          // in the free-input section.
          storage_ref_count[sid_in] += entry_ref_count[eid_out];
          storage_inplace_index[eid_out] = kv.first;
        }
      }
    }
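    // Illustrative example (assumed, not taken from the original
    // source): an elementwise op declaring the inplace pair {0, 0} lets
    // its output entry inherit the input's storage id when the input
    // has no other pending readers, so no new block is requested.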
    // normal allocation
    const int dev_id = (device_vec != nullptr) ? device_vec->at(nid) : 0;
    // sort output nodes based on size before allocating output
    std::multimap<size_t, uint32_t> eids;
    for (uint32_t index = 0; index < inode.source->num_outputs(); ++index) {
      uint32_t eid = idx.entry_id(nid, index);
      // only request memory for kBadStorageID
      if (storage[eid] == GraphAllocator::kBadStorageID) {
        auto &eshape = shape_vec[eid];
        size_t esize = 0;
        if (eshape.ndim() != 0) esize = eshape.Size();
        eids.insert(std::make_pair(esize, eid));
      }
    }
    for (auto rit = eids.rbegin(); rit != eids.rend(); ++rit) {
      uint32_t eid = rit->second;
      auto sid = allocator->Request(dev_id, dtype_vec[eid], shape_vec[eid], nid);
      if (sid >= 0) {
        storage_ref_count[sid] = entry_ref_count[eid];
      }
      storage[eid] = sid;
    }
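    // Outputs are requested largest-first (reverse iteration over the
    // size-sorted multimap), presumably so large tensors get first pick
    // of the matching free blocks.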
    // check if certain inputs are ignored.
    std::vector<uint32_t> ignore_inputs;
    if (fignore_inputs.count(inode.source->op()) != 0) {
      ignore_inputs = fignore_inputs[inode.source->op()](inode.source->attrs);
      std::sort(ignore_inputs.begin(), ignore_inputs.end());
    }
    // then free inputs
    for (size_t i = 0; i < inode.inputs.size(); ++i) {
      // ref counter of ignored input is already decreased.
      if (std::binary_search(ignore_inputs.begin(), ignore_inputs.end(), i)) continue;
      const auto& e = inode.inputs[i];
      uint32_t eid = idx.entry_id(e);
      auto sid = storage[eid];
      // negative sid means external or dynamic storage, nothing to release
      if (sid < 0) continue;
      // if we decrease it to zero, we are ready to release it
252 253 254
      --storage_ref_count[sid];
      if (storage_ref_count[sid] == 0) {
        allocator->Release(sid, nid);
      }
    }
    // check if there are outputs that can be freed immediately
    // these outputs are not referenced by any operator.
    for (uint32_t index = 0; index < inode.source->num_outputs(); ++index) {
      uint32_t eid = idx.entry_id(nid, index);
      auto sid = storage[eid];
      if (sid >= 0 && storage_ref_count[sid] == 0) {
        allocator->Release(sid, nid);
        // use -2 to indicate that this output was never referenced.
        storage_inplace_index[eid] = -2;
      }
      if (storage[eid] == GraphAllocator::kBadStorageID) {
        ++num_not_allocated;
      }
    }
  }
  return num_not_allocated;
}


// function to plan memory
Graph PlanMemory(Graph ret) {
  // setup ref counter
  const IndexedGraph& idx = ret.indexed_graph();
  static auto& fignore_inputs = Op::GetAttr<FIgnoreInputs>("FIgnoreInputs");
  std::pair<uint32_t, uint32_t> node_range = {0, idx.num_nodes()};
  if (ret.attrs.count("node_range")) {
    node_range = ret.MoveCopyAttr<std::pair<uint32_t, uint32_t> >("node_range");
  }
  // reference counter of each node entry
  std::vector<uint32_t> ref_count;
  // step 1: initialize reference count
  if (ret.attrs.count("ref_count") != 0) {
    ref_count = ret.MoveCopyAttr<std::vector<uint32_t> >("ref_count");
  } else {
    ref_count.resize(idx.num_node_entries(), 0);
    for (uint32_t nid = 0; nid < idx.num_nodes(); ++nid) {
      const auto& inode = idx[nid];
      if (inode.source->is_variable()) continue;
      for (const auto& e : inode.inputs) {
        ++ref_count[idx.entry_id(e)];
      }
      // no dataflow dependency is needed for inputs that are ignored;
      // revoke their dependency counters.
      if (fignore_inputs.count(inode.source->op()) != 0) {
        auto ignore_inputs = fignore_inputs[inode.source->op()](inode.source->attrs);
        for (uint32_t i : ignore_inputs) {
          --ref_count[idx.entry_id(inode.inputs[i])];
        }
      }
    }
    for (const auto& e : idx.outputs()) {
      ++ref_count[idx.entry_id(e)];
    }
  }
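  // Example (illustrative): an entry consumed by two downstream
  // operators starts with ref_count = 2; its storage can be released
  // only after both consumers have been processed in AllocMemory.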
  // step 2: allocate memory.
  StorageVector storage;
  if (ret.attrs.count("storage") != 0) {
    storage = ret.MoveCopyAttr<StorageVector>("storage");
  } else {
    storage.resize(idx.num_node_entries(), -1);
  }

  // Search for the best NNVM_EXEC_MATCH_RANGE parameter. This search is turned off by default
  size_t min_allocated_bytes = -1;
  size_t max_match_range = dmlc::GetEnv("NNVM_EXEC_MATCH_RANGE", 16);
  size_t min_match_range =
         dmlc::GetEnv("NNVM_AUTO_SEARCH_MATCH_RANGE", false) ? 1 : max_match_range;
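  // Illustrative: with NNVM_AUTO_SEARCH_MATCH_RANGE=1 the loop below
  // sweeps match_range over 1, 2, 4, 8, 16 (doubling up to
  // NNVM_EXEC_MATCH_RANGE, default 16) and keeps the plan with the
  // smallest total allocation; otherwise only the single value of
  // NNVM_EXEC_MATCH_RANGE is tried.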
  for (size_t match_range = min_match_range; match_range <= max_match_range; match_range *= 2) {
    // Make a copy of related fields
    StorageVector storage_vec(storage);
    std::vector<int> storage_inplace_index(idx.num_node_entries(), -1);

    // the allocator
    GraphAllocator allocator(&idx, match_range);

    // number of entries that are not statically allocated.
    size_t storage_num_not_allocated =
      AllocMemory(ret, idx, node_range, &storage_vec, &storage_inplace_index,
                  ref_count, &allocator);
    size_t storage_allocated_bytes = allocator.TotalAllocBytes();

    // Choose the plan which leads to minimal memory usage
    if (min_allocated_bytes > storage_allocated_bytes) {
      ret.attrs["storage_id"] = std::make_shared<any>(std::move(storage_vec));
      ret.attrs["storage_inplace_index"] = std::make_shared<any>(std::move(storage_inplace_index));
      ret.attrs["storage_allocated_bytes"] = std::make_shared<any>(storage_allocated_bytes);
      ret.attrs["storage_num_not_allocated"] = std::make_shared<any>(storage_num_not_allocated);
      min_allocated_bytes = storage_allocated_bytes;
    }

    if (max_match_range == 0) {
      break;
    }
  }
  return ret;
}

NNVM_REGISTER_PASS(PlanMemory)
.describe("Plan the memory allocation of each node entry.")
.set_body(PlanMemory)
.set_change_graph(false)
.depend_graph_attr("dtype")
.depend_graph_attr("shape")
.provide_graph_attr("storage_id")
.provide_graph_attr("storage_inplace_index");

}  // namespace
}  // namespace pass
}  // namespace nnvm