/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 *  Copyright (c) 2017 by Contributors
 * \file metal_module.cc
 */
#include <dmlc/memory_io.h>
#include <tvm/runtime/registry.h>
#include <tvm/runtime/module.h>
#include <array>
#include <string>
#include <mutex>
#include "metal_module.h"
#include "metal_common.h"
#include "../pack_args.h"
#include "../thread_storage_scope.h"
#include "../meta_data.h"
#include "../file_util.h"

namespace tvm {
namespace runtime {

// Module to support thread-safe multi-GPU execution.
// The runtime will contain a per-device module table
// The modules will be lazily loaded
class MetalModuleNode final : public runtime::ModuleNode {
 public:
  explicit MetalModuleNode(std::string data,
                           std::string fmt,
                           std::unordered_map<std::string, FunctionInfo> fmap,
                           std::string source)
      : data_(data), fmt_(fmt), fmap_(fmap), source_(source) {
  }
  const char* type_key() const final {
    return "metal";
  }

  PackedFunc GetFunction(
      const std::string& name,
      const std::shared_ptr<ModuleNode>& sptr_to_self) final;

  // Save payload plus a separate meta-data file; only the format the
  // module was created with can be saved.
  void SaveToFile(const std::string& file_name,
                  const std::string& format) final {
    std::string fmt = GetFileFormat(file_name, format);
    CHECK_EQ(fmt, fmt_)
        << "Can only save to format=" << fmt_;
    std::string meta_file = GetMetaFilePath(file_name);
    SaveMetaDataToFile(meta_file, fmap_);
    SaveBinaryToFile(file_name, data_);
  }

  // Serialize format string, function table and payload, in that order.
  // MetalModuleLoadBinary must read the fields back in the same order.
  void SaveToBinary(dmlc::Stream* stream) final {
    stream->Write(fmt_);
    stream->Write(fmap_);
    stream->Write(data_);
  }

  // Return the source text when available; fall back to the raw payload
  // when the payload itself is metal source code.
  std::string GetSource(const std::string& format) final {
    if (format == fmt_) return data_;
    if (source_.length() != 0) {
      return source_;
    } else if (fmt_ == "metal") {
      return data_;
    } else {
      return "";
    }
  }

  // Get a compute pipeline state for func_name on device_id.
  // The per-device MTLLibrary is compiled lazily on first use and the
  // resulting pipeline states are cached per device.
  id<MTLComputePipelineState> GetPipelineState(
      size_t device_id, const std::string& func_name) {
    metal::MetalWorkspace* w = metal::MetalWorkspace::Global().get();
    CHECK_LT(device_id, w->devices.size());
    // start lock scope.
    std::lock_guard<std::mutex> lock(mutex_);
    if (finfo_.size() <= device_id) {
      // DeviceEntry is move-only: vector growth transfers ownership of
      // the retained Metal objects instead of duplicating it.
      finfo_.resize(device_id + 1);
    }
    DeviceEntry& e = finfo_[device_id];
    auto it = e.smap.find(func_name);
    if (it != e.smap.end()) return it->second;
    // compile
    NSError* err_msg = nil;
    if (e.lib == nil) {
      if (fmt_ == "metal") {
        // alloc must be paired with init before the object is used.
        MTLCompileOptions *opts = [[MTLCompileOptions alloc] init];
        // Use the Metal 1.2 for now.
        opts.languageVersion = MTLLanguageVersion1_2;
        opts.fastMathEnabled = YES;
        e.lib = [
            w->devices[device_id]
             newLibraryWithSource:[NSString stringWithUTF8String:data_.c_str()]
             options:opts
             error:&err_msg];
        // Balance the alloc with release; never call dealloc directly.
        [opts release];
        if (e.lib == nil) {
          LOG(FATAL) << "Fail to compile metal lib:"
                     << [[err_msg localizedDescription] UTF8String];
        }
        if (err_msg != nil) {
          LOG(INFO) << "Warning: "
                    << [[err_msg localizedDescription] UTF8String];
        }
      } else {
        // Build from library.
        auto q = dispatch_queue_create("q", DISPATCH_QUEUE_SERIAL);
        auto data = dispatch_data_create(
            data_.c_str(), data_.length(), q, ^{});
        e.lib = [
            w->devices[device_id]
             newLibraryWithData:data
             error:&err_msg];
        if (err_msg != nil || e.lib == nil) {
          LOG(FATAL) << "Fail to compile metal lib:"
                     << [[err_msg localizedDescription] UTF8String];
        }
      }
      [e.lib retain];
    }
    id<MTLFunction> f = [
        e.lib
         newFunctionWithName:
           [NSString stringWithUTF8String:func_name.c_str()]];
    CHECK(f != nil) << "cannot find function " << func_name;
    id<MTLComputePipelineState> state =
        [w->devices[device_id]
          newComputePipelineStateWithFunction:f
          error:&err_msg];
    CHECK(state != nil)
        << "cannot get state:" << " for function " << func_name
        << [[err_msg localizedDescription] UTF8String];
    // The state.threadExecutionWidth can change dynamically according
    // to the resource constraint in kernel, so it does not strictly hold.
    // Turn off warp-aware optimization for now.
    // CHECK_EQ(state.threadExecutionWidth, w->warp_size[device_id]);
    e.smap[func_name] = [state retain];
    return state;
  }

 private:
  // Device-specific entry owning retained Metal objects.
  // Move-only: a copy would let two entries release the same library
  // and pipeline states, over-releasing them when finfo_ reallocates.
  struct DeviceEntry {
    // library
    id<MTLLibrary> lib = nil;
    // state cache;
    std::unordered_map<std::string, id<MTLComputePipelineState> > smap;

    DeviceEntry() = default;
    DeviceEntry(DeviceEntry&& other) noexcept
        : lib(other.lib), smap(std::move(other.smap)) {
      other.lib = nil;
    }
    DeviceEntry(const DeviceEntry&) = delete;
    DeviceEntry& operator=(const DeviceEntry&) = delete;

    ~DeviceEntry() {
      if (lib != nil) [lib release];
      for (auto &&kv : smap) {
        [kv.second release];
      }
    }
  };
  // the binary data
  std::string data_;
  // The format
  std::string fmt_;
  // function information table.
  std::unordered_map<std::string, FunctionInfo> fmap_;
  // The source
  std::string source_;
  // function information.
  std::vector<DeviceEntry> finfo_;
  // internal mutex when updating the module
  std::mutex mutex_;
};

// A wrapped function class that turns a metal kernel into a packed func.
class MetalWrappedFunc {
 public:
  // Initialize the METAL function.
  void Init(MetalModuleNode* m,
            std::shared_ptr<ModuleNode> sptr,
            const std::string& func_name,
            size_t num_buffer_args,
            size_t num_pack_args,
            const std::vector<std::string>& thread_axis_tags) {
    w_ = metal::MetalWorkspace::Global().get();
    m_ = m;
    sptr_ = sptr;
    func_name_ = func_name;
    num_buffer_args_ = num_buffer_args;
    num_pack_args_ = num_pack_args;
    // Start with an empty per-device pipeline-state cache.
    std::fill(scache_.begin(), scache_.end(), (id<MTLComputePipelineState>)nil);
    thread_axis_cfg_.Init(num_buffer_args + num_pack_args, thread_axis_tags);
    // Eagerly build the pipeline state for the current device.
    metal::MetalThreadEntry* entry = metal::MetalThreadEntry::ThreadLocal();
    const int dev = entry->context.device_id;
    scache_[dev] = m->GetPipelineState(dev, func_name);
  }
  // Invoke the function with void arguments.
  void operator()(TVMArgs args,
                  TVMRetValue* rv,
                  const ArgUnion* pack_args) const {
    metal::MetalThreadEntry* entry = metal::MetalThreadEntry::ThreadLocal();
    const int dev = entry->context.device_id;
    // Lazily populate the cache for devices other than the init device.
    if (scache_[dev] == nil) {
      scache_[dev] = m_->GetPipelineState(dev, func_name_);
    }
    ThreadWorkLoad wl = thread_axis_cfg_.Extract(args);
    id<MTLCommandQueue> cmd_queue = w_->GetCommandQueue(entry->context);
    id<MTLCommandBuffer> cmd_buf = [cmd_queue commandBuffer];
    id<MTLComputeCommandEncoder> enc = [cmd_buf computeCommandEncoder];
    [enc setComputePipelineState:scache_[dev]];
    // Bind the leading buffer arguments one index each.
    for (size_t i = 0; i < num_buffer_args_; ++i) {
      void* arg_buf = args[static_cast<int>(i)];
      [enc setBuffer:(__bridge id<MTLBuffer>)(arg_buf) offset:0 atIndex:i];
    }
    // Remaining scalar arguments travel as one packed byte blob.
    if (num_pack_args_ != 0) {
      [enc setBytes:pack_args
             length:num_pack_args_ * sizeof(ArgUnion)
            atIndex:num_buffer_args_];
    }
    // Launch: grid and threadgroup sizes come from the thread axis config.
    MTLSize dim_grid = MTLSizeMake(
        wl.grid_dim(0), wl.grid_dim(1), wl.grid_dim(2));
    MTLSize dim_block = MTLSizeMake(
        wl.block_dim(0), wl.block_dim(1), wl.block_dim(2));
    [enc dispatchThreadgroups:dim_grid
        threadsPerThreadgroup:dim_block];
    [enc endEncoding];
    [cmd_buf commit];
  }

 private:
  // Reference to global workspace.
  metal::MetalWorkspace* w_;
  // internal module
  MetalModuleNode* m_;
  // the resource holder
  std::shared_ptr<ModuleNode> sptr_;
  // The name of the function.
  std::string func_name_;
  // Number of buffer arguments
  size_t num_buffer_args_;
  // number of packed arguments.
  size_t num_pack_args_;
  // Device state cache per device.
  // mark as mutable, to enable lazy initialization
  mutable std::array<id<MTLComputePipelineState>, kMetalMaxNumDevice> scache_;
  // thread axis configuration
  ThreadAxisConfig thread_axis_cfg_;
};

PackedFunc MetalModuleNode::GetFunction(
      const std::string& name,
      const std::shared_ptr<ModuleNode>& sptr_to_self) {
  CHECK_EQ(sptr_to_self.get(), this);
  CHECK_NE(name, symbol::tvm_module_main)
      << "Device function do not have main";
  // Unknown names yield a null PackedFunc rather than an error.
  auto entry = fmap_.find(name);
  if (entry == fmap_.end()) return PackedFunc();
  const FunctionInfo& info = entry->second;
  // Split the argument list into buffer and packed scalar arguments.
  const size_t n_buffer = NumBufferArgs(info.arg_types);
  MetalWrappedFunc wrapped;
  wrapped.Init(this, sptr_to_self, name,
               n_buffer, info.arg_types.size() - n_buffer,
               info.thread_axis_tags);
  return PackFuncNonBufferArg(wrapped, info.arg_types);
}

// Create a metal module from compiled data, its format, the function
// table and (optionally) the original source text.
Module MetalModuleCreate(
    std::string data,
    std::string fmt,
    std::unordered_map<std::string, FunctionInfo> fmap,
    std::string source) {
  // Make sure the workspace is initialized before handing out the module.
  metal::MetalWorkspace::Global()->Init();
  auto n = std::make_shared<MetalModuleNode>(data, fmt, fmap, source);
  return Module(n);
}

// Load module from module.
Module MetalModuleLoadFile(const std::string& file_name,
                           const std::string& format) {
  const std::string fmt = GetFileFormat(file_name, format);
  const std::string meta_file = GetMetaFilePath(file_name);
  // Binary payload and function meta-data live in separate files.
  std::string payload;
  std::unordered_map<std::string, FunctionInfo> fmap;
  LoadBinaryFromFile(file_name, &payload);
  LoadMetaDataFromFile(meta_file, &fmap);
  return MetalModuleCreate(payload, fmt, fmap, "");
}

// Restore a metal module from a serialized stream (see SaveToBinary).
Module MetalModuleLoadBinary(void* strm) {
  dmlc::Stream* stream = static_cast<dmlc::Stream*>(strm);
  std::string fmt;
  std::unordered_map<std::string, FunctionInfo> fmap;
  std::string data;
  // Fields must be read in exactly the order SaveToBinary wrote them.
  stream->Read(&fmt);
  stream->Read(&fmap);
  stream->Read(&data);
  return MetalModuleCreate(data, fmt, fmap, "");
}

TVM_REGISTER_GLOBAL("module.loadfile_metal")
.set_body_typed(MetalModuleLoadFile);

TVM_REGISTER_GLOBAL("module.loadbinary_metal")
.set_body_typed(MetalModuleLoadBinary);
317 318
}  // namespace runtime
}  // namespace tvm