/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 * \file pynq_driver.cc
 * \brief VTA driver for Zynq SoC boards with Pynq support (see pynq.io).
 */

#include <vta/driver.h>
#include <thread>
#include "pynq_driver.h"


void* VTAMemAlloc(size_t size, int cached) {
29 30
  assert(size <= VTA_MAX_XFER);
  // Rely on the pynq-specific cma library
31 32 33 34
  return cma_alloc(size, cached);
}

void VTAMemFree(void* buf) {
35
  // Rely on the pynq-specific cma library
36 37 38
  cma_free(buf);
}

vta_phy_addr_t VTAMemGetPhyAddr(void* buf) {
  // Ask the CMA library for the physical address backing this buffer,
  // which is what the FPGA DMA engines consume.
  return cma_get_phy_addr(buf);
}

void VTAMemCopyFromHost(void* dst, const void* src, size_t size) {
  // On SoC FPGAs the accelerator shares DRAM with the CPU,
  // so a plain memcpy is all that is needed.
  memcpy(dst, src, size);
}

void VTAMemCopyToHost(void* dst, const void* src, size_t size) {
  // Shared CPU/FPGA memory on SoC targets: a straight memcpy suffices.
  memcpy(dst, src, size);
}

void VTAFlushCache(void* vir_addr, vta_phy_addr_t phy_addr, int size) {
  // Call the cma_flush_cache on the CMA buffer
55
  // so that the FPGA can read the buffer data.
56
  cma_flush_cache(vir_addr, phy_addr, size);
57 58
}

void VTAInvalidateCache(void* vir_addr, vta_phy_addr_t phy_addr, int size) {
  // Call the cma_invalidate_cache on the CMA buffer
61
  // so that the host needs to read the buffer data.
62
  cma_invalidate_cache(vir_addr, phy_addr, size);
63 64
}

void *VTAMapRegister(uint32_t addr) {
66 67 68 69 70
  // Align the base address with the pages
  uint32_t virt_base = addr & ~(getpagesize() - 1);
  // Calculate base address offset w.r.t the base address
  uint32_t virt_offset = addr - virt_base;
  // Open file and mmap
71
  uint32_t mmap_file = open("/dev/mem", O_RDWR|O_SYNC);
72
  return mmap(NULL,
73
              (VTA_IP_REG_MAP_RANGE + virt_offset),
74 75 76 77
              PROT_READ|PROT_WRITE,
              MAP_SHARED,
              mmap_file,
              virt_base);
78 79
}

void VTAUnmapRegister(void *vta) {
81
  // Unmap memory
82
  int status = munmap(vta, VTA_IP_REG_MAP_RANGE);
83
  assert(status == 0);
84 85
}

void VTAWriteMappedReg(void* base_addr, uint32_t offset, uint32_t val) {
  // Compute the register address byte-wise, then perform a volatile store
  // so the compiler cannot elide or reorder the MMIO write.
  volatile uint32_t* reg = reinterpret_cast<volatile uint32_t*>(
      reinterpret_cast<char*>(base_addr) + offset);
  *reg = val;
}

uint32_t VTAReadMappedReg(void* base_addr, uint32_t offset) {
  // Volatile load from base + byte offset: each call re-reads the register
  // instead of reusing a cached value.
  const volatile uint32_t* reg = reinterpret_cast<const volatile uint32_t*>(
      reinterpret_cast<char*>(base_addr) + offset);
  return *reg;
}

class VTADevice {
 public:
  VTADevice() {
    // VTA stage handles
98 99 100 101
    vta_fetch_handle_ = VTAMapRegister(VTA_FETCH_ADDR);
    vta_load_handle_ = VTAMapRegister(VTA_LOAD_ADDR);
    vta_compute_handle_ = VTAMapRegister(VTA_COMPUTE_ADDR);
    vta_store_handle_ = VTAMapRegister(VTA_STORE_ADDR);
102 103 104 105
  }

  ~VTADevice() {
    // Close VTA stage handle
106 107 108 109
    VTAUnmapRegister(vta_fetch_handle_);
    VTAUnmapRegister(vta_load_handle_);
    VTAUnmapRegister(vta_compute_handle_);
    VTAUnmapRegister(vta_store_handle_);
110 111 112 113 114
  }

  int Run(vta_phy_addr_t insn_phy_addr,
          uint32_t insn_count,
          uint32_t wait_cycles) {
115 116 117 118 119 120 121
    VTAWriteMappedReg(vta_fetch_handle_, VTA_FETCH_INSN_COUNT_OFFSET, insn_count);
    VTAWriteMappedReg(vta_fetch_handle_, VTA_FETCH_INSN_ADDR_OFFSET, insn_phy_addr);
    VTAWriteMappedReg(vta_load_handle_, VTA_LOAD_INP_ADDR_OFFSET, 0);
    VTAWriteMappedReg(vta_load_handle_, VTA_LOAD_WGT_ADDR_OFFSET, 0);
    VTAWriteMappedReg(vta_compute_handle_, VTA_COMPUTE_UOP_ADDR_OFFSET, 0);
    VTAWriteMappedReg(vta_compute_handle_, VTA_COMPUTE_BIAS_ADDR_OFFSET, 0);
    VTAWriteMappedReg(vta_store_handle_, VTA_STORE_OUT_ADDR_OFFSET, 0);
122 123 124 125 126 127 128 129 130 131

    // VTA start
    VTAWriteMappedReg(vta_fetch_handle_, 0x0, VTA_START);
    VTAWriteMappedReg(vta_load_handle_, 0x0, VTA_AUTORESTART);
    VTAWriteMappedReg(vta_compute_handle_, 0x0, VTA_AUTORESTART);
    VTAWriteMappedReg(vta_store_handle_, 0x0, VTA_AUTORESTART);

    // Loop until the VTA is done
    unsigned t, flag = 0;
    for (t = 0; t < wait_cycles; ++t) {
132
      flag = VTAReadMappedReg(vta_compute_handle_, VTA_COMPUTE_DONE_RD_OFFSET);
133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162
      if (flag == VTA_DONE) break;
      std::this_thread::yield();
    }
    // Report error if timeout
    return t < wait_cycles ? 0 : 1;
  }

 private:
  // VTA handles (register maps)
  void* vta_fetch_handle_{nullptr};
  void* vta_load_handle_{nullptr};
  void* vta_compute_handle_{nullptr};
  void* vta_store_handle_{nullptr};
};

VTADeviceHandle VTADeviceAlloc() {
  // Hand callers an opaque handle to a freshly constructed device.
  VTADevice* device = new VTADevice();
  return static_cast<VTADeviceHandle>(device);
}

void VTADeviceFree(VTADeviceHandle handle) {
  // Reclaim the device created by VTADeviceAlloc(); its destructor
  // unmaps all stage registers.
  VTADevice* device = static_cast<VTADevice*>(handle);
  delete device;
}

int VTADeviceRun(VTADeviceHandle handle,
                 vta_phy_addr_t insn_phy_addr,
                 uint32_t insn_count,
                 uint32_t wait_cycles) {
  // Forward to the device object behind the opaque handle.
  VTADevice* device = static_cast<VTADevice*>(handle);
  return device->Run(insn_phy_addr, insn_count, wait_cycles);
}