Commit 167c2048 by sakundu

Merge branch 'flow_scripts' of github.com:TILOS-AI-Institute/MacroPlacement into flow_scripts

parents adc3ef60 010e9c8e
......@@ -3,3 +3,9 @@ Flows/*/*/run-*
Flows/job
Flows/util/__pycache__
CodeElements/*/*/__pycache__
CodeElements/Plc_client/test/
CodeElements/Plc_client/test/*/*
CodeElements/Plc_client/plc_client_os.py
CodeElements/Plc_client/__pycache__/*
CodeElements/Plc_client/proto_reader.py
CodeElements/Plc_client/plc_client.py
......@@ -9,7 +9,20 @@ curl 'https://raw.githubusercontent.com/google-research/circuit_training/main/ci
sudo curl https://storage.googleapis.com/rl-infra-public/circuit-training/placement_cost/plc_wrapper_main \
-o /usr/local/bin/plc_wrapper_main
# Run plc testbench
python -m Plc_client.plc_client_os_test
# python -m Plc_client.plc_client_os_test [-h] [--helpfull] --netlist NETLIST [--plc PLC] --width WIDTH --height HEIGHT --col COL --row ROW [--rpmh RPMH] [--rpmv RPMV] [--marh MARH] [--marv MARV] [--smooth SMOOTH]
# Example
python -m Plc_client.plc_client_os_test --netlist ./Plc_client/test/ariane/netlist.pb.txt \
    --plc ./Plc_client/test/ariane/initial.plc \
    --width 356.592 \
    --height 356.640 \
    --col 35 \
    --row 33 \
    --rpmh 10 \
    --rpmv 10 \
    --marh 5 \
    --marv 5 \
    --smooth 2
```
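Our reading of these flags (an assumption based on Circuit Training's congestion parameters, not confirmed documentation): `--rpmh`/`--rpmv` give the routes per micron in the horizontal/vertical directions, `--marh`/`--marv` give the per-direction macro routing allocation, and `--smooth` sets the congestion smoothing range.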
## HPWL Computation
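The body of this section is elided by the diff. As a quick sketch of the usual definition (our illustration, not necessarily the exact plc_client_os code): the wirelength cost sums, over all nets, the half-perimeter of the bounding box of the net's pins, optionally scaled by a net weight.

```
def hpwl(nets):
    """Weighted half-perimeter wirelength (illustrative sketch only).

    nets: iterable of (weight, pins), where pins is a list of (x, y)
    pin coordinates.
    """
    total = 0.0
    for weight, pins in nets:
        xs = [x for x, _ in pins]
        ys = [y for _, y in pins]
        # Half-perimeter of the pins' bounding box, scaled by net weight.
        total += weight * ((max(xs) - min(xs)) + (max(ys) - min(ys)))
    return total
```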
......@@ -32,10 +45,10 @@ Density cost is computed from grid cells density.
By default, any given input has its grid col/row set to 10/10 until the user defines them later in the .plc file.
Grid cell density is represented as a 1D array whose length is set to be the following:
$$
grid_{col} \cdot grid_{row}
$$
Each entry of this array represents the currently occupied percentage within that cell.
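As a sketch of how such an array might be filled (a hypothetical helper, not the plc_client_os implementation): intersect each placed node's bounding box with the grid cells it overlaps, and accumulate the overlap area as a fraction of the cell area.

```
def grid_density(node_boxes, canvas_w, canvas_h, cols, rows):
    """node_boxes: iterable of (lx, ly, ux, uy) placed-node bounding boxes.
    Returns a 1D list of length cols*rows of occupied-area fractions."""
    cw, ch = canvas_w / cols, canvas_h / rows
    occ = [0.0] * (cols * rows)
    for lx, ly, ux, uy in node_boxes:
        # Range of grid cells the node's bounding box can touch.
        c0, c1 = max(int(lx // cw), 0), min(int(ux // cw), cols - 1)
        r0, r1 = max(int(ly // ch), 0), min(int(uy // ch), rows - 1)
        for r in range(r0, r1 + 1):
            for c in range(c0, c1 + 1):
                # Overlap of the node's bbox with this grid cell.
                ox = min(ux, (c + 1) * cw) - max(lx, c * cw)
                oy = min(uy, (r + 1) * ch) - max(ly, r * ch)
                if ox > 0 and oy > 0:
                    occ[r * cols + c] += (ox * oy) / (cw * ch)
    return occ
```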
......
import odb
import os
import datetime
import math
from math import gcd
class BookshelfToOdb:
def __init__(
self,
opendbpy,
opendb,
cellPadding,
modeFormat,
plFile,
nodeMapFile,
netMapFile):
self.odbpy = opendbpy
self.odb = opendb
self.year = datetime.datetime.now().year
self.month = datetime.datetime.now().strftime("%b")
self.day = datetime.datetime.now().day
self.user = 'Seungwon Kim at University of California, San Diego (sek006@ucsd.edu)'
self.modeFormat = modeFormat
self.plFile = plFile
self.nodeMapFile = nodeMapFile
self.netMapFile = netMapFile
self.chip = self.odb.getChip()
self.block = self.chip.getBlock()
self.siteWidth = self.block.getRows()[0].getSite().getWidth()
self.siteHeight = self.block.getRows()[0].getSite().getHeight()
print("siteWidth: %s" % self.siteWidth)
print("siteHeight: %s" % self.siteHeight)
print("GCD: %s" % gcd(self.siteWidth, self.siteHeight))
#self.scaleFactor = self.targetScale / self.siteHeight
self.scaleFactor = 1 / gcd(self.siteWidth, self.siteHeight)
self.targetScale = self.scaleFactor * self.siteHeight
print("Target siteHeight Scale: %s" % self.targetScale)
print(
"Scale Factor (Target siteHeight Scale / siteHeight): %s" %
self.scaleFactor)
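# Worked example (hypothetical numbers): if siteWidth = 380 and
# siteHeight = 2800 DBU, then gcd = 20, scaleFactor = 1/20, and the
# target site-height scale is 2800 / 20 = 140 Bookshelf units.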
def UpdatePl(self, dictNode):
with open(self.plFile, 'r') as inFile:
for line in inFile:
if len(line) < 3:
continue
if line.strip().startswith("UCLA"):
continue
if line.strip().startswith("#"):
continue
elif ':' not in line:
continue
elif self.modeFormat == 'ISPD11' and line.split()[-1] == '/FIXED':
# Fixed insts: already defined in the original ODB, so no update is needed.
continue
elif (self.modeFormat == 'ISPD11' and line.split()[-1] == '/FIXED_NI') or (self.modeFormat != 'ISPD11' and line.split()[-1] == '/FIXED'):
# Fixed insts + boundary terminals
instMappedName = line.split()[0]
instLlx = float(line.split()[1])
instLly = float(line.split()[2])
instOrigName = dictNode[instMappedName]
#print(instOrigName, instMappedName)
# print(self.block.findBTerm(instOrigName))
bTerm = self.block.findBTerm(instOrigName)
if bTerm is not None:
bPins = bTerm.getBPins()
for bPin in bPins:
boxes = bPin.getBoxes()
# TODO: multiple boxes per bPin are not yet supported
assert(len(boxes) == 1)
for box in boxes:
bTermWidth = int(box.xMax() - box.xMin())
bTermHeight = int(box.yMax() - box.yMin())
#print(bTermWidth, bTermHeight)
layerBPin = box.getTechLayer()
bPin.destroy(bPin)
bPin.create(bTerm)
bPinLlx = int(instLlx / self.scaleFactor)
bPinLly = int(instLly / self.scaleFactor)
bPinUrx = int(
instLlx / self.scaleFactor) + int(bTermWidth)
bPinUry = int(
instLly / self.scaleFactor) + int(bTermHeight)
box.create(
bPin, layerBPin, bPinLlx, bPinLly, bPinUrx, bPinUry)
# print(bPinLlx,bPinLly,bPinUrx,bPinUry)
bPin.setPlacementStatus('PLACED')
# TODO: Snapping to on-track?
continue
else:
instMappedName = line.split()[0]
instLlx = float(line.split()[1])
instLly = float(line.split()[2])
instOrigName = dictNode[instMappedName]
#print(instOrigName, instMappedName)
# print(self.block.findInst(instOrigName))
instOrig = self.block.findInst(instOrigName)
instOrigWidth = instOrig.getMaster().getWidth()
instOrigHeight = instOrig.getMaster().getHeight()
instOrigLlx = int(instLlx / self.scaleFactor)
instOrigLly = int(instLly / self.scaleFactor)
instOrig.setLocation(instOrigLlx, instOrigLly)
instOrig.setPlacementStatus('PLACED')
# TODO: Snapping to on-track?
def DecodeMap(self):
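# The .nodemap file has one "origName mappedName" pair per line; build the
# reverse map (mapped -> original) used by UpdatePl above.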
dictNode = dict()
with open(self.nodeMapFile, 'r') as inFile:
for line in inFile:
origName = line.split()[0]
mappedName = line.split()[1]
dictNode[mappedName] = origName
return dictNode
def UpdateOdb(self):
dictNode = self.DecodeMap()
self.UpdatePl(dictNode)
dbName = './output/%s_pad%s_%s/%s_pad%s_%s_mapped.odb' % (
odbName, cellPadding, modeFormat, odbName, cellPadding, modeFormat)
defName = './output/%s_pad%s_%s/%s_mapped.def' % (
odbName, cellPadding, modeFormat, odbName)
if os.path.exists(dbName):
os.remove(dbName)
if os.path.exists(defName):
os.remove(defName)
odb.write_db(self.odb, dbName)
odb.write_def(self.block, defName)
if __name__ == "__main__":
################ Settings #################
odbPath = './odbFiles'
# The number of sites for cell padding (+left, +right)
cellPaddings = [0, 1, 2, 3, 4]
# Format list of Bookshelf to be created.
modeFormats = ['ISPD04', 'ISPD11']
# OpenDB list for Bookshelf generation
odbList = [
'sky130hd_ISPD2006_adaptec1',
]
###########################################
for modeFormat in modeFormats:
for cellPadding in cellPaddings:
for odbName in odbList:
plFile = './output/%s_pad%s_%s/%s_pad%s_%s_mapped.ntup.pl' % (
odbName, cellPadding, modeFormat, odbName, cellPadding, modeFormat)
nodeMapFile = './output/%s_pad%s_%s/%s_pad%s_%s_mapped.nodemap' % (
odbName, cellPadding, modeFormat, odbName, cellPadding, modeFormat)
netMapFile = './output/%s_pad%s_%s/%s_pad%s_%s_mapped.netmap' % (
odbName, cellPadding, modeFormat, odbName, cellPadding, modeFormat)
db = odb.dbDatabase.create()
odb.read_db(db, '%s/%s.odb' % (odbPath, odbName))
bs = BookshelfToOdb(
opendbpy=odb,
opendb=db,
cellPadding=cellPadding,
modeFormat=modeFormat,
plFile=plFile,
nodeMapFile=nodeMapFile,
netMapFile=netMapFile)
bs.UpdateOdb()
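# Note (invocation assumed, script name hypothetical): like the other ODB
# scripts in this commit, this driver presumably runs under OpenROAD's Python
# interpreter, e.g. ./openroad -python bookshelf_to_odb.py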
import odb
import os
import datetime
import math
import namemap
from math import gcd
class OdbToBookshelf:
......@@ -126,7 +125,7 @@ class OdbToBookshelf:
def WriteNodes(self, bsName):
print("Writing .nodes")
f = open('./output/%s/%s.nodes' % (bsName, bsName), 'w')
f.write('UCLA nodes 1.0\n')
f.write(
'# Created : %s %s %s\n' %
(self.month, self.day, self.year))
......@@ -214,7 +213,7 @@ class OdbToBookshelf:
def WriteRoute(self, bsName):
print("Writing .route")
f = open('./output/%s/%s.route' % (bsName, bsName), 'w')
f.write('UCLA route 1.0\n')
f.write(
'# Created : %s %s %s\n' %
(self.month, self.day, self.year))
......@@ -435,7 +434,7 @@ class OdbToBookshelf:
def WriteWts(self, bsName):
print("Writing .wts")
f = open('./output/%s/%s.wts' % (bsName, bsName), 'w')
f.write('UCLA wts 1.0\n')
f.write(
'# Created : %s %s %s\n' %
(self.month, self.day, self.year))
......@@ -445,7 +444,7 @@ class OdbToBookshelf:
def WriteNets(self, bsName):
print("Writing .nets")
f = open('./output/%s/%s.nets' % (bsName, bsName), 'w')
f.write('UCLA nets 1.0\n')
f.write(
'# Created : %s %s %s\n' %
(self.month, self.day, self.year))
......@@ -531,29 +530,15 @@ class OdbToBookshelf:
#
# "getGeomShape().getPoints()" --> a, b, c, d, a
#
rr = odb.Rect(box.xMin(), box.yMin(), box.xMax(), box.yMax())
# Calculate center of pin as the average of the transformed rectangle corners
for pp in rr.getPoints():
    t.apply(pp)
    tt += 1
    tx += float(pp.getX())
    ty += float(pp.getY())
iPinXCen = float(tx) / float(tt)
iPinYCen = float(ty) / float(tt)
......@@ -575,7 +560,7 @@ class OdbToBookshelf:
def WritePl(self, bsName):
print("Writing .pl")
f = open('./output/%s/%s.pl' % (bsName, bsName), 'w')
f.write('UCLA pl 1.0\n')
f.write(
'# Created : %s %s %s\n' %
(self.month, self.day, self.year))
......@@ -637,7 +622,7 @@ class OdbToBookshelf:
def WriteScl(self, bsName):
print("Writing .scl")
f = open('./output/%s/%s.scl' % (bsName, bsName), 'w')
f.write('UCLA scl 1.0\n')
f.write(
'# Created : %s %s %s\n' %
(self.month, self.day, self.year))
......@@ -731,7 +716,7 @@ class OdbToBookshelf:
def WriteShapes(self, bsName):
print("Writing .shapes")
f = open('./output/%s/%s.shapes' % (bsName, bsName), 'w')
f.write('UCLA shapes 1.0\n')
f.write(
'# Created : %s %s %s\n' %
(self.month, self.day, self.year))
......@@ -808,11 +793,11 @@ class OdbToBookshelf:
self.show(bsName)
# convert for mapping
self.convert(bsName)
def convert(self, bsName):
os.chdir('output/%s' % bsName)
namemap.main('%s.aux' % bsName)
os.chdir('../../')
def show(self, bsName):
......
'''
This script generates an OpenDB database from LEF/DEF
'''
import odb
import sys
import os
import re
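# Usage: openroad -python lefdef_to_odb.py <design> <def_file> <output_dir>
# (matches the invocation in the run script later in this commit)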
design = sys.argv[1]
def_file = sys.argv[2]
output_dir = sys.argv[3]
work_dir = re.search(r'(/\S+/MacroPlacement)', os.getcwd()).group(1)
sys.path.append(f'{work_dir}/Flows/util')
from convert_odb2bookshelf import OdbToBookshelf
lef_dir = f'{work_dir}/Enablements/NanGate45/lef'
lef_list = [f'{lef_dir}/NangateOpenCellLibrary.tech.lef',
f'{lef_dir}/NangateOpenCellLibrary.macro.mod.lef',
f'{lef_dir}/fakeram45_256x16.lef']
db = odb.dbDatabase.create()
for lef_file in lef_list:
odb.read_lef(db, lef_file)
odb.read_def(db, def_file)
chip = db.getChip()
tech = db.getTech()
libs = db.getLibs()
if chip is None:
exit("ERROR: READ DEF Failed")
if not os.path.exists(f'{output_dir}/RePlAce'):
os.makedirs(f'{output_dir}/RePlAce')
odb_file = f'{output_dir}/RePlAce/{design}.odb'
export_result = odb.write_db(db, odb_file)
if export_result != 1:
exit("ERROR: Export failed")
new_db = odb.dbDatabase.create()
odb.read_db(new_db, odb_file)
if new_db is None:
exit("ERROR: Import failed")
if odb.db_diff(db, new_db):
exit("ERROR: Difference found in exported and imported DB")
print(f"Successfully generated ODB format from LEF/DEF for {design}")
bs = OdbToBookshelf(
opendbpy=odb,
opendb=db,
cellPadding=0,
modeFormat="ISPD11",
layerCapacity='layeradjust_empty.tcl')
bs.WriteBookshelf(f'{design}_pad0_ISPD11')
import sys
import subprocess as sp
def ExecuteCommand(command):
print(command)
sp.call(command, shell=True)
def main(arg1):
print(arg1, "is parsing...")
f = open(arg1)
auxCont = f.read()
f.close()
fileList = []
afterColon = False
for word in auxCont.split(" "):
if word == ":":
afterColon = True
continue
if afterColon:
fileList.append(word.strip())
print(fileList)
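# An .aux file lists the benchmark files after a colon, e.g. (format per the
# aux writer at the end of this script):
#   RowBasedPlacement : a.nodes a.nets a.wts a.pl a.scl a.shapes a.route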
nodeName = [l for l in fileList if l.endswith("nodes")][0]
netName = [l for l in fileList if l.endswith("nets")][0]
plName = [l for l in fileList if l.endswith("pl")][0]
routeName = [l for l in fileList if l.endswith("route")][0]
#routeName = nodeName.split(".")[0]+".route"
benchName = nodeName.split(".")[0]
print(nodeName, netName, plName, routeName)
#######################################
# Nodes Mapping
#######################################
f = open(nodeName, "r")
nodeCont = f.read()
f.close()
nameMap = dict()
instCnt = 0
pinCnt = 0
newCont = ""
isFirst = True
for curLine in nodeCont.split("\n"):
wordList = curLine.split()
if isFirst:
isFirst = False
newCont += curLine + "\n"
continue
if len(wordList) == 0:
newCont += "\n"
continue
if wordList[0] == "#":
newCont += curLine + "\n"
continue
if wordList[0] == "NumNodes" or wordList[0] == "NumTerminals":
newCont += curLine + "\n"
continue
newWord = ""
if len(wordList) >= 4 and wordList[1] == "1" and wordList[2] == "1":
newWord = "p" + str(pinCnt)
pinCnt += 1
newCont += " " + newWord + " 1 1 terminal_NI\n"
elif len(wordList) >= 4 and wordList[3] == "terminal":
#newWord = "p"+str(pinCnt)
#pinCnt += 1
newWord = "o" + str(instCnt)
instCnt += 1
newCont += " " + newWord + " " + " ".join(wordList[1:]) + "\n"
else:
newWord = "o" + str(instCnt)
instCnt += 1
newCont += " " + newWord + " " + \
wordList[1] + " " + wordList[2] + "\n"
nameMap[wordList[0]] = newWord
f = open(benchName + "_mapped.nodes", "w")
f.write(newCont)
f.close()
newCont = ""
for key, cont in nameMap.items():
newCont += "%s %s\n" % (key, cont)
f = open(benchName + "_mapped.nodemap", "w")
f.write(newCont)
f.close()
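# At this point <bench>_mapped.nodes uses anonymized names (o0, o1, ... for
# nodes and terminals, p0, p1, ... for terminal_NI pins), and
# <bench>_mapped.nodemap records one "origName mappedName" pair per line.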
#######################################
# Nets Mapping
#######################################
f = open(netName, "r")
netCont = f.read()
f.close()
newCont = ""
isFirst = True
netCnt = 0
netNameMap = dict()
for curLine in netCont.split("\n"):
wordList = curLine.split()
if isFirst:
isFirst = False
newCont += curLine + "\n"
continue
if len(wordList) == 0:
newCont += "\n"
continue
if wordList[0] == "#":
newCont += curLine + "\n"
continue
if wordList[0] == "NumNets" or wordList[0] == "NumPins":
newCont += curLine + "\n"
continue
if wordList[0] == "NetDegree":
newWord = "n" + str(netCnt)
netCnt += 1
netNameMap[wordList[3]] = newWord
newCont += " ".join(wordList[0:3]) + " " + newWord + "\n"
continue
newCont += " " + nameMap[wordList[0]] + " " + wordList[1] + \
" " + wordList[2] + " " + wordList[3] + " " + wordList[4] + "\n"
f = open(benchName + "_mapped.nets", "w")
f.write(newCont)
f.close()
newCont = ""
for key, cont in netNameMap.items():
newCont += "%s %s\n" % (key, cont)
f = open(benchName + "_mapped.netmap", "w")
f.write(newCont)
f.close()
#######################################
# DP PL Mapping
#######################################
#
#dpPlName = plName.split(".")[0] + ".ntup.pl"
#f = open(dpPlName, "r")
#plCont= f.read()
# f.close()
#
#newCont = ""
#isFirst = True
#
# for curLine in plCont.split("\n"):
# wordList = curLine.split()
# if isFirst:
# isFirst = False
# newCont += curLine + "\n"
# continue
# if len(wordList) is 0:
# newCont += "\n"
# continue
# if wordList[0] is "#":
# newCont += curLine + "\n"
# continue
# if len(wordList) == 5:
# newCont += nameMap[ wordList[0] ] + " " + " ".join(wordList[1:5]) + "\n"
# elif len(wordList) == 6:
# newCont += nameMap[ wordList[0] ] + " " + " ".join(wordList[1:6]) + "\n"
#
#f = open(benchName + "_mapped.ntup.pl", "w")
# f.write(newCont)
# f.close()
#
#######################################
# GP PL Mapping
#######################################
gpPlName = plName.split(".")[0] + ".pl"
f = open(gpPlName, "r")
plCont = f.read()
f.close()
newCont = ""
isFirst = True
for curLine in plCont.split("\n"):
wordList = curLine.split()
if isFirst:
isFirst = False
newCont += curLine + "\n"
continue
if len(wordList) == 0:
newCont += "\n"
continue
if wordList[0] == "#":
newCont += curLine + "\n"
continue
if len(wordList) == 5:
newCont += nameMap[wordList[0]] + \
" " + " ".join(wordList[1:5]) + "\n"
elif len(wordList) == 6:
newCont += nameMap[wordList[0]] + \
" " + " ".join(wordList[1:6]) + "\n"
f = open(benchName + "_mapped.pl", "w")
f.write(newCont)
f.close()
#######################################
# ROUTE Mapping
#######################################
f = open(routeName, "r")
routeCont = f.read()
f.close()
newCont = ""
isFirst = True
for curLine in routeCont.split("\n"):
wordList = curLine.split()
if isFirst:
isFirst = False
newCont += curLine + "\n"
continue
if len(wordList) == 0:
newCont += "\n"
continue
if wordList[0] == "#":
newCont += curLine + "\n"
continue
if ":" in wordList:
newCont += curLine + "\n"
continue
newCont += " " + nameMap[wordList[0]] + " " + \
" ".join(wordList[1:len(wordList)]) + "\n"
f = open(benchName + "_mapped.route", "w")
f.write(newCont)
f.close()
#######################################
# scl, wts Mapping
#######################################
ExecuteCommand("cp %s.scl %s.scl" % (benchName, benchName + "_mapped"))
ExecuteCommand("cp %s.wts %s.wts" % (benchName, benchName + "_mapped"))
#######################################
# shapes Mapping
#######################################
f1 = open(benchName + "_mapped.nodemap", "r")
linesMap = f1.readlines()
f1.close()
listOrigName = []
listMapName = []
for line in linesMap:
listOrigName.append(line.split()[0])
listMapName.append(line.split()[1])
f2 = open(benchName + ".shapes", "r")
linesShapes = f2.readlines()
f2.close()
# Search mapping name from '.nodemap' file.
f = open(benchName + "_mapped.shapes", "w")
for line in linesShapes:
if len(line.split()) > 1:
if ':' == line.split()[1] and line.split()[0] != 'NumNonRectangularNodes':
origName = line.split()[0]
idxMap = listOrigName.index(origName)
f.write('%s : %s\n' % (listMapName[idxMap], line.split()[2]))
continue
else:
f.write(line)
else:
f.write(line)
f.close()
#######################################
# aux writing
#######################################
f = open(benchName + "_mapped.aux", "w")
newCont = "RowBasedPlacement : %s_mapped.nodes %s_mapped.nets %s_mapped.wts %s_mapped.pl %s_mapped.scl %s_mapped.shapes %s_mapped.route" % (
benchName, benchName, benchName, benchName, benchName, benchName, benchName)
f.write(newCont)
f.close()
rm -rf output/ ETC/ outputs/
# run ODB to Bookshelf
#./openroad -python odb2bs.py
./openroad -python lefdef_to_odb.py ariane ../../ariane_replace.def ./
# prepare input dir structure for RePlAce
mkdir -p ETC/
ln -s $(readlink -f ./output/*) ./ETC/
# run RePlAce. If you want to change density, please put -den 0.8 (80%), etc.
./RePlAce-static -bmflag etc -bmname ariane_pad0_ISPD11 -pcofmax 1.03 |& tee replace_result.log
# bring the results
#ln -s outputs/ETC/ariane_pad0_ISPD11/experiment0/*.pl ./
# you can run Innovus (invs) to load the *.pl results from here
/home/mgwoo/95_openroad/rosettaStone/github/RosettaStone/odbComm/namemap.py
\ No newline at end of file
'''
This script generates Bookshelf format from ODB format
'''
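# Usage (assumed from the commented-out line in the run script):
#   openroad -python odb2bs.py <design> <odb_file> <output_dir>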
import os
import odb
import sys
import re
work_dir = re.search(r'(/\S+/MacroPlacement)', os.getcwd()).group(1)
sys.path.append(f'{work_dir}/Flows/util')
from convert_odb2bookshelf import OdbToBookshelf
design = sys.argv[1]
odb_file = sys.argv[2]
output_dir = sys.argv[3]
modeFormat = 'ISPD11'
cellPadding = 0
layerCapacity = 'layeradjust_empty.tcl'
def touch(fname, times=None):
with open(fname, 'a'):
os.utime(fname, times)
if not os.path.exists(layerCapacity):
touch(layerCapacity)
db = odb.dbDatabase.create()
odb.read_db(db, odb_file)
bs = OdbToBookshelf(opendbpy=odb, opendb=db, cellPadding=cellPadding,
modeFormat=modeFormat, layerCapacity=layerCapacity)
if not os.path.exists(f'{output_dir}/RePlAce'):
os.makedirs(f'{output_dir}/RePlAce')
bs.WriteBookshelf(f'{design}.bookshelf')
......@@ -2,7 +2,7 @@
deselectAll
set top_module [dbget top.name]
if {[dbget top.terms.pStatus -v -e fixed] != "" } {
source ../../../../util/place_pin.tcl
}
......
......@@ -202,6 +202,7 @@ while allowing soft macros (standard-cell clusters) to also find good locations.
- [Force-directed placement](./CodeElements/FDPlacement/) places the center of each standard cell cluster onto centers of gridcells generated by [Gridding](./CodeElements/Gridding/).
- [Simulated annealing](./CodeElements/SimulatedAnnealing/) places the center of each macro onto centers of gridcells generated by [Gridding](./CodeElements/Gridding/). In Circuit Training, simulated annealing is used as a baseline to show the relative sample efficiency of RL.
- [LEF/DEF and Bookshelf (OpenDB, RosettaStone) translators](./CodeElements/FormatTranslators/) ease the translation between different representations of the same netlist.
- [Plc client](./CodeElements/Plc_client/) implements all three components of the proxy cost function: wirelength cost, density cost and congestion cost.
<!--## **Reproducible Example Solutions** -->
......@@ -218,18 +219,16 @@ We provide a competitive baseline for [Google Brain's Circuit Training](https://
- We do understand that Google has been working hard to complete the open-sourcing of Morpheus, and that this effort continues today. However, as pointed out in [this Doc](https://docs.google.com/document/d/1vkPRgJEiLIyT22AkQNAxO8JtIKiL95diVdJ_O4AFtJ8/edit?usp=sharing), it has been more than a year since "Data and Code Availability" was committed with publication of the [Nature paper](https://www.nature.com/articles/s41586-021-03544-w). We consider our work a "backstop" or "safety net" for Google's internal efforts, and a platform for researchers to build on.
**What can others contribute?**
- Our shopping list (updated August 2022) includes the following. Please join in!
  - simulated annealing on the gridded canvas: documentation and implementation
  - force-directed placement: documentation and implementation
  - donated cloud resources (credits) for experimental studies
  - relevant testcases with reference implementations and implementation flows (Cadence, OpenROAD preferred since scripts can be shared)
  - protobuf, lef/def, Bookshelf: detailed and confirmed documentation, plus tests and other help to improve our initial versions of translators
  - qrctechfile for NanGate45
  - improved "fakeram" generator for the ASAP7 research PDK
**What is your timeline?**
- We showed our [progress](https://open-source-eda-birds-of-a-feather.github.io/doc/slides/MacroPlacement-SpecPart-DAC-BOF-v5.pdf) at the Open-Source EDA and Benchmarking Summit birds-of-a-feather [meeting](https://open-source-eda-birds-of-a-feather.github.io/) on July 12 at DAC-2022.
- We are now (late August 2022) studying benefits and limitations of the CT methodology itself, as noted in [this Doc](https://docs.google.com/document/d/1c-uweo3DHiCWZyBzAdNCqqcOrAbKq1sVIfY0_4bFCYE/edit).
## **Related Links**
......