Commit f7a57e58 by Dinple

test update

parent 47af9d1a
@@ -5,9 +5,13 @@ Flows/util/__pycache__
CodeElements/*/*/__pycache__
CodeElements/Plc_client/test/
CodeElements/Plc_client/test/*/*
CodeElements/Plc_client/plc_client_os.py
CodeElements/Plc_client/__pycache__/*
CodeElements/Plc_client/proto_reader.py
CodeElements/Plc_client/plc_client.py
CodeElements/failed_proxy_plc/*
CodeElements/CTModel/*
\ No newline at end of file
CodeElements/EvalCT/saved_policy/*
CodeElements/EvalCT/test/*
CodeElements/EvalCT/snapshot*
CodeElements/EvalCT/circuit_training
CodeElements/EvalCT/__pycache__/
CodeElements/EvalCT/eval_run*.plc
import collections
import functools
import os
import time
from typing import Text
import statistics
import re
from absl import app
from absl import flags
from absl.flags import argparse_flags
from circuit_training.environment import environment
from circuit_training.environment import placement_util
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.metrics import py_metric
from tf_agents.metrics import py_metrics
from tf_agents.policies import greedy_policy # pylint: disable=unused-import
from tf_agents.policies import py_tf_eager_policy
from tf_agents.policies import policy_loader
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from tf_agents.system import system_multiprocessing as multiprocessing
"""
Example
At ./MacroPlacement/CodeElements/EvalCT, run the following command:
$ python3 -m eval_ct --netlist ./test/ariane/netlist.pb.txt \
    --plc ./test/ariane/initial.plc \
    --rundir run_os_64128_g657_ub5_nruns10_c5_r3_v3_rc1
"""
class InfoMetric(py_metric.PyStepMetric):
  """Observer for graphing the environment info metrics."""

  def __init__(
      self,
      env,
      info_metric_key: Text,
      buffer_size: int = 1,
      name: Text = 'InfoMetric',
  ):
    """Observer reporting TensorBoard metrics at the end of each episode.

    Args:
      env: environment.
      info_metric_key: a string key from the environment info to report,
        e.g. wirelength, density, congestion.
      buffer_size: size of the buffer for calculating the aggregated metrics.
      name: name of the observer object.
    """
    super(InfoMetric, self).__init__(name + '_' + info_metric_key)

    self._env = env
    self._info_metric_key = info_metric_key
    self._buffer = collections.deque(maxlen=buffer_size)

  def call(self, traj: trajectory.Trajectory):
    """Report the requested metrics at the end of each episode."""
    # We collect the metrics from the info from the environment instead.
    # The traj argument is kept to be compatible with the actor/learner API
    # for metrics.
    del traj

    if self._env.done:
      # placement_util.save_placement(self._env._plc, './reload_weight.plc', '')
      metric_value = self._env.get_info()[self._info_metric_key]
      self._buffer.append(metric_value)

  def result(self):
    return statistics.mean(self._buffer)

  def reset(self):
    self._buffer.clear()

def evaluate(model_dir, create_env_fn):
  # Create the path for the serialized greedy policy.
  policy_saved_model_path = os.path.join(model_dir,
                                         learner.POLICY_SAVED_MODEL_DIR,
                                         learner.GREEDY_POLICY_SAVED_MODEL_DIR)
  try:
    assert os.path.isdir(policy_saved_model_path)
    print("#[POLICY SAVED MODEL PATH] " + policy_saved_model_path)
  except AssertionError:
    print("[ERROR POLICY SAVED MODEL PATH NOT FOUND] " + policy_saved_model_path)
    exit(1)
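  # NOTE: the checkpoint step below is hardcoded; point it at a checkpoint
  # that actually exists under the run directory.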
  policy_saved_chkpt_path = os.path.join(model_dir,
                                         learner.POLICY_SAVED_MODEL_DIR,
                                         "checkpoints/policy_checkpoint_0000107200")
  try:
    assert os.path.isdir(policy_saved_chkpt_path)
    print("#[POLICY SAVED CHECKPOINT PATH] " + policy_saved_chkpt_path)
  except AssertionError:
    print("[ERROR POLICY SAVED CHECKPOINT PATH NOT FOUND] " + policy_saved_chkpt_path)
    exit(1)
  saved_model_pb_path = os.path.join(policy_saved_model_path, 'saved_model.pb')
  try:
    assert os.path.isfile(saved_model_pb_path)
    print("#[SAVED MODEL PB PATH] " + saved_model_pb_path)
  except AssertionError:
    print("[ERROR SAVED MODEL PB PATH NOT FOUND] " + saved_model_pb_path)
    exit(1)
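  # Load the greedy-policy SavedModel and restore its weights from the checkpoint.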
  policy = policy_loader.load(policy_saved_model_path, policy_saved_chkpt_path)
  train_step = train_utils.create_train_step()

  # Create the environment.
  env = create_env_fn()

  # Create the evaluator actor.
  info_metrics = [
      InfoMetric(env, 'wirelength'),
      InfoMetric(env, 'congestion'),
      InfoMetric(env, 'density'),
  ]
  eval_actor = actor.Actor(
      env,
      policy,
      train_step,
      episodes_per_run=1,
      summary_dir=os.path.join(model_dir, learner.TRAIN_DIR, 'eval'),
      metrics=[
          py_metrics.NumberOfEpisodes(),
          py_metrics.EnvironmentSteps(),
          py_metrics.AverageReturnMetric(
              name='eval_episode_return', buffer_size=1),
          py_metrics.AverageEpisodeLengthMetric(buffer_size=1),
      ] + info_metrics,
      name='performance')

  eval_actor.run_and_log()

def main(args):
  NETLIST_FILE = args.netlist
  INIT_PLACEMENT = args.plc
  GLOBAL_SEED = 111
  CD_RUNTIME = False
  RUN_NAME = args.rundir
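  # Extract the testcase name from the netlist path,
  # e.g. './test/ariane/netlist.pb.txt' -> 'ariane'.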
  EVAL_TESTCASE = re.search("/test/(.+?)/netlist.pb.txt", NETLIST_FILE).group(1)
  print(EVAL_TESTCASE)

  create_env_fn = functools.partial(
      environment.create_circuit_environment,
      netlist_file=NETLIST_FILE,
      init_placement=INIT_PLACEMENT,
      is_eval=True,
      save_best_cost=True,
      output_plc_file=str('./eval_' + RUN_NAME + '_to_' + EVAL_TESTCASE + '.plc'),
      global_seed=GLOBAL_SEED,
      cd_finetune=CD_RUNTIME
  )

  evaluate(model_dir=os.path.join("./saved_policy", RUN_NAME, str(GLOBAL_SEED)),
           create_env_fn=create_env_fn)

def parse_flags(argv):
  parser = argparse_flags.ArgumentParser(
      description='Evaluate a saved circuit-training policy on a given netlist')
  parser.add_argument("--netlist", required=True,
                      help="Path to netlist in pb.txt")
  parser.add_argument("--plc", required=True,
                      help="Path to initial placement in .plc")
  parser.add_argument("--rundir", required=True,
                      help="Path to run directory that contains saved policies")
  return parser.parse_args(argv[1:])


if __name__ == '__main__':
  app.run(main, flags_parser=parse_flags)
\ No newline at end of file
@@ -1742,7 +1742,6 @@ class PlacementCost(object):
        [IGNORE] THIS DOES NOT AFFECT DENSITY. SHOULD WE IMPLEMENT THIS AT ALL?
        make soft macros as squares
        """
        return
        for mod_idx in self.soft_macro_indices:
            mod = self.modules_w_pins[mod_idx]
            mod_area = mod.get_width() * mod.get_height()
......
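
A minimal sketch of the squaring step this routine describes, kept here for reference: each soft macro preserves its area but takes equal width and height. The set_width()/set_height() setters are assumptions made to mirror the get_width()/get_height() getters used in the loop above.

import math

def square_soft_macro(mod):
    # Preserve area: a W x H soft macro becomes a sqrt(W*H) x sqrt(W*H) square.
    side = math.sqrt(mod.get_width() * mod.get_height())
    mod.set_width(side)   # assumed setter, mirroring get_width()
    mod.set_height(side)  # assumed setter, mirroring get_height()
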
@@ -4,7 +4,6 @@ import pandas as pd
import sys
import os
import traceback
import argparse
import math
import re
from random import randrange
@@ -57,6 +56,18 @@ Example:
--marv 8.339\
--smooth 2
$ python3 -m Plc_client.plc_client_os_test --netlist ./Plc_client/test/g657_ub5_nruns10_c5_r3_v3_rc1/netlist.pb.txt\
--plc ./Plc_client/test/g657_ub5_nruns10_c5_r3_v3_rc1/legalized.plc\
--width 1357.360\
--height 1356.880\
--col 22\
--row 30\
--rpmh 11.285\
--rpmv 12.605\
--marh 7.143\
--marv 8.339\
--smooth 0
$ python3 -m Plc_client.plc_client_os_test --netlist ./Plc_client/test/0P2M0m/netlist.pb.txt\
--width 500\
--height 500\
@@ -252,6 +263,8 @@ class PlacementCostTest():
        self.plc.get_overlap_threshold()
        print("overlap_threshold default", self.plc.get_overlap_threshold())
        # self.plc.make_soft_macros_square()

        if self.PLC_PATH:
            print("#[PLC FILE FOUND] Loading info from .plc file")
            self.plc_os.set_canvas_boundary_check(False)
@@ -264,22 +277,33 @@ class PlacementCostTest():
        else:
            print("#[PLC FILE MISSING] Using only netlist info")
            # self.plc.make_soft_macros_square()

        self.plc.set_routes_per_micron(self.RPMH, self.RPMV)
        self.plc_os.set_routes_per_micron(self.RPMH, self.RPMV)
        # self.plc.make_soft_macros_square()

        self.plc.set_macro_routing_allocation(self.MARH, self.MARV)
        self.plc_os.set_macro_routing_allocation(self.MARH, self.MARV)
        # self.plc.make_soft_macros_square()

        self.plc.set_congestion_smooth_range(self.SMOOTH)
        self.plc_os.set_congestion_smooth_range(self.SMOOTH)

        self.plc.set_canvas_size(self.CANVAS_WIDTH, self.CANVAS_HEIGHT)
        # self.plc.make_soft_macros_square()
        self.plc.set_placement_grid(self.GRID_COL, self.GRID_ROW)
        # self.plc.make_soft_macros_square() # in effect

        self.plc.set_canvas_size(self.CANVAS_WIDTH, self.CANVAS_HEIGHT)
        self.plc_os.set_canvas_size(self.CANVAS_WIDTH, self.CANVAS_HEIGHT)
        self.plc_os.set_placement_grid(self.GRID_COL, self.GRID_ROW)

        self.plc.make_soft_macros_square()
        self.plc_os.make_soft_macros_square()
        # self.plc.make_soft_macros_square()
        # self.plc_os.make_soft_macros_square()

        # [IGNORE] create_blockage must be defined BEFORE set_canvas_size
        # and set_placement_grid in order to be considered on the canvas
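
For reference, a minimal sketch of the ordering constraint noted above; the create_blockage() call and its argument names are assumptions, since the call itself does not appear in this diff:

# Blockages registered after the canvas and grid are set are not
# considered on the canvas, so create them first.
plc.create_blockage(minx, miny, maxx, maxy, blockage_rate)  # assumed signature
plc.set_canvas_size(canvas_width, canvas_height)
plc.set_placement_grid(grid_cols, grid_rows)
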
@@ -428,15 +452,6 @@ class PlacementCostTest():
        except Exception as e:
            print("[ERROR WIRELENGTH] Discrepancies found when computing wirelength -- GL {}, OS {}".format(
                str(self.plc.get_cost()), self.plc_os.get_cost()))

        # # if remove all soft macros
        # soft_macro_indices = [
        #     m for m in self.plc.get_macro_indices() if self.plc.is_node_soft_macro(m)
        # ]
        # for mod_idx in soft_macro_indices:
        #     self.plc_os.unplace_node(mod_idx)
        #     self.plc.unplace_node(mod_idx)

        print("GL WIRELENGTH: ", self.plc.get_wirelength())
        print("OS WIRELENGTH: ", self.plc_os.get_wirelength())
@@ -978,7 +993,7 @@ def main(args):
    Uncomment any available tests
    """
    # PCT.test_metadata()
    PCT.test_proxy_cost()
    # PCT.test_proxy_cost()
    # PCT.test_proxy_hpwl()
    # PCT.test_proxy_density()
    # PCT.test_proxy_congestion()
......