Commit e2d2b230 by nzy

step1 & 4: wrap code in main()

parent 54409c18
 from utils_vllm import vllm_chatcomplete
 from utils import read_config
-cfg = read_config()
-vllm_chatcomplete(
-    cfg["model"],
-    cfg["sample"]["sample_prompt_path"],
-    cfg["sample"]["sample_result_path"],
-    cfg["sample"]["sampling_params"]
-)
\ No newline at end of file
+if __name__ == "__main__":
+    cfg = read_config()
+    vllm_chatcomplete(
+        cfg["model"],
+        cfg["sample"]["sample_prompt_path"],
+        cfg["sample"]["sample_result_path"],
+        cfg["sample"]["sampling_params"],
+    )
@@ -2,22 +2,24 @@ from utils_vllm import vllm_chatcomplete, vllm_score
 from utils import read_config
 from transformers import AutoTokenizer
 
-cfg = read_config()
-vllm_chatcomplete(
-    cfg["critic"]["model_path"],
-    cfg["dataset"]["minimal_test_path"],
-    cfg["critic"]["test"]["reason_result_path"],
-    cfg["critic"]["test"]["sampling_params"]
-)
-
-tokenizer = AutoTokenizer.from_pretrained(cfg["model"])
-score_tokens = tokenizer.encode("Yes")
-assert len(score_tokens) == 1
-score_token = score_tokens[0]
-
-vllm_score(
-    cfg["critic"]["model_path"],
-    cfg["critic"]["test"]["reason_result_path"],
-    cfg["critic"]["test"]["score_result_path"],
-    score_token
-)
\ No newline at end of file
+if __name__ == "__main__":
+    cfg = read_config()
+    vllm_chatcomplete(
+        cfg["critic"]["model_path"],
+        cfg["dataset"]["minimal_test_path"],
+        cfg["critic"]["test"]["reason_result_path"],
+        cfg["critic"]["test"]["sampling_params"]
+    )
+
+    tokenizer = AutoTokenizer.from_pretrained(cfg["model"])
+    score_tokens = tokenizer.encode("Yes")
+    assert len(score_tokens) == 1
+    score_token = score_tokens[0]
+
+    vllm_score(
+        cfg["critic"]["model_path"],
+        cfg["critic"]["test"]["reason_result_path"],
+        cfg["critic"]["test"]["score_result_path"],
+        score_token
+    )
\ No newline at end of file
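
Both hunks apply the same fix: the module-level pipeline code moves under an if __name__ == "__main__": guard. This is more than style. vLLM can start worker processes via multiprocessing, and under the "spawn" start method a child re-imports the launching script, so unguarded top-level code would re-run the whole pipeline in every worker. A minimal sketch of the hazard, with a hypothetical expensive_pipeline standing in for the read_config + vllm_chatcomplete calls in these scripts:

import multiprocessing as mp


def expensive_pipeline():
    # Stand-in for the real work: read_config() followed by vllm_chatcomplete(...).
    print(f"pipeline running in {mp.current_process().name}")


if __name__ == "__main__":
    # Executed only when the file runs as a script. A child started with the
    # "spawn" method re-imports this module; without the guard it would hit
    # an unguarded expensive_pipeline() call at import time and run it again.
    mp.set_start_method("spawn", force=True)
    proc = mp.Process(target=expensive_pipeline)
    proc.start()
    proc.join()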
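
The score-token setup in the second hunk assumes tokenizer.encode("Yes") yields exactly one id, which the assert enforces. Not every tokenizer satisfies this out of the box: some (e.g. Llama-family tokenizers) prepend a BOS token by default, so the call would return two ids and trip the assert. A hedged variant that isolates the literal token, using "gpt2" only as a stand-in for the checkpoint named in cfg["model"]:

from transformers import AutoTokenizer

# "gpt2" is a stand-in checkpoint; the commit loads cfg["model"] instead.
tokenizer = AutoTokenizer.from_pretrained("gpt2")

# add_special_tokens=False keeps BOS/EOS out of the ids, so the assert
# checks only whether the literal text "Yes" maps to a single token.
score_tokens = tokenizer.encode("Yes", add_special_tokens=False)
assert len(score_tokens) == 1
score_token = score_tokens[0]
print(score_token)  # the id later passed to vllm_score(...)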