Commit d447a68a by nanziyuan

migrate from llamafactory's chat template to llm's chat template

parent ab8ccadd
@@ -70,7 +70,7 @@ def score_worker(cuda_device, prompts, model_path, score_token):
         for response in output.outputs:
             # response.logprobs: list[dict[int, Logprob]] https://github.com/vllm-project/vllm/blob/main/vllm/sequence.py
             sample_logprobs = response.logprobs
-            logprob = sample_logprobs[1].get(score_token)
+            logprob = sample_logprobs[0].get(score_token)
             newitem = item.copy()
             # the model always returns 4 tokens: ['\n', 'Yes'/'No', '\n', <EOT>]
             if logprob:
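The index change suggests that, under the new chat template, the score token ('Yes'/'No') is the first token generated rather than the second, so its logprobs now live at index 0. For context, here is a minimal sketch (not part of the commit) of how this structure is consumed with vLLM's offline `LLM.generate` API; the model path, prompt, and score-token id are illustrative placeholders:

```python
# Minimal sketch (not from this repository): reading per-token logprobs in vLLM.
from vllm import LLM, SamplingParams

llm = LLM(model="path/to/scoring-model")  # placeholder model path
params = SamplingParams(max_tokens=4, logprobs=20, temperature=0.0)

outputs = llm.generate(["<judge prompt built with the new chat template>"], params)
for output in outputs:
    for response in output.outputs:
        # response.logprobs: list[dict[int, Logprob]], one dict per generated
        # token, so index 0 is the first generated token.
        first_token_logprobs = response.logprobs[0]
        score_token = 9454  # placeholder token id for "Yes"
        logprob = first_token_logprobs.get(score_token)
        if logprob is not None:
            print(logprob.logprob)  # log-probability of the score token
```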