Commit 1d67f68a by wyt2000

fix: remove debug

parent 1bb353c5
......@@ -47,41 +47,36 @@ class GeneratorProcess(mp.Process):
item.get('_extra_prompt', str()),
]) for _, _, item in inputs
]
# NOTE(review): the four `open(f'output_rank_...')` blocks below append the first
# prompt/output of the batch to a per-rank debug log. The commit message says
# "fix: remove debug", so these are presumably the lines deleted by this hunk —
# confirm against the applied diff before relying on this listing.
with open(f'output_rank_{self.local_rank}', 'a') as f:
f.write('\n########## INPUT BEGIN ##########\n')
f.write(model_inputs_text[0])
f.write('\n########## INPUT END ##########\n')
# encode inputs: tokenize each rendered prompt and move the tensors onto the model's device
model_inputs = [
tokenizer(inp, return_tensors="pt").to(model.device)
for inp in model_inputs_text
]
# generate: greedy decoding (do_sample=False), at most self.max_tokens new tokens per prompt
model_outputs = [
model.generate(**inp, max_new_tokens=self.max_tokens, eos_token_id=tokenizer.eos_token_id, do_sample=False)
for inp in model_inputs
]
# decode outputs: detokenize the first returned sequence of each batch, dropping special tokens
model_outputs = [
tokenizer.decode(out[0], skip_special_tokens=True)
for out in model_outputs
]
# debug dump: raw decoded text (still prefixed with the echoed prompt)
with open(f'output_rank_{self.local_rank}', 'a') as f:
f.write('\n########## RAW OUTPUT BEGIN ##########\n')
f.write(model_outputs[0])
f.write('\n########## RAW OUTPUT END ##########\n')
# remove inputs in decoding results: strip the echoed prompt by character length.
# NOTE(review): slicing with len(inp) assumes decode() reproduces the prompt text
# exactly at the front of the output — verify tokenization round-trips byte-for-byte.
model_outputs = [
out[len(inp):].strip()
for inp, out in zip(model_inputs_text, model_outputs)
]
# debug dump: output after the prompt prefix has been removed
with open(f'output_rank_{self.local_rank}', 'a') as f:
f.write('\n########## REMOVED OUTPUT BEGIN ##########\n')
f.write(model_outputs[0])
f.write('\n########## REMOVED OUTPUT END ##########\n')
# extract code snippet via the configured post-processing callback
outputs = [self.output_func(_output) for _output in model_outputs]
# debug dump: final cleaned output
with open(f'output_rank_{self.local_rank}', 'a') as f:
f.write('\n########## CLEANED OUTPUT BEGIN ##########\n')
f.write(outputs[0])
f.write('\n########## CLEANED OUTPUT END ##########\n')
# send to verify: publish each result under its request id
# (self.lock guards the shared request_statuses dict across processes)
with self.lock:
for (_, request_id, _), output in zip(inputs, outputs):
self.request_statuses[request_id] = output
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment