codecritic: Commit 4b69d51d

Authored Oct 21, 2024 by nzy (Ziyuan Nan)
Merge branch 'main' of http://62.234.201.16/nzy/codecritic

Parents: 9ff072e5, 96e8a313
Showing 6 changed files with 29 additions and 23 deletions
step2_sftorm_convert_preference_to_sft.py   +9  -5
step3_train_sft_model.py                    +4  -4
step4_test_sftorm.py                        +2  -3
supervised.py                               +2  -2
utils_preference_dataset.py                 +2  -1
utils_vllm.py                               +10 -8
step2_sftorm_convert_preference_to_sft.py
@@ -4,8 +4,8 @@
 # 2. Using SFT (Supervised Fine-Tuning) directly
 # This experiment aims to fairly compare these two approaches.
-from utils import load_json, save_json
+from utils import load_json, save_json, read_config
-from utils_preference_dataset import mk_critic_qa, mk_critic_verify, mk_sft_item
+from utils_preference_dataset import mk_critic_qa, mk_critic_verify, mk_sft_item, mk_sft_dataset_info
 def convert_preference_to_sft(item):

@@ -19,10 +19,15 @@ def convert_preference_to_sft(item):
 if __name__ == "__main__":
-    preference_path = None
+    cfg = read_config()
+    preference_path = cfg["preference_dataset"]["min_edit_distance"]["preference_dataset_path"]
     preference_dataset = load_json(preference_path)
     sft_dataset = []
     for item in preference_dataset:
-        sft_dataset.extend(convert_preference_to_sft(item))
\ No newline at end of file
+        sft_dataset.extend(convert_preference_to_sft(item))
+    dataset_info = mk_sft_dataset_info(cfg["sftorm"]["dataset_name"])
+    save_json(sft_dataset, cfg["sftorm"]["dataset_path"])
+    save_json(dataset_info, cfg["sftorm"]["dataset_info_path"])
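
The second hunk replaces the hard-coded preference_path = None with config lookups and registers the converted SFT dataset for the trainer. A hedged sketch of what mk_sft_dataset_info presumably returns, following LLaMA-Factory's dataset_info.json convention; the field names are assumptions, not the repo's actual helper:

    # Hypothetical sketch of mk_sft_dataset_info; the real helper lives in
    # utils_preference_dataset.py and may use different fields.
    def mk_sft_dataset_info(dataset_name):
        return {
            dataset_name: {
                "file_name": f"{dataset_name}.json",   # assumed file naming
                "formatting": "sharegpt",              # assumed message-list format
                "columns": {"messages": "messages"},
            }
        }
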
step3_train_sft_model.py
@@ -17,6 +17,7 @@ cutoff_len: 4096
 max_samples: 10000
 overwrite_cache: true
 preprocessing_num_workers: 16
+mask_history: true
 ### output
 output_dir: {critic_model_path}

@@ -49,12 +50,12 @@ def mk_llamafactory_sft_yaml(cfg):
     train_str = train_yaml.format(
         model_path=cfg["model"],
         deepspeed_config_path=cfg[model_type]["train"]["deepspeed_cfg_path"],
-        dataset_name=cfg[model_type]["train"]["dataset_name"],
+        dataset_name=cfg[model_type]["dataset_name"],
         critic_model_path=cfg[model_type]["model_path"],
     )
     f.write(train_str)

 if __name__ == "__main__":
     cfg = read_config(["model_type"])
     mk_llamafactory_sft_yaml(cfg)
\ No newline at end of file
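
step3 builds a LLaMA-Factory training YAML by filling a template string; the change adds mask_history: true to that template and reads dataset_name one level higher in the config. A minimal sketch of the pattern, with the template heavily trimmed and the output path invented for illustration:

    # Minimal sketch of the template-fill pattern used by
    # mk_llamafactory_sft_yaml; the real train_yaml has many more keys
    # (deepspeed, cutoff_len, max_samples, ...) and a different output path.
    train_yaml = """\
    model_name_or_path: {model_path}
    dataset: {dataset_name}
    mask_history: true
    output_dir: {critic_model_path}
    """

    def mk_llamafactory_sft_yaml_sketch(cfg, model_type="sftorm"):
        train_str = train_yaml.format(
            model_path=cfg["model"],
            dataset_name=cfg[model_type]["dataset_name"],
            critic_model_path=cfg[model_type]["model_path"],
        )
        with open("train_sft.yaml", "w") as f:   # hypothetical output path
            f.write(train_str)
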
step4_test_sftorm.py
@@ -21,7 +21,7 @@ if __name__ == "__main__":
     save_jsonl(test_dataset, cfg["sftorm"]["test"]["prompt_path"])
     tokenizer = AutoTokenizer.from_pretrained(cfg["sftorm"]["model_path"])
-    score_tokens = tokenizer.encode("Yes")
+    score_tokens = tokenizer.encode("Yes", add_special_tokens=False)
     assert len(score_tokens) == 1
     score_token = score_tokens[0]

@@ -36,4 +36,4 @@ if __name__ == "__main__":
     groups = group_results(results, cfg["apps"])
     eval_results = [score_pass_at_k(groups, k, "sft-orm") for k in range(1, 16)]
     save_jsonl(eval_results, cfg["sftorm"]["test"]["eval_result_path"])
     print(eval_results)
\ No newline at end of file
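
The add_special_tokens=False fix matters because many chat tokenizers prepend a BOS token by default, so tokenizer.encode("Yes") can return two ids and the assert len(score_tokens) == 1 fails. A small illustration; the checkpoint name is only an example, not the model used in the repo:

    from transformers import AutoTokenizer

    # Any Llama-style tokenizer that prepends BOS shows the effect.
    tok = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
    print(tok.encode("Yes"))                            # e.g. [bos_id, yes_id]
    print(tok.encode("Yes", add_special_tokens=False))  # e.g. [yes_id] only
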
supervised.py
@@ -211,4 +211,5 @@ def print_supervised_dataset_example(example: Dict[str, List[int]], tokenizer: "
     print("input_ids:\n{}".format(example["input_ids"]))
     print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
     print("label_ids:\n{}".format(example["labels"]))
     print("labels:\n{}".format(tokenizer.decode(valid_labels, skip_special_tokens=False)))
\ No newline at end of file
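
valid_labels is not defined in this hunk; in LLaMA-Factory's print_supervised_dataset_example it is typically the label sequence with the IGNORE_INDEX padding removed, so the decoded "labels" line shows only the tokens that actually receive loss (with mask_history: true from step3, just the final assistant turn). A sketch of that assumption:

    # Assumed derivation of valid_labels, mirroring LLaMA-Factory's usual
    # implementation; the vendored supervised.py may differ slightly.
    IGNORE_INDEX = -100  # label id excluded from the loss

    def get_valid_labels(example):
        return [label for label in example["labels"] if label != IGNORE_INDEX]
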
utils_preference_dataset.py
@@ -68,7 +68,8 @@ def mk_critic_qa(instruction, code):
     ]

-def mk_critic_verify(answer: bool | None = None):
+def mk_critic_verify(answer=None):
+    # answer: bool or none
     message = [{"role": "user", "content": "Is the code correct (Yes/No)?"}]
     if answer is not None:
         response = "Yes" if answer else "No"
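
The signature change drops the bool | None annotation (which needs Python 3.10+) in favor of a plain default plus a comment; behavior is unchanged. Presumed usage, inferred from the visible body rather than from the callers; the assistant turn is an assumption based on the "Yes"/"No" branch, which is not shown in full in the hunk:

    from utils_preference_dataset import mk_critic_verify

    mk_critic_verify()       # [{"role": "user", "content": "Is the code correct (Yes/No)?"}]
    mk_critic_verify(True)   # presumably the question plus {"role": "assistant", "content": "Yes"}
    mk_critic_verify(False)  # presumably the question plus {"role": "assistant", "content": "No"}
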
utils_vllm.py
@@ -6,7 +6,7 @@ from itertools import chain
 from functools import partial
 from utils import load_jsonl, save_jsonl
+import numpy as np

 def generate_worker(cuda_device, prompts, model_path, sampling_params):
     os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device

@@ -53,8 +53,8 @@ def score_worker(cuda_device, prompts, model_path, score_token):
     vllm_sampling_params = SamplingParams(
         n=1,
         temperature=0,
-        max_tokens=1,
+        max_tokens=5,
-        logprobs=1000
+        logprobs=20
     )
     text_prompts = [tokenizer.apply_chat_template(item["messages"], tokenize=False, add_generation_prompt=True) for item in prompts]

@@ -65,12 +65,15 @@ def score_worker(cuda_device, prompts, model_path, score_token):
        for response in output.outputs:
            # response.logprobs: list[dict[int, Logprob]] https://github.com/vllm-project/vllm/blob/main/vllm/sequence.py
            sample_logprobs = response.logprobs
-            logprob = sample_logprobs[0].get(score_token)
+            logprob = sample_logprobs[1].get(score_token)
            newitem = item.copy()
+            # model always return 4 tokens, ['\n', 'Yes'/'No', '\n', <EOT>]
            if logprob:
-                newitem["score"] = logprob.logprob
+                newitem["score"] = np.exp(logprob.logprob)
+                newitem["critic_text"] = response.text
            else:
                newitem["score"] = 0
+                newitem["critic_text"] = response.text
            result.append(newitem)
    return result

@@ -118,4 +121,4 @@ def vllm_score(model_path, prompt_path, output_path, score_token):
     results = list(chain(*nested_results))
     print(f"size of dataset: {len(results)}")
     save_jsonl(results, output_path)
\ No newline at end of file
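
Taken together, the utils_vllm changes switch the score from a raw log-probability at output position 0 to a probability at position 1, because the SFT critic now emits a leading '\n' before the 'Yes'/'No' verdict (hence max_tokens=5 and a smaller logprobs budget). A self-contained sketch of the updated extraction rule; the helper name is invented and the vLLM objects are stubbed only to make the logic explicit:

    import numpy as np

    def extract_yes_probability(sample_logprobs, score_token):
        # sample_logprobs mirrors vLLM's response.logprobs: one dict per
        # generated position, mapping token id -> Logprob (with .logprob).
        # The critic emits ['\n', 'Yes'/'No', '\n', <EOT>], so the verdict
        # sits at position 1; fall back to 0.0 if it is outside the top-k.
        entry = sample_logprobs[1].get(score_token) if len(sample_logprobs) > 1 else None
        return float(np.exp(entry.logprob)) if entry else 0.0
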