Ziyuan Nan / codecritic · Commits

Commit 52fa8d54, authored Oct 08, 2024 by nzy
Parent: cc6917f5

    step3 & 4: fix llamafactory related issues

Showing 3 changed files with 9 additions and 5 deletions:

    example_config.toml                   +1 / -0
    step3_train_outcome_reward_model.py   +2 / -2
    step4_test_reward_model.py            +6 / -3
example_config.toml · view file @ 52fa8d54

 model = "/path/to/model"
 llamafactory_model_template = ""
+llamafactory_path = ""
 apps = "/path/to/apps_dataset"

 [sample]
 ...
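The new llamafactory_path key points at a local LLaMA-Factory checkout; the step 3/4 scripts below run LLaMA-Factory commands from that directory. A minimal sketch of how the config might be read (tomllib is Python 3.11+ stdlib; the variable names are illustrative):

import tomllib  # stdlib TOML parser (Python 3.11+)

with open("example_config.toml", "rb") as f:  # tomllib requires binary mode
    cfg = tomllib.load(f)

# "llamafactory_path" is the key this commit adds; step4 reads it as
# cfg["llamafactory_path"] and threads it through to subprocesses.
llamafactory_path = cfg["llamafactory_path"]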
step3_train_outcome_reward_model.py · view file @ 52fa8d54

@@ -14,7 +14,7 @@ deepspeed: {deepspeed_config_path}
 dataset: {dataset_name}
 template: deepseekcoder
 cutoff_len: 4096
-max_samples: 1000
+max_samples: 10000
 overwrite_cache: true
 preprocessing_num_workers: 16

@@ -36,7 +36,7 @@ bf16: true
 ddp_timeout: 180000000
 ### eval
-val_size: 0.1
+val_size: 0.01
 per_device_eval_batch_size: 1
 eval_strategy: steps
 eval_steps: 500
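The two changes raise the training-data cap from 1,000 to 10,000 samples and shrink the held-out split from 10% to 1%, which at 10,000 samples still leaves roughly 100 evaluation examples. The YAML above is a template embedded in step3_train_outcome_reward_model.py; a sketch of how such a rendered config might be handed to LLaMA-Factory (the helper and file name are illustrative; llamafactory-cli train is the library's documented training entry point):

import subprocess
from pathlib import Path

def launch_training(rendered_yaml: str, llamafactory_path: str) -> None:
    # Hypothetical helper: write the rendered training config to disk,
    # then invoke LLaMA-Factory's CLI on it from the checkout directory.
    cfg_path = Path("orm_train.yaml").resolve()
    cfg_path.write_text(rendered_yaml)
    subprocess.run(
        ["llamafactory-cli", "train", str(cfg_path)],
        cwd=llamafactory_path,  # same cwd idea as the step4 fix below
        check=True,
    )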
step4_test_reward_model.py · view file @ 52fa8d54

@@ -11,7 +11,7 @@ from utils_metric import group_results, score_pass_at_k
 from transformers import AutoTokenizer

-def run_server(api_port, cuda_device, rm_inference_yaml_path):
+def run_server(api_port, cuda_device, rm_inference_yaml_path, llamafactory_path):
     env = os.environ.copy()
     env["API_PORT"] = api_port
     env["CUDA_VISIBLE_DEVICES"] = cuda_device
@@ -20,6 +20,8 @@ def run_server(api_port, cuda_device, rm_inference_yaml_path):
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
+        env=env,
+        cwd=llamafactory_path,
         shell=True,
     )
     print(f"Started server with PID {server_process.pid} on port {api_port} and CUDA device {cuda_device}"
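Taken together, the two hunks thread llamafactory_path into run_server and pass both the prepared environment and the working directory to the subprocess; without env=env, the modified copy of os.environ never reaches the child. Roughly, the fixed function now looks like this (a sketch: the command string is collapsed in the diff, so the llamafactory-cli api invocation is an assumption):

import os
import subprocess

def run_server(api_port, cuda_device, rm_inference_yaml_path, llamafactory_path):
    env = os.environ.copy()
    env["API_PORT"] = str(api_port)            # subprocess env values must be strings
    env["CUDA_VISIBLE_DEVICES"] = cuda_device  # pin this server to one GPU
    # Assumed command; the actual string is collapsed in the diff.
    server_process = subprocess.Popen(
        f"llamafactory-cli api {rm_inference_yaml_path}",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env,                # the fix: propagate API_PORT / CUDA_VISIBLE_DEVICES
        cwd=llamafactory_path,  # the fix: run from the LLaMA-Factory checkout
        shell=True,
    )
    print(f"Started server with PID {server_process.pid} on port {api_port} "
          f"and CUDA device {cuda_device}")
    return server_process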
@@ -73,13 +75,13 @@ def reward_model_inference(args):

 def mutli_process_reward_model_inference(
-    test_path, model_path, inference_cfg_path, result_path
+    test_path, model_path, inference_cfg_path, result_path, llamafactory_path
 ):
     cuda_devices = os.environ["CUDA_VISIBLE_DEVICES"].split(",")
     gpu_num = len(cuda_devices)
     test_dataset = preprocess_dataset(model_path, load_jsonl(test_path), gpu_num)
     server_processes = [
-        run_server(8000 + i, cuda_devices[i], inference_cfg_path)
+        run_server(8000 + i, cuda_devices[i], inference_cfg_path, llamafactory_path)
         for i in range(gpu_num)
     ]
     time.sleep(300)  # Wait for the servers to start (adjust the sleep time as needed)
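run_server is launched once per visible GPU, each instance on its own port (8000 + i), and the script then waits a flat 300 seconds for the servers to come up. A common refinement, not part of this commit, would be to poll each server until it answers (the /v1/models route is an assumption based on LLaMA-Factory's OpenAI-style API):

import time
import requests  # assumed dependency; any HTTP client would do

def wait_for_server(api_port, timeout=300, interval=5):
    # Hypothetical helper: poll until the server answers or the timeout expires.
    deadline = time.time() + timeout
    url = f"http://localhost:{api_port}/v1/models"  # assumed route
    while time.time() < deadline:
        try:
            if requests.get(url, timeout=2).ok:
                return
        except requests.RequestException:
            pass
        time.sleep(interval)
    raise TimeoutError(f"server on port {api_port} did not start within {timeout}s")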
@@ -97,6 +99,7 @@ if __name__ == "__main__":
         cfg["orm"][orm_test_model]["model_path"],
         cfg["orm"][orm_test_model]["inference_yaml_path"],
         cfg["orm"][orm_test_model]["minimal_test_score_path"],
+        cfg["llamafactory_path"]
     )
     groups = group_results(results)
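For reference, the lookups in this hunk imply a config layout along these lines in example_config.toml (key names are taken from the code; the table name and path values are placeholders for whatever orm_test_model selects):

llamafactory_path = "/path/to/LLaMA-Factory"

[orm.some_model]   # looked up at runtime as cfg["orm"][orm_test_model]
model_path = "/path/to/reward_model"
inference_yaml_path = "/path/to/rm_inference.yaml"
minimal_test_score_path = "/path/to/minimal_test_scores.jsonl"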