Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
codecritic
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Ziyuan Nan
codecritic
Commits
b319f162
Commit
b319f162
authored
Dec 28, 2024
by
nanziyuan
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
fix gen_dataset bugs
parent
b65ddb76
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
26 additions
and
5 deletions
+26
-5
codecritic/cli/gen_dataset.py
+6
-3
codecritic/evaluation/metric.py
+2
-2
scripts/gen_dataset.sh
+18
-0
No files found.
codecritic/cli/gen_dataset.py
View file @
b319f162
import
argparse
import
os
import
json
from
functools
import
partial
from
collections
import
defaultdict
...
...
@@ -21,7 +22,7 @@ def transform_to_prompt(apps, tokenizer):
for
split
in
[
"train"
,
"test"
]:
dataset
=
apps
[
split
]
for
item
in
dataset
:
task_id
=
split
+
"-"
+
str
(
item
[
"id"
])
task_id
=
split
+
"-"
+
str
(
item
[
"
problem_
id"
])
try
:
json
.
loads
(
item
[
"input_output"
])
except
ValueError
:
...
...
@@ -40,7 +41,7 @@ def transform_to_prompt(apps, tokenizer):
prompts
.
append
(
{
"dataset"
:
"apps-"
+
item
[
"difficulty"
],
"task_id"
:
"task_id"
,
"task_id"
:
task_id
,
"messages"
:
prompt
,
}
)
...
...
@@ -58,6 +59,7 @@ if __name__ == "__main__":
"--gpu"
,
type
=
int
,
default
=
1
,
help
=
"gpu number required by one model"
)
args
=
parser
.
parse_args
()
os
.
environ
[
"TOKENIZERS_PARALLELISM"
]
=
"false"
apps
=
load_dataset
(
args
.
apps
)
tokenizer
=
AutoTokenizer
.
from_pretrained
(
args
.
model
)
...
...
@@ -79,10 +81,11 @@ if __name__ == "__main__":
# postprocess
grouped
=
defaultdict
(
list
)
for
sample
in
dataset
:
grouped
[
sample
[
"task_id"
]]
=
sample
grouped
[
sample
[
"task_id"
]]
.
append
(
sample
)
def
is_in_test
(
task_id
):
split
,
idx
=
task_id
.
split
(
"-"
)
idx
=
int
(
idx
)
if
split
==
"test"
:
for
start
,
end
in
[(
0
,
300
),
(
3000
,
3100
),
(
4000
,
4100
)]:
if
start
<=
idx
<
end
:
...
...
codecritic/evaluation/metric.py
View file @
b319f162
...
...
@@ -28,7 +28,7 @@ def pass_at_k(samples, ks: list[int]):
# groupby taskid
grouped
=
defaultdict
(
list
)
for
sample
in
samples
:
grouped
[
sample
[
"task_id"
]]
=
sample
grouped
[
sample
[
"task_id"
]]
.
append
(
sample
)
num_samples
,
num_correct
=
[],
[]
for
task_id
,
group
in
grouped
.
items
():
...
...
@@ -65,7 +65,7 @@ def pos_neg_filter_uncertain(item, threshold):
def
top_at_k
(
samples
,
ks
:
list
[
int
],
score_func
):
grouped
=
defaultdict
(
list
)
for
sample
in
samples
:
grouped
[
sample
[
"task_id"
]]
=
sample
grouped
[
sample
[
"task_id"
]]
.
append
(
sample
)
num_samples
,
first_pass_indices
=
[],
[]
for
task_id
,
group
in
grouped
.
items
():
...
...
scripts/gen_dataset.sh
0 → 100644
View file @
b319f162
#!/usr/bin/env bash
# Generate APPS-based train/test sample datasets via codecritic.
# -x echoes each command (useful when tailing logs of long runs);
# -e aborts on the first failing command; -u errors on unset variables.
set -xeu

model="/lustre/S/huangdi/open_for_out/models/Qwen2.5-Coder-7B-Instruct/"
project="/lustre/S/nanziyuan/projects/ccc"

# APPS
# NOTE(review): all expansions are quoted — the original left ${model} and
# the --apps path bare, which breaks on paths containing spaces or globs.
CUDA_VISIBLE_DEVICES=0,1,2,3 \
python -m codecritic.cli.gen_dataset \
    --model "${model}" \
    --apps "/lustre/S/nanziyuan/datasets/apps/" \
    --train "${project}/data/train/apps_train_samples.jsonl" \
    --test "${project}/data/test/apps_test_samples.jsonl"

# TODO: additional benchmarks not yet wired up:
# HumanEval & MBPP
# HumanEvalPack
# BigCodeBench
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment