Unverified commit fac99fa6 by Guangming Sheng, committed by GitHub

[ci] feat: add ci for sft trainer (#128)

- Support training for several iterations in the SFT trainer
- Add CI for the SFT trainer that trains for one iteration
parent fefca417
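In outline, the commit adds a trainer.total_training_steps cap that overrides the epoch-based step count and triggers an early exit. A minimal sketch of that logic (names are illustrative; the committed code is in verl.trainer.fsdp_sft_trainer, shown in the diff below):

# Illustrative sketch only, not the committed code.
def fit(cfg, train_dataloader):
    # Default step budget: one optimizer step per batch, for every epoch.
    total_steps = len(train_dataloader) * cfg.total_epochs
    if cfg.total_training_steps is not None:  # CI overrides this to 1
        total_steps = cfg.total_training_steps

    step = 0
    for _ in range(cfg.total_epochs):
        for batch in train_dataloader:
            # ... training_step(batch) ...
            step += 1
            if step >= total_steps:
                # final validation + checkpoint, then stop early
                return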
.github/workflows/e2e_sft.yml

name: e2e_sft

on:
  # Trigger the workflow on push or pull request,
  # but only for the main branch
  push:
    branches:
      - main
    paths:
      - "**/*.py"
      - .github/workflows/e2e_sft.yml
  pull_request:
    branches:
      - main
    paths:
      - "**/*.py"
      - .github/workflows/e2e_sft.yml
      - "tests/e2e/*.sh"

jobs:
  e2e_sft:
    runs-on: [self-hosted, l20-1]
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1"
      HF_HUB_ENABLE_HF_TRANSFER: 1
    container:
      image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
      options: --gpus all --shm-size=10g
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install hf_transfer
          pip3 install -e .[test]
      - name: Prepare gsm8k dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k.py
      - name: Running gsm8k SFT e2e training tests on 8 L20 GPUs
        run: |
          ray stop --force
          bash tests/sft/run_sft.sh
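A note on the download setup: the pip3 install hf_transfer step pairs with the HF_HUB_ENABLE_HF_TRANSFER: 1 env var above; huggingface_hub only uses the accelerated Rust downloader when both are present. A small sanity-check sketch (the model name is taken from run_sft.sh below; this is not part of the workflow):

import os
# Confirm accelerated downloads are enabled in the CI container.
assert os.environ.get("HF_HUB_ENABLE_HF_TRANSFER") == "1"

from huggingface_hub import snapshot_download
# With hf_transfer installed and the env var set, this download goes through
# the Rust hf_transfer backend instead of plain HTTP requests.
snapshot_download("Qwen/Qwen2.5-0.5B-Instruct")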
tests/sft/run_sft.sh

# Tested with 2 & 4 GPUs

set -x

torchrun --standalone --nnodes=1 --nproc_per_node=8 \
    -m verl.trainer.fsdp_sft_trainer \
    data.train_files=$HOME/data/gsm8k/train.parquet \
    data.val_files=$HOME/data/gsm8k/test.parquet \
    data.prompt_key=extra_info \
    data.response_key=extra_info \
    +data.prompt_dict_keys=['question'] \
    +data.response_dict_keys=['answer'] \
    data.micro_batch_size=32 \
    model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \
    trainer.default_local_dir=$HOME/ckpts/ \
    trainer.project_name=qwen2.5-sft \
    trainer.experiment_name=gsm8k-sft-qwen2.5-0.5b-instruct \
    trainer.total_training_steps=1 \
    trainer.logger=['console'] \
    trainer.default_hdfs_dir=null $@

rm -rf $HOME/ckpts/
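A note on the data.* overrides above: data.prompt_key / data.response_key select the parquet column, and the +data.prompt_dict_keys / +data.response_dict_keys additions pull individual fields out of that nested column. A rough illustration with pandas (not verl's actual dataset code; the paths and field names come from the script):

import os
import pandas as pd

# Written by examples/data_preprocess/gsm8k.py in the "Prepare gsm8k dataset" step.
df = pd.read_parquet(os.path.expanduser("~/data/gsm8k/train.parquet"))
# data.prompt_key=extra_info + prompt_dict_keys=['question'] -> extra_info['question']
prompts = df["extra_info"].apply(lambda row: row["question"])
# data.response_key=extra_info + response_dict_keys=['answer'] -> extra_info['answer']
responses = df["extra_info"].apply(lambda row: row["answer"])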
verl/trainer/config/sft_trainer.yaml

@@ -33,6 +33,7 @@ trainer:
  project_name: gsm8k-sft
  experiment_name: test
  total_epochs: 4
  total_training_steps: null
  logger: ['console']
  seed: 1
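The new total_training_steps: null default means "no cap". Since the trainer config is Hydra/OmegaConf-style (the +data.… overrides in run_sft.sh are Hydra syntax), YAML null loads as Python None, which is exactly what the trainer checks for. A small sketch, assuming OmegaConf:

from omegaconf import OmegaConf

cfg = OmegaConf.create({"trainer": {"total_epochs": 4, "total_training_steps": None}})
# YAML `null` becomes Python None, so the epoch-based default in the trainer applies.
assert cfg.trainer.total_training_steps is None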
verl/trainer/fsdp_sft_trainer.py

@@ -320,6 +320,15 @@ class FSDPSFTTrainer(object):
                                default_backend=self.config.trainer.logger)

        global_step = 0

        # Compute the total number of training steps.
        # In SFT, total_training_steps is mainly used for early exit (e.g. one-step CI runs).
        total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs

        if self.config.trainer.total_training_steps is not None:
            total_training_steps = self.config.trainer.total_training_steps

        self.total_training_steps = total_training_steps
        print(f'Total training steps: {self.total_training_steps}')

        # TODO (zhangchi.usc1992) add back checkpoint manager. Currently, it blocks when uploading to hdfs. So very slow.
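A worked example of the hunk above, with an assumed dataloader length (the real value depends on the dataset and batch size):

steps_per_epoch = 100          # assumed len(self.train_dataloader)
total_epochs = 4               # trainer.total_epochs default in sft_trainer.yaml
total_training_steps = steps_per_epoch * total_epochs   # 400 without a cap

override = 1                   # trainer.total_training_steps=1 in run_sft.sh
if override is not None:
    total_training_steps = override   # CI stops after a single step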
@@ -332,6 +341,24 @@ class FSDPSFTTrainer(object):
                    tracking.log(data=metric, step=global_step)
                global_step += 1

                # Early exit: once the step budget is exhausted, run a final
                # validation pass, save a checkpoint, and stop training.
                if global_step >= self.total_training_steps:
                    # Perform final validation
                    val_losses = []
                    for val_data in self.val_dataloader:
                        val_data = TensorDict(val_data, batch_size=self.config.data.micro_batch_size).cuda()
                        val_loss = self.validation_step(val_data)
                        val_losses.append(val_loss)
                    if rank == 0:
                        avg_val_loss = torch.mean(torch.stack(val_losses))
                        metric = {'val/loss': avg_val_loss.detach().item()}
                        tracking.log(data=metric, step=global_step)
                    torch.distributed.barrier()

                    # Save final checkpoint
                    self.save_checkpoint(step=global_step)
                    return

            # per-epoch validation
            val_losses = []
            for data in self.val_dataloader:
......
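For reference, the final-validation reduction in the early-exit branch above stacks per-batch scalar losses and logs their mean on rank 0. A standalone sketch with made-up losses:

import torch

val_losses = [torch.tensor(0.52), torch.tensor(0.47), torch.tensor(0.51)]  # assumed per-batch losses
avg_val_loss = torch.mean(torch.stack(val_losses))
print({'val/loss': avg_val_loss.detach().item()})  # -> roughly {'val/loss': 0.5}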