Skip to content

Commit

Permalink
⚗️ Style Transfer: modify hyperparameters to fix DPO learning problem
Browse files Browse the repository at this point in the history
  • Loading branch information
simonmeoni committed Oct 17, 2024
1 parent d3fa9b2 commit 0ca86de
Show file tree
Hide file tree
Showing 4 changed files with 22 additions and 3 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Launch reward-based-generation style-transfer training with
# Llama-3.2-3B-Instruct on the MIMIC style-transfer dataset.
# No SFT dataset is used (dataset.sft_dataset=null); DPO runs for
# 80 epochs over the top-30% (percentile=70) scored generations.
#
# All key=value arguments are Hydra-style config overrides consumed
# by style_transfer/run_rb_gen.py.
set -euo pipefail

# Pin the job to GPU 1 only.
export CUDA_VISIBLE_DEVICES=1

python style_transfer/run_rb_gen.py model.name=meta-llama/Llama-3.2-3B-Instruct \
model.peft_config.target_modules='["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]' \
dataset.name=bio-datasets/mimic_style_transfer \
max_steps=5 \
dataset.num_generated_samples=3500 \
score.model.model_name_or_path=sentence-transformers/all-mpnet-base-v2 \
dataset.sft_ratio=0.06 \
dataset.gen_ratio=0.7 \
dataset.sft_dataset=null \
sft.training_args.eval_steps=30 \
score.train.train_size=0.3 \
dpo.training_args.num_train_epochs=80 \
dpo.percentile=70
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,6 @@ python style_transfer/run_rb_gen.py model.name=meta-llama/Llama-3.2-3B-Instruct
dataset.gen_ratio=0.7 \
sft.training_args.eval_steps=30 \
score.train.train_size=0.3 \
dataset.sft_dataset.size=300
dataset.sft_dataset.size=300 \
dpo.training_args.num_train_epochs=80 \
dpo.percentile=70
4 changes: 3 additions & 1 deletion lib/style-transfer/bash/experiment/rb_gen/az/llama3.2-3b.sh
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,6 @@ python style_transfer/run_rb_gen.py model.name=meta-llama/Llama-3.2-3B-Instruct
dataset.gen_ratio=0.7 \
dataset.sft_dataset=null \
sft.training_args.eval_steps=30 \
score.train.train_size=0.3
score.train.train_size=0.3 \
dpo.training_args.num_train_epochs=80 \
dpo.percentile=70
3 changes: 2 additions & 1 deletion lib/style-transfer/configs/rb_gen/dpo/default.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,11 @@ training_args:
save_steps: 50
gradient_accumulation_steps: 16
gradient_checkpointing: false
learning_rate: 5e-7
learning_rate: 4e-6
weight_decay: 1e-7
eval_strategy: "no"
num_train_epochs: 5
output_dir: "models/dpo/"
optim: "adafactor"
save_only_model: true
remove_unused_columns: false
Expand Down

0 comments on commit 0ca86de

Please sign in to comment.