From 8012adcf01ef4c905590c33da934c8c29341bd7c Mon Sep 17 00:00:00 2001
From: Simon Meoni
Date: Thu, 7 Nov 2024 17:18:29 +0100
Subject: [PATCH] =?UTF-8?q?=E2=9A=97=EF=B8=8F=20Style=20Transfer:=20add=20?=
 =?UTF-8?q?experiments?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../rb_gen/az/llama3.2-3b-complete-replaced.sh    | 15 +++++++++++++++
 .../rb_gen/az/llama3.2-3b-pb-seed-replaced.sh     | 15 +++++++++++++++
 2 files changed, 30 insertions(+)
 create mode 100644 lib/style-transfer/bash/experiment/rb_gen/az/llama3.2-3b-complete-replaced.sh
 create mode 100644 lib/style-transfer/bash/experiment/rb_gen/az/llama3.2-3b-pb-seed-replaced.sh

diff --git a/lib/style-transfer/bash/experiment/rb_gen/az/llama3.2-3b-complete-replaced.sh b/lib/style-transfer/bash/experiment/rb_gen/az/llama3.2-3b-complete-replaced.sh
new file mode 100644
index 0000000..7ad5b83
--- /dev/null
+++ b/lib/style-transfer/bash/experiment/rb_gen/az/llama3.2-3b-complete-replaced.sh
@@ -0,0 +1,15 @@
+export CUDA_VISIBLE_DEVICES=$1
+python style_transfer/run_rb_gen.py model.name=meta-llama/Llama-3.2-3B-Instruct \
+  model.peft_config.target_modules='["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]' \
+  dataset.name=bio-datasets/mimic-iii-gpt4o-tokens \
+  max_steps=5 \
+  dataset.num_generated_samples=3500 \
+  score.model.model_name_or_path=sentence-transformers/all-mpnet-base-v2 \
+  dataset.sft_ratio=0.06 \
+  dataset.gen_ratio=0.7 \
+  dataset.sft_dataset=null \
+  sft.training_args.eval_steps=30 \
+  score.train.train_size=0.6 \
+  dpo.training_args.num_train_epochs=10 \
+  dpo.percentile=70 \
+  score.batch_size=8
diff --git a/lib/style-transfer/bash/experiment/rb_gen/az/llama3.2-3b-pb-seed-replaced.sh b/lib/style-transfer/bash/experiment/rb_gen/az/llama3.2-3b-pb-seed-replaced.sh
new file mode 100644
index 0000000..3748f99
--- /dev/null
+++ b/lib/style-transfer/bash/experiment/rb_gen/az/llama3.2-3b-pb-seed-replaced.sh
@@ -0,0 +1,15 @@
+export CUDA_VISIBLE_DEVICES=$1
+python style_transfer/run_rb_gen.py model.name=meta-llama/Llama-3.2-3B-Instruct \
+  model.peft_config.target_modules='["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]' \
+  dataset.name=bio-datasets/mimic-iii-gpt4o-tokens \
+  max_steps=5 \
+  dataset.num_generated_samples=3500 \
+  score.model.model_name_or_path=sentence-transformers/all-mpnet-base-v2 \
+  dataset.sft_ratio=0.06 \
+  dataset.gen_ratio=0.7 \
+  sft.training_args.eval_steps=30 \
+  score.train.train_size=0.6 \
+  dataset.sft_dataset.size=977 \
+  dpo.training_args.num_train_epochs=20 \
+  dpo.percentile=70 \
+  score.batch_size=8