# This script trains a model to output positive reviews
# using rejection finetuning with a sentiment classifier reward function.
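# Rejection finetuning (RFT), roughly: sample several continuations per prompt, score
# them with the reward function, keep only the top-scoring ones, and finetune on those
# with an ordinary supervised loss. See AccelerateRFTTrainer for the exact procedure;
# this comment is a sketch, not a specification.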
import json
import os
import sys
from typing import List

import torch
from datasets import load_dataset
from transformers import pipeline

import trlx
from trlx.data.default_configs import (
    ModelConfig,
    OptimizerConfig,
    SchedulerConfig,
    TokenizerConfig,
    TrainConfig,
    TRLConfig,
)
from trlx.trainer.accelerate_rft_trainer import RFTConfig


def get_positive_score(scores):
    "Extract value associated with a positive sentiment from pipeline's output"
    return dict(map(lambda x: tuple(x.values()), scores))["POSITIVE"]
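# Illustrative only: with top_k=2 the sentiment pipeline returns entries such as
# [{"label": "POSITIVE", "score": 0.94}, {"label": "NEGATIVE", "score": 0.06}],
# so get_positive_score(...) would return 0.94 for that (made-up) example.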


default_config = TRLConfig(
    train=TrainConfig(
        seq_length=1024,
        epochs=100,
        total_steps=1000,
        batch_size=32,
        checkpoint_interval=10000,
        eval_interval=100,
        pipeline="PromptPipeline",
        trainer="AccelerateRFTTrainer",
    ),
    model=ModelConfig(model_path="lvwerra/gpt2-imdb", num_layers_unfrozen=-1),
    tokenizer=TokenizerConfig(tokenizer_path="gpt2", truncation_side="right"),
    optimizer=OptimizerConfig(name="adamw", kwargs=dict(lr=3e-5, betas=(0.9, 0.95), eps=1.0e-8, weight_decay=1.0e-6)),
    scheduler=SchedulerConfig(name="cosine_annealing", kwargs=dict(T_max=1e12, eta_min=3e-5)),
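    # A rough reading of the RFT-specific knobs below (see AccelerateRFTTrainer for the
    # authoritative behaviour): n_generations_per_prompt completions are sampled per
    # prompt, completions are filtered by a reward percentile threshold that moves from
    # start_percentile to end_percentile, and the surviving completions are used for
    # n_improve_steps supervised finetuning passes.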
    method=RFTConfig(
        name="RFTConfig",
        n_generations_per_prompt=4,
        start_percentile=0.9,
        end_percentile=0.95,
        n_improve_steps=1,
        gen_kwargs=dict(
            max_new_tokens=40,
            top_k=0,
            top_p=1.0,
            temperature=1.0,
            do_sample=True,
        ),
    ),
)


def main(hparams={}):
    config = TRLConfig.update(default_config, hparams)
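
    # Pick a device for the sentiment classifier: under torchrun/accelerate each process
    # uses its LOCAL_RANK GPU; device=-1 makes the transformers pipeline run on CPU.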
    if torch.cuda.is_available():
        device = int(os.environ.get("LOCAL_RANK", 0))
    else:
        device = -1

    sentiment_fn = pipeline(
        "sentiment-analysis",
        "lvwerra/distilbert-imdb",
        top_k=2,
        truncation=True,
        batch_size=256,
        device=device,
    )
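
    # The reward is the classifier's probability for the POSITIVE label; trlx is expected
    # to pass the full generated texts (prompt + continuation) to reward_fn as `samples`.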
    def reward_fn(samples: List[str], **kwargs) -> List[float]:
        sentiments = list(map(get_positive_score, sentiment_fn(samples)))
        return sentiments

    # Take the first few words of movie reviews as prompts
    imdb = load_dataset("imdb", split="train[:512]")
    prompts = [" ".join(review.split()[:4]) for review in imdb["text"]]
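    # e.g. a prompt might look like "This movie was absolutely" (hypothetical example,
    # not an actual row of the dataset)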

    trlx.train(
        reward_fn=reward_fn,
        prompts=prompts,
        eval_prompts=["I don't know much about Hungarian underground"] * 256,
        config=config,
    )


if __name__ == "__main__":
    hparams = {} if len(sys.argv) == 1 else json.loads(sys.argv[1])
    main(hparams)
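
# Example invocations (a sketch; the exact hparams schema is whatever TRLConfig.update
# accepts, so treat the JSON override below as hypothetical):
#   accelerate launch rft_sentiments.py
#   python rft_sentiments.py '{"train": {"total_steps": 200}}'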