train.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataset import train_loader, vocab_dict
from model import Transformer, create_padding_mask, create_look_ahead_mask
import config
from tqdm import tqdm

# Getting the vocabulary size for the embedding matrix
vocab_size = len(vocab_dict)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

## Setting up the transformer
transformer = Transformer(d_model=config.d_model,
                          heads=config.heads,
                          num_layers=config.num_layers,
                          vocab_size=vocab_size)

## Sending the transformer to the device
transformer = transformer.to(device)

## Hack no. 1: initialise every weight matrix with Xavier uniform
for p in transformer.parameters():
    if p.dim() > 1:
        nn.init.xavier_uniform_(p)

## To resume training from a saved checkpoint, uncomment the lines below
# checkpoint = torch.load('checkpoint.pth.tar')
# transformer = checkpoint['transformer']

## Hack no. 2: SGD with a step-decayed learning rate, taken from the PyTorch
## transformer tutorial
lr = 5.0  # initial learning rate
optimizer = torch.optim.SGD(transformer.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
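
# A fuller resume sketch (an assumption, not active code in this script): the
# checkpoint written at the bottom of this file stores the whole optimizer object
# under 'transformer_optimizer', so its state could be restored alongside the model.
# checkpoint = torch.load('checkpoint.pth.tar', map_location=device)
# transformer = checkpoint['transformer'].to(device)
# optimizer = checkpoint['transformer_optimizer']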

for epoch in range(config.epochs):
    tot_loss = 0
    count = 0
    enum = 0
    for (question, reply) in tqdm(train_loader):
        batch_size = question.shape[0]
        src = question.to(device)
        target = reply.to(device)

        ## Teacher forcing: the decoder input is the reply without its last token,
        ## and the labels are the reply shifted left by one token
        target_input = target[:, :-1]
        ys = target[:, 1:].contiguous().view(-1)

        src_mask = create_padding_mask(src)
        trg_mask = create_look_ahead_mask(target_input)

        preds = transformer(src, target_input, src_mask, trg_mask)
        preds = preds.view(-1, preds.size(-1))
        # index 0 is treated as padding and excluded from the loss
        loss = F.cross_entropy(preds, ys, ignore_index=0)

        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(transformer.parameters(), 0.5)
        optimizer.step()

        tot_loss += loss.item() * batch_size
        count += batch_size
        enum += 1
        if enum % 200 == 0:
            print("Loss: {:.3f}".format(tot_loss / count))

    print(f'Epoch {epoch} completed')
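    # Assumed addition (a sketch following the PyTorch transformer tutorial the
    # optimizer settings above come from): decay the learning rate once per epoch.
    scheduler.step()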

## Save a checkpoint once training is done
state = {'epoch': config.epochs, 'transformer': transformer, 'transformer_optimizer': optimizer}
torch.save(state, 'checkpoint.pth.tar')
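
# An alternative worth noting (a sketch, not what this script does): saving
# state_dicts instead of whole objects keeps the checkpoint loadable even if the
# model class definition moves or changes.
# torch.save({'epoch': config.epochs,
#             'model_state_dict': transformer.state_dict(),
#             'optimizer_state_dict': optimizer.state_dict()},
#            'checkpoint_state_dict.pth.tar')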