-
Notifications
You must be signed in to change notification settings - Fork 4
/
train.py
75 lines (63 loc) · 2.57 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
# -*- coding: utf-8 -*-
# @File : train.py
# @Author : Kaicheng Yang
# @Time : 2022/01/26 11:03:11
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from model import CAFIA_Transformer
from tqdm import tqdm
import logging
from scheduler import cosine_lr
# Root logger at NOTSET: every log level from any module is emitted.
logging.basicConfig(level = logging.NOTSET)
# Pin the process to GPU index 1; `device` is the single CUDA device used
# for the model and all batches below.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
device = torch.device("cuda")
def train_model(args, trainloader, testloader):
    """Train a CAFIA_Transformer classifier, evaluating after each epoch.

    After every epoch the model is scored on ``testloader`` and a checkpoint
    named ``<accuracy>.pt`` is written into ``args.output``.

    Args:
        args: namespace providing optimizer hyperparameters (``beta1``,
            ``beta2``, ``eps``, ``learning_rate``, ``weight_decay``),
            schedule settings (``warmup``, ``epoches``), ``batch_size``,
            ``output`` (checkpoint directory), and the model config
            forwarded to ``CAFIA_Transformer``.
        trainloader: iterable of ``(batch_X, batch_Y)`` training batches.
        testloader: iterable of ``(batch_X, batch_Y)`` evaluation batches.
    """
    n_gpu = 1
    model = CAFIA_Transformer(args)
    model.to(device)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.AdamW(model.parameters(), betas=(args.beta1, args.beta2),
                            eps=args.eps, lr=args.learning_rate,
                            weight_decay=args.weight_decay)
    # len(trainloader) is already the number of batches per epoch; the
    # original divided it by batch_size again, shrinking the cosine schedule
    # to a fraction of the real step count.
    total_steps = len(trainloader) * args.epoches
    scheduler = cosine_lr(optimizer, args.learning_rate, args.warmup, total_steps)
    nb_tr_steps = 0
    logging.info('**************************** start to train *******************************')
    for epoch in range(args.epoches):
        # Re-enable train mode: eval() is set at the end of each epoch, and the
        # original never switched back, so epochs >= 1 trained in eval mode.
        model.train()
        train_loss = 0.0
        train_iter = 0
        for batch in tqdm(trainloader, desc = "Iteration"):
            nb_tr_steps += 1
            # Set this step's LR *before* the parameter update so the very
            # first update already uses the scheduled (warmup) rate.
            scheduler(nb_tr_steps)
            optimizer.zero_grad()
            batch = tuple(t.to(device) for t in batch)
            batch_X, batch_Y = batch
            outputs = model(batch_X)
            loss = loss_fn(outputs, batch_Y)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            train_iter += 1
            logging.info('Epoch:%d batch_loss:%f', epoch, loss)
        # Mean training loss over the epoch. The original divided only the
        # *last* batch's loss by the iteration count, which is meaningless.
        train_loss = train_loss / max(train_iter, 1)
        #eval
        logging.info('**************************** start to evaluate *******************************')
        model.eval()
        total, correct = 0, 0
        with torch.no_grad():  # no autograd graph needed during evaluation
            for batch in tqdm(testloader, desc = "Iteration"):
                batch = tuple(t.to(device) for t in batch)
                batch_X, batch_Y = batch
                outputs = model(batch_X)
                _, predicted = torch.max(outputs.data, 1)
                total += batch_Y.size(0)
                correct += (predicted == batch_Y).sum()
        acc = (correct / total).item() if total else 0.0
        logging.info('Epoch: %d train_loss: %f Accuracy: %f', epoch, train_loss, acc)
        # makedirs + exist_ok also creates missing parents and avoids the
        # race between the exists() check and mkdir() in the original.
        os.makedirs(args.output, exist_ok=True)
        output_path = os.path.join(args.output, str(acc)+'.pt')
        torch.save(model.state_dict(), output_path)