-
Notifications
You must be signed in to change notification settings - Fork 13
/
train_transformer.py
67 lines (55 loc) · 1.59 KB
/
train_transformer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
from transformer_maskgit import CTViT, MaskGit, MaskGITTransformer
from transformer_maskgit.videotextdataset import VideoTextDataset
from transformer_maskgit.train_transformer import TransformerTrainer
from torch.utils.data import Dataset, DataLoader, random_split
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torch.multiprocessing as mp
import os
def cycle(dl):
    """Yield items from *dl* forever, restarting iteration whenever it is exhausted.

    Unlike ``itertools.cycle``, this re-iterates the underlying loader on each
    pass (no caching), so a shuffling DataLoader produces fresh batch orderings.
    """
    while True:
        yield from dl
def train(
    batch_size=1,
    num_train_steps=100000000,
    pretrained_ctvit_path='pretrained_models/ctvit_pretrained.pt',
    results_folder='transformer_train',
):
    """Build a CTViT + MaskGit transformer and run training.

    Loads pre-trained CTViT weights, wraps the two models in a
    MaskGITTransformer, and hands everything to TransformerTrainer.

    Parameters (all keyword, with the original hard-coded values as defaults,
    so existing ``train()`` callers are unaffected):
        batch_size: per-step batch size passed to the trainer.
        num_train_steps: total optimizer steps to run.
        pretrained_ctvit_path: checkpoint loaded into CTViT and also given
            to the trainer.
        results_folder: directory where the trainer writes outputs.
    """
    # Tokenizer/backbone for CT volumes; hyperparameters match the
    # pre-trained checkpoint loaded below.
    ctvit = CTViT(
        dim=512,
        codebook_size=8192,
        image_size=128,
        patch_size=16,
        temporal_patch_size=2,
        spatial_depth=4,
        temporal_depth=4,
        dim_head=32,
        heads=8,
    )
    # Load the pre-trained CTViT weights before wrapping it.
    ctvit.load(pretrained_ctvit_path)

    # dim_context=768 — presumably the text-encoder hidden size; confirm
    # against the text conditioning module used by MaskGITTransformer.
    maskgit = MaskGit(
        num_tokens=8192,
        max_seq_len=10000,
        dim=512,
        dim_context=768,
        depth=6,
    )

    transformer_model = MaskGITTransformer(
        ctvit=ctvit,
        maskgit=maskgit,
    )

    trainer = TransformerTrainer(
        transformer_model,
        num_train_steps=num_train_steps,
        batch_size=batch_size,  # was a dead local before; now actually used
        pretrained_ctvit_path=pretrained_ctvit_path,
        results_folder=results_folder,
    )
    trainer.train()
# Script entry point: kick off training only when run directly,
# not when this module is imported.
if __name__ == '__main__':
    train()