data_utils.py
import copy

from torch.utils.data import ConcatDataset, DataLoader

from dataset import SeqRecDataset
from tokenizer import MTGRecTokenizer, Tokenizer


def get_datasets(config):
    """Build the train/valid/test SeqRecDataset splits."""
    train_dataset = SeqRecDataset(config, split='train')
    valid_dataset = SeqRecDataset(config, split='valid', sample_ratio=config['val_ratio'])
    test_dataset = SeqRecDataset(config, split='test')
    return train_dataset, valid_dataset, test_dataset


def get_less_datasets(config):
    """Build only the train/valid splits (no test set)."""
    train_dataset = SeqRecDataset(config, split='train')
    valid_dataset = SeqRecDataset(config, split='valid')
    return train_dataset, valid_dataset


def get_tokenizers(config):
    """Create one MTGRecTokenizer per semantic-ID epoch; fall back to a plain Tokenizer if none."""
    tokenizers = []
    for sem_id_epoch in config["sem_id_epochs"]:
        tokenizer = MTGRecTokenizer(config, sem_id_epoch)
        tokenizers.append(tokenizer)
    if len(tokenizers) == 0:
        tokenizers.append(Tokenizer(config))
    return tokenizers


def get_dataloader(config, dataset, collate_fn, split):
    """Wrap a dataset in a DataLoader; the train loader shuffles, eval loaders do not."""
    if split == 'train':
        dataloader = DataLoader(dataset, batch_size=config['train_batch_size'], collate_fn=collate_fn,
                                num_workers=config['num_proc'], shuffle=True)
    else:
        dataloader = DataLoader(dataset, batch_size=config['eval_batch_size'], collate_fn=collate_fn,
                                num_workers=config['num_proc'], shuffle=False)
    return dataloader
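

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of how these helpers could be wired together, assuming a
# `config` dict that carries the keys referenced above ('val_ratio',
# 'sem_id_epochs', 'train_batch_size', 'eval_batch_size', 'num_proc') plus
# whatever keys SeqRecDataset and the tokenizers actually require. The values
# and the choice of collate_fn below are assumptions, not the project's
# documented API.
#
#     config = {
#         'val_ratio': 0.1,
#         'sem_id_epochs': [],        # empty list -> get_tokenizers falls back to Tokenizer(config)
#         'train_batch_size': 256,
#         'eval_batch_size': 512,
#         'num_proc': 4,
#         # ... dataset-specific keys expected by SeqRecDataset go here ...
#     }
#     train_set, valid_set, test_set = get_datasets(config)
#     tokenizers = get_tokenizers(config)
#     train_loader = get_dataloader(config, train_set, collate_fn=None, split='train')
#     valid_loader = get_dataloader(config, valid_set, collate_fn=None, split='valid')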