-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy path dataset.py
45 lines (35 loc) · 1.32 KB
/
dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
"""
Creates a character-level language-modelling dataset from a stream of text.
You shouldn't need to make any changes to this file.
"""
import torch
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
class CharDataset(Dataset):
    """
    Character-level language-modelling dataset.

    Each item i is a window of ``block_size + 1`` consecutive characters
    starting at offset i in the corpus: ``x`` is the first ``block_size``
    characters encoded as integer ids, and ``y`` is the same window
    shifted one position to the right (the next-character targets).
    """

    def __init__(self, config, data):
        """
        config: object exposing a ``block_size`` attribute (context length).
        data: the full text corpus as a single string.
        """
        self.config = config
        # vocabulary = sorted set of unique characters seen in the corpus;
        # sorting makes the char<->id mapping deterministic across runs
        chars = sorted(set(data))
        data_size, vocab_size = len(data), len(chars)
        print('data has %d characters, %d unique.' % (data_size, vocab_size))
        # char <-> integer-id lookup tables
        self.stoi = {ch: i for i, ch in enumerate(chars)}
        self.itos = {i: ch for i, ch in enumerate(chars)}
        self.vocab_size = vocab_size
        self.data = data

    def get_vocab_size(self):
        """Number of distinct characters in the corpus."""
        return self.vocab_size

    def get_block_size(self):
        """Context length (window size) of each example."""
        return self.config.block_size

    def __len__(self):
        # Number of valid starting offsets. Clamp at 0 so a corpus shorter
        # than block_size yields an empty dataset instead of a negative
        # length (which would make len() raise and break DataLoader).
        return max(0, len(self.data) - self.config.block_size)

    def __getitem__(self, idx):
        # grab a chunk of (block_size + 1) characters from the data
        chunk = self.data[idx:idx + self.config.block_size + 1]
        # encode every character to an integer id
        dix = [self.stoi[s] for s in chunk]
        # x is the input window; y is x shifted left by one character,
        # i.e. the next-character prediction target at every position
        x = torch.tensor(dix[:-1], dtype=torch.long)
        y = torch.tensor(dix[1:], dtype=torch.long)
        return x, y