db
committed on
Commit
•
04b09ca
1
Parent(s):
2e9cfe3
init
Browse files- data/openwebtext/prepare.py +74 -0
- data/openwebtext/readme.md +15 -0
- data/shakespeare/prepare.py +33 -0
- data/shakespeare_char/prepare.py +68 -0
data/openwebtext/prepare.py
ADDED
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Saves the openwebtext dataset to a binary file for training. The following
# was a helpful reference:
# https://github.com/HazyResearch/flash-attention/blob/main/training/src/datamodules/language_modeling_hf.py

import os
from tqdm import tqdm
import numpy as np
import tiktoken
from datasets import load_dataset # huggingface datasets

# number of workers in .map() call
# good number to use is ~order number of cpu cores // 2
num_proc = 8

# takes 54GB in huggingface .cache dir, about 8M documents (8,013,769)
dataset = load_dataset("openwebtext")

# owt by default only contains the 'train' split, so create a test split
split_dataset = dataset["train"].train_test_split(test_size=0.0005, seed=2357, shuffle=True)
split_dataset['val'] = split_dataset.pop('test') # rename the test split to val

# the split above yields:
# >>> split_dataset
# DatasetDict({
#     train: Dataset({ features: ['text'], num_rows: 8009762 })
#     val:   Dataset({ features: ['text'], num_rows: 4007    })
# })

# tokenizer used below: the gpt2 byte-pair encoding
enc = tiktoken.get_encoding("gpt2")
def process(example):
    """Tokenize one document with the gpt2 BPE and append the EOT token.

    Returns a dict with the token ids and their count, which .map() turns
    into 'ids' and 'len' columns.
    """
    # encode_ordinary ignores any special tokens in the raw text
    token_ids = enc.encode_ordinary(example['text'])
    # terminate the document with the end-of-text token (50256 for gpt2 bpe)
    # note: I think eot should be prepended not appended... hmm. it's called "eot" though...
    token_ids.append(enc.eot_token)
    return {'ids': token_ids, 'len': len(token_ids)}
# run the tokenizer over every split in parallel, dropping the raw text column
tokenized = split_dataset.map(
    process,
    remove_columns=['text'],
    desc="tokenizing the splits",
    num_proc=num_proc,
)
# concatenate all the ids in each dataset into one large file we can use for training
for split, dset in tokenized.items():
    # total token count determines the size of the memory-mapped output file
    arr_len = np.sum(dset['len'])
    filename = os.path.join(os.path.dirname(__file__), f'{split}.bin')
    dtype = np.uint16 # (can do since enc.max_token_value == 50256 is < 2**16)
    arr = np.memmap(filename, dtype=dtype, mode='w+', shape=(arr_len,))
    total_batches = 1024

    idx = 0
    # fix: the original desc was f'writing (unknown)' — an f-string with no
    # placeholder; show the actual output file in the progress bar
    for batch_idx in tqdm(range(total_batches), desc=f'writing {filename}'):
        # batch together samples for faster write; contiguous shards keep
        # the concatenation order stable across the whole split
        batch = dset.shard(num_shards=total_batches, index=batch_idx, contiguous=True).with_format('numpy')
        arr_batch = np.concatenate(batch['ids'])
        # write into mmap at the running offset
        arr[idx : idx + len(arr_batch)] = arr_batch
        idx += len(arr_batch)
    # make sure the mmap contents hit disk before moving to the next split
    arr.flush()

# train.bin is ~17GB, val.bin ~8.5MB
# train has ~9B tokens (9,035,582,198)
# val has ~4M tokens (4,434,897)

# to read the bin files later, e.g. with numpy:
# m = np.memmap('train.bin', dtype=np.uint16, mode='r')
data/openwebtext/readme.md
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
## openwebtext dataset
|
3 |
+
|
4 |
+
after running `prepare.py` (preprocess) we get:
|
5 |
+
|
6 |
+
- train.bin is ~17GB, val.bin ~8.5MB
|
7 |
+
- train has ~9B tokens (9,035,582,198)
|
8 |
+
- val has ~4M tokens (4,434,897)
|
9 |
+
|
10 |
+
this came from 8,013,769 documents in total.
|
11 |
+
|
12 |
+
references:
|
13 |
+
|
14 |
+
- OpenAI's WebText dataset is discussed in [GPT-2 paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf)
|
15 |
+
- [OpenWebText](https://skylion007.github.io/OpenWebTextCorpus/) dataset
|
data/shakespeare/prepare.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import requests
import tiktoken
import numpy as np

# download the tiny shakespeare dataset (once) next to this script
input_file_path = os.path.join(os.path.dirname(__file__), 'input.txt')
if not os.path.exists(input_file_path):
    data_url = 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt'
    # requests decodes the body to str; write it back as utf-8 explicitly —
    # the default for open() is locale-dependent and can differ per platform
    with open(input_file_path, 'w', encoding='utf-8') as f:
        f.write(requests.get(data_url).text)
# read the full text; decode explicitly as utf-8 (the default encoding for
# open() is locale-dependent, which could mis-decode on some platforms)
with open(input_file_path, 'r', encoding='utf-8') as f:
    data = f.read()
# first 90% of characters is train, the remaining 10% is val
n = len(data)
train_data = data[:int(n*0.9)]
val_data = data[int(n*0.9):]
# encode both splits with the tiktoken gpt2 bpe
enc = tiktoken.get_encoding("gpt2")
train_ids = enc.encode_ordinary(train_data)
val_ids = enc.encode_ordinary(val_data)
print(f"train has {len(train_ids):,} tokens")
print(f"val has {len(val_ids):,} tokens")

# export to bin files: flat uint16 arrays written next to this script
train_ids = np.array(train_ids, dtype=np.uint16)
val_ids = np.array(val_ids, dtype=np.uint16)
train_ids.tofile(os.path.join(os.path.dirname(__file__), 'train.bin'))
val_ids.tofile(os.path.join(os.path.dirname(__file__), 'val.bin'))

# train.bin has 301,966 tokens
# val.bin has 36,059 tokens
data/shakespeare_char/prepare.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
Prepare the Shakespeare dataset for character-level language modeling.
So instead of encoding with GPT-2 BPE tokens, we just map characters to ints.
Will save train.bin, val.bin containing the ids, and meta.pkl containing the
encoder and decoder and some other related info.
"""
import os
import pickle
import requests
import numpy as np

# download the tiny shakespeare dataset (once) next to this script
input_file_path = os.path.join(os.path.dirname(__file__), 'input.txt')
if not os.path.exists(input_file_path):
    data_url = 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt'
    # requests decodes the body to str; write it back as utf-8 explicitly —
    # the default for open() is locale-dependent and can differ per platform
    with open(input_file_path, 'w', encoding='utf-8') as f:
        f.write(requests.get(data_url).text)
# read the full corpus; decode explicitly as utf-8 (the default encoding for
# open() is locale-dependent, which could mis-decode on some platforms)
with open(input_file_path, 'r', encoding='utf-8') as f:
    data = f.read()
print(f"length of dataset in characters: {len(data):,}")

# get all the unique characters that occur in this text
# (sorted() already returns a list, so no intermediate list() is needed)
chars = sorted(set(data))
vocab_size = len(chars)
print("all the unique characters:", ''.join(chars))
print(f"vocab size: {vocab_size:,}")
# character <-> integer lookup tables built from the sorted vocabulary
stoi = {ch: i for i, ch in enumerate(chars)}
itos = {i: ch for i, ch in enumerate(chars)}

def encode(s):
    """Encoder: take a string, output a list of integer token ids."""
    return [stoi[c] for c in s]

def decode(l):
    """Decoder: take a list of integer token ids, output a string."""
    return ''.join(itos[i] for i in l)
# create the train and test splits: first 90% of characters is train,
# the remaining 10% is val
n = len(data)
split_at = int(n * 0.9)
train_data = data[:split_at]
val_data = data[split_at:]

# encode both splits to integer ids
train_ids = encode(train_data)
val_ids = encode(val_data)
print(f"train has {len(train_ids):,} tokens")
print(f"val has {len(val_ids):,} tokens")
# export both splits as flat uint16 binaries next to this script
out_dir = os.path.dirname(__file__)
train_ids = np.array(train_ids, dtype=np.uint16)
val_ids = np.array(val_ids, dtype=np.uint16)
train_ids.tofile(os.path.join(out_dir, 'train.bin'))
val_ids.tofile(os.path.join(out_dir, 'val.bin'))

# persist the vocab size and both lookup tables so later code can
# encode prompts / decode samples without re-reading the corpus
meta = {
    'vocab_size': vocab_size,
    'itos': itos,
    'stoi': stoi,
}
with open(os.path.join(out_dir, 'meta.pkl'), 'wb') as f:
    pickle.dump(meta, f)

# length of dataset in characters: 1115394
# all the unique characters:
# !$&',-.3:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
# vocab size: 65
# train has 1003854 tokens
# val has 111540 tokens