from transformers import AutoTokenizer
import jsonlines
import random
import os
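
# Tokenizer used to measure sequence length; chunk boundaries follow its token counts.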
tokenizer = AutoTokenizer.from_pretrained("NilanE/tinyllama-relora-merge")

max_seq_len = 2048
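
# Prompt template; its token count is reserved out of every chunk's budget below.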
prompt = "Translate this from Japanese to English:\n### JAPANESE: \n### ENGLISH: </s>"

input_file_path = "dataset-parallel-complete.jsonl"
output_file_path = input_file_path.split('.')[0] + "-chunked." + input_file_path.split('.')[1]

promptTokens = len(tokenizer.tokenize(prompt))
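

# Read the parallel corpus: each JSONL entry holds a source ('src') and
# target ('trg') document, with any stray '</s>' markers stripped out.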
def load_jsonl(file_path):
    data = []
    with jsonlines.open(file_path) as reader:
        for entry in reader:
            source = entry['src'].replace('</s>', '').strip()
            target = entry['trg'].replace('</s>', '').strip()
            data.append([source, target])
    return data


def save_jsonl(file_path, data):
    with jsonlines.open(file_path, 'w') as writer:
        writer.write_all(data)


chunks = []

data = load_jsonl(input_file_path)
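
# Leave a small margin under the model's context length, presumably for special
# tokens added at training time.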
max_seq_len -= 10

skippedDocs = 0
longLines = 0
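
# Walk each document line by line, greedily packing aligned src/trg line pairs
# into chunks that stay within max_seq_len tokens (prompt included).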
for doc in data:
    src_lines = doc[0].split('\n')
    trg_lines = doc[1].split('\n')

    out_src = []
    out_trg = []
    tokenCount = 0
    lastTokenCount = 0

    try:
        for x in range(len(src_lines)):
            out_src.append(src_lines[x])
            out_trg.append(trg_lines[x])
            out_src_string = "\n".join(out_src)
            out_trg_string = "\n".join(out_trg)
            tokenCount = len(tokenizer.tokenize(out_src_string.strip() + out_trg_string.strip())) + promptTokens

            if tokenCount - lastTokenCount < max_seq_len - 1:
                if tokenCount > max_seq_len - 1:
                    # Over the limit: emit everything except the newest pair,
                    # then start the next chunk with that pair.
                    src_end = out_src.pop()
                    trg_end = out_trg.pop()
                    chunk = {
                        'src': "\n".join(out_src).strip(),
                        'trg': "\n".join(out_trg).strip()
                    }
                    chunks.append(chunk)
                    out_src = [src_end]
                    out_trg = [trg_end]
                    if x + 1 == len(src_lines):
                        # The overflowing pair was the document's final line;
                        # emit it as its own chunk instead of silently dropping it.
                        chunks.append({'src': src_end.strip(), 'trg': trg_end.strip()})
                elif x + 1 == len(src_lines):
                    # End of the document: emit whatever has accumulated.
                    chunk = {
                        'src': out_src_string.strip(),
                        'trg': out_trg_string.strip()
                    }
                    chunks.append(chunk)
            else:
                # A single line pair exceeds the limit by itself: drop it.
                out_src.pop()
                out_trg.pop()
                out_src_string = "\n".join(out_src)
                out_trg_string = "\n".join(out_trg)
                tokenCount = len(tokenizer.tokenize(out_src_string.strip() + out_trg_string.strip())) + promptTokens
                longLines += 1

            lastTokenCount = tokenCount
    except IndexError:
        # Source and target have different line counts; skip the whole document.
        skippedDocs += 1
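
# Shuffle so chunk order is decorrelated from the original document order.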
random.shuffle(chunks)

print(f"LINES LONGER THAN MAX SEQUENCE LENGTH: {longLines}")
print(f"SKIPPED DOCS: {skippedDocs}")
if os.path.exists(output_file_path):
    os.remove(output_file_path)

save_jsonl(output_file_path, chunks)