mtasic85 committed on
Commit 39e0190
Parent: 3cb4a20

train model

scripts/train_model.py ADDED
@@ -0,0 +1,184 @@
+import gc
+
+from datasets import load_dataset, Dataset
+from transformers import AutoTokenizer
+
+
+def _batch_iterator():
+    ## code
+    # dataset = load_dataset('bigcode/programming-languages-keywords', split='train')
+
+    # for row in dataset:
+    #     for n in row['keywords']:
+    #         yield n
+
+    # del dataset
+    # gc.collect()
+
+    # code
+    dataset = (
+        load_dataset('bigcode/the-stack-smol-xs', lang, split='train', trust_remote_code=True)
+        for lang in [
+            'ada', 'agda', 'alloy', 'antlr', 'applescript', 'assembly', 'augeas', 'awk', 'batchfile', 'bison', 'bluespec', 'c',
+            'c++', 'c-sharp', 'clojure', 'cmake', 'coffeescript', 'common-lisp', 'css', 'cuda', 'dart', 'dockerfile', 'elixir',
+            'elm', 'emacs-lisp', 'erlang', 'f-sharp', 'fortran', 'glsl', 'go', 'groovy', 'haskell', 'html', 'idris', 'isabelle', 'java',
+            'java-server-pages', 'javascript', 'julia', 'kotlin', 'lean', 'literate-agda', 'literate-coffeescript', 'literate-haskell',
+            'lua', 'makefile', 'maple', 'markdown', 'mathematica', 'matlab', 'ocaml', 'pascal', 'perl', 'php', 'powershell', 'prolog',
+            'protocol-buffer', 'python', 'r', 'racket', 'restructuredtext', 'rmarkdown', 'ruby', 'rust', 'sas', 'scala', 'scheme',
+            'shell', 'smalltalk', 'solidity', 'sparql', 'sql', 'stan', 'standard-ml', 'stata', 'systemverilog', 'tcl', 'tcsh', 'tex',
+            'thrift', 'typescript', 'verilog', 'vhdl', 'visual-basic', 'xslt', 'yacc', 'zig'
+        ]
+    )
+
+    for d in dataset:
+        for row in d:
+            yield row['content']
+
+    del dataset
+    gc.collect()
+
+    # text
+    dataset = load_dataset('nampdn-ai/tiny-textbooks', split='train')
+
+    for row in dataset:
+        yield row['text']
+
+    del dataset
+    gc.collect()
+
+    ## text
+    # dataset = (
+    #     load_dataset('wikimedia/wikisource', lang, split='train')
+    #     for lang in ['20231201.ar', '20231201.as', '20231201.az', '20231201.ban', '20231201.be', '20231201.bg', '20231201.bn', '20231201.br', '20231201.bs', '20231201.ca', '20231201.cs', '20231201.cy', '20231201.da', '20231201.de', '20231201.el', '20231201.en', '20231201.eo', '20231201.es', '20231201.et', '20231201.eu', '20231201.fa', '20231201.fi', '20231201.fo', '20231201.fr', '20231201.gl', '20231201.gu', '20231201.he', '20231201.hi', '20231201.hr', '20231201.hu', '20231201.hy', '20231201.id', '20231201.is', '20231201.it', '20231201.ja', '20231201.jv', '20231201.kn', '20231201.ko', '20231201.la', '20231201.li', '20231201.lij', '20231201.lt', '20231201.mk', '20231201.ml', '20231201.mr', '20231201.nap', '20231201.nl', '20231201.no', '20231201.or', '20231201.pa', '20231201.pl', '20231201.pms', '20231201.pt', '20231201.ro', '20231201.ru', '20231201.sa', '20231201.sah', '20231201.sk', '20231201.sl', '20231201.sr', '20231201.su', '20231201.sv', '20231201.ta', '20231201.te', '20231201.th', '20231201.tr', '20231201.uk', '20231201.vec', '20231201.vi', '20231201.wa', '20231201.yi', '20231201.zh', '20231201.zh-min-nan']
+    # )
+    #
+    # for d in dataset:
+    #     for row in d['text']:
+    #         yield row
+    #
+    # del dataset
+    # gc.collect()
+
+    # text
+    dataset = (
+        load_dataset('xu-song/cc100-samples', lang, split='train')
+        for lang in ['am', 'ar', 'as', 'az', 'be', 'bg', 'bn', 'bn_rom', 'br', 'bs', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gn', 'gu', 'ha', 'he', 'hi', 'hi_rom', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'ku', 'ky', 'la', 'lg', 'li', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'my_zaw', 'ne', 'nl', 'no', 'ns', 'om', 'or', 'pa', 'pl', 'ps', 'pt', 'qu', 'rm', 'ro', 'ru', 'sa', 'si', 'sc', 'sd', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'ta_rom', 'te', 'te_rom', 'th', 'tl', 'tn', 'tr', 'ug', 'uk', 'ur', 'ur_rom', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh-Hans', 'zh-Hant', 'zu']
+    )
+
+    for d in dataset:
+        for row in d['text']:
+            yield row
+
+    del dataset
+    gc.collect()
+
+    ## text
+    # dataset = (
+    #     load_dataset('csebuetnlp/xlsum', lang, split='train')
+    #     for lang in ['amharic', 'arabic', 'azerbaijani', 'bengali', 'burmese', 'chinese_simplified', 'chinese_traditional', 'english', 'french', 'gujarati', 'hausa', 'hindi', 'igbo', 'indonesian', 'japanese', 'kirundi', 'korean', 'kyrgyz', 'marathi', 'nepali', 'oromo', 'pashto', 'persian', 'pidgin', 'portuguese', 'punjabi', 'russian', 'scottish_gaelic', 'serbian_cyrillic', 'serbian_latin', 'sinhala', 'somali', 'spanish', 'swahili', 'tamil', 'telugu', 'thai', 'tigrinya', 'turkish', 'ukrainian', 'urdu', 'uzbek', 'vietnamese', 'welsh', 'yoruba']
+    # )
+    #
+    # for d in dataset:
+    #     for row in d['text']:
+    #         yield row
+    #
+    # del dataset
+    # gc.collect()
+
+    ## text
+    # dataset = load_dataset('recursal/SuperWikiNEXT-32B', split='train')
+    #
+    # for row in dataset['text']:
+    #     yield row
+    #
+    # del dataset
+    # gc.collect()
+
+    # code
+    dataset = load_dataset('m-a-p/CodeFeedback-Filtered-Instruction', split='train')
+
+    for row in dataset:
+        yield row['query'] + '\n' + row['answer']
+
+    del dataset
+    gc.collect()
+
+    # code
+    dataset = load_dataset('nampdn-ai/tiny-codes', split='train')
+
+    for row in dataset:
+        yield row['prompt'] + '\n' + row['response']
+
+    del dataset
+    gc.collect()
+
+    # math
+    dataset = load_dataset('ajibawa-2023/Maths-College', split='train')
+
+    for row in dataset:
+        yield row['instruction'] + '\n' + row['output']
+
+    del dataset
+    gc.collect()
+
+    # math
+    dataset = load_dataset('microsoft/orca-math-word-problems-200k', split='train')
+
+    for row in dataset:
+        yield row['question'] + '\n' + row['answer']
+
+    del dataset
+    gc.collect()
+
+    # text
+    dataset = load_dataset('mlabonne/FineTome-100k', split='train')
+
+    for row in dataset['conversations']:
+        yield '\n'.join(n['value'] for n in row)
+
+    del dataset
+    gc.collect()
+
+    # instruction
+    dataset = load_dataset('arcee-ai/agent-data', split='train')
+
+    for row in dataset['conversations']:
+        yield '\n'.join(n['value'] for n in row)
+
+    del dataset
+    gc.collect()
+
+    # instruction
+    dataset = (
+        load_dataset('cognitivecomputations/SystemChat-2.0', data_files='SystemChat_filtered.jsonl', split='train'),
+        load_dataset('cognitivecomputations/SystemChat-2.0', data_files='SystemChat_multilingual.jsonl', split='train'),
+    )
+
+    for d in dataset:
+        for row in d['messages']:
+            yield '\n'.join(n['content'] for n in row)
+
+    del dataset
+    gc.collect()
+
+    # emoji
+    dataset = load_dataset('badrex/llm-emoji-dataset', split='train')
+
+    for row in dataset:
+        yield f'{row["character"]}\n{row["unicode"]}\n{row["short description"]}\n{row["tags"]}\n{row["LLM description"]}'
+
+    del dataset
+    gc.collect()
+
+
+def batch_iterator():
+    for text in _batch_iterator():
+        row = {'text': text}
+        yield row
+
+tokenizer = AutoTokenizer.from_pretrained('../')
+
+dataset = Dataset.from_generator(batch_iterator)
+print(dataset)
+print(dir(dataset))
+input()
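
Note that the new script currently stops at a debug `print(dir(dataset))` / `input()` pair: the dataset is assembled but nothing is tokenized or trained yet, despite the commit message. A minimal sketch of a plausible next step, mapping the loaded tokenizer over the generated `text` column; the `tokenize` helper and the `max_length=2048` value are illustrative assumptions, not part of this commit:

# Hypothetical follow-up (not part of this commit): tokenize the generated corpus.
def tokenize(batch):
    # Batched map: batch['text'] is a list of strings; truncate each to an
    # assumed context length of 2048 tokens.
    return tokenizer(batch['text'], truncation=True, max_length=2048)

tokenized_dataset = dataset.map(tokenize, batched=True, remove_columns=['text'])
print(tokenized_dataset)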
scripts/train_tokenizer.py CHANGED
@@ -108,7 +108,7 @@ def batch_iterator():
     del dataset
     gc.collect()
 
-    # code
+    ## code
     # dataset = load_dataset('nampdn-ai/tiny-codes', split='train')
     #
     # for row in dataset: