Create scripts/byte_exemplars.py
Script to write byte-level versions of the unicode and korean exemplars.
- scripts/byte_exemplars.py +37 -0
scripts/byte_exemplars.py
ADDED
@@ -0,0 +1,37 @@
+import json
+import tokenizers
+
+# unicode exemplar json
+with open("lang_exemplars.json") as file_handle:
+    unicode_exemplars = json.load(file_handle)
+
+# expected output: 17411 a
+print(len(unicode_exemplars), unicode_exemplars[0])
+
+# KS X 1001 list of 2350 korean "exemplar" characters
+with open("ksx-1001.txt") as file_handle:
+    kor_exemplar = file_handle.readlines()[0].split()
+
+# expected output: 2350 가
+print(len(kor_exemplar), kor_exemplar[0])
+
+# initialize the byte-level pre-tokenizer (no prefix space, no regex splitting)
+byte_level = tokenizers.pre_tokenizers.ByteLevel(add_prefix_space=False, use_regex=False)
+
+# map each unicode exemplar to its byte-level form
+byte_exemplars = []
+for exemplar in unicode_exemplars:
+    byte_exemplars.append(byte_level.pre_tokenize_str(exemplar)[0][0])
+byte_exemplars = sorted(byte_exemplars, key=lambda x: [x, len(x)])
+print(len(byte_exemplars), byte_exemplars[0])
+
+# map each korean exemplar to its byte-level form
+kb_exemplar = []
+for exemplar in kor_exemplar:
+    kb_exemplar.append(byte_level.pre_tokenize_str(exemplar)[0][0])
+kb_exemplar = sorted(kb_exemplar, key=lambda x: [x, len(x)])
+print(len(kb_exemplar), kb_exemplar[0])
+
+# write the byte exemplar json; ensure_ascii=False keeps the entries readable
+with open("byte_exemplars.json", "w") as out_handle:
+    json.dump(byte_exemplars, out_handle, ensure_ascii=False, indent=2)
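
For reference, a minimal sketch of the transform the script applies, assuming the HuggingFace tokenizers package: the ByteLevel pre-tokenizer splits a string into its UTF-8 bytes and remaps each byte to a printable character from the GPT-2 byte alphabet, so '가' (UTF-8 bytes EA B0 80) becomes 'ê°Ģ'. The matching ByteLevel decoder inverts the mapping.

import tokenizers

byte_level = tokenizers.pre_tokenizers.ByteLevel(add_prefix_space=False, use_regex=False)

# pre_tokenize_str returns (piece, offsets) tuples; [0][0] takes the remapped string
print(byte_level.pre_tokenize_str("가")[0][0])          # ê°Ģ

# the ByteLevel decoder undoes the byte-to-character mapping
print(tokenizers.decoders.ByteLevel().decode(["ê°Ģ"]))  # 가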
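
The ensure_ascii=False flag in the final json.dump call matters because the byte alphabet is itself non-ASCII: with the default ensure_ascii=True, every entry would be written as \uXXXX escapes. A quick self-contained illustration:

import json

print(json.dumps(["ê°Ģ"]))                      # ["\u00ea\u00b0\u0122"]
print(json.dumps(["ê°Ģ"], ensure_ascii=False))  # ["ê°Ģ"]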