add tokenizer
- merges.txt +0 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>"}
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "special_tokens_map_file": "/root/.cache/huggingface/transformers/eea55dc204333a37c9f45d89c1052c0a9d63e06b9436506032d8242ad5e6df36.78b6ed69c656c44d9052f32c25d283e88026750a6f2b9eda91648840c84ef783", "name_or_path": "GroNLP/gpt2-medium-dutch-embeddings", "tokenizer_class": "GPT2Tokenizer"}
vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
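
Together these five files (vocab.json, merges.txt, tokenizer.json, special_tokens_map.json, tokenizer_config.json) form the complete on-disk state of a byte-level BPE GPT-2 tokenizer, so the repository can be loaded directly with transformers once this commit is pushed. A minimal sketch of loading and using it; the repo id below is a placeholder for wherever this commit lives (the config's name_or_path records the GroNLP/gpt2-medium-dutch-embeddings tokenizer it was derived from):

```python
from transformers import AutoTokenizer

# Placeholder repo id -- substitute the repository this commit was pushed to.
tokenizer = AutoTokenizer.from_pretrained("user/model-with-this-tokenizer")

# tokenizer_config.json declares a GPT2Tokenizer with <s>, </s>, and <unk>
# as the bos/eos/unk special tokens.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)

# Byte-level BPE round trip driven by vocab.json + merges.txt.
ids = tokenizer("Dit is een voorbeeldzin.").input_ids
print(tokenizer.convert_ids_to_tokens(ids))
print(tokenizer.decode(ids))
```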