{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "5": {
      "content": "<mask>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 2048,
  "pad_token": "<pad>",
  "padding_side": "left",
  "sep_token": "<s>",
  "tokenizer_class": "GPT2Tokenizer",
  "tokenizer_file": null,
  "unk_token": "<|endoftext|>"
}