{ "additional_special_tokens": [ "À", "Á", "Â", "Ã", "Ä", "Å", "Æ" ], "bos_token": { "__type": "AddedToken", "content": "Å", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false }, "clean_up_tokenization_spaces": true, "cls_token": { "__type": "AddedToken", "content": "Ä", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false }, "eos_token": { "__type": "AddedToken", "content": "Æ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false }, "mask_token": { "__type": "AddedToken", "content": "Á", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false }, "model_max_length": 128, "pad_token": { "__type": "AddedToken", "content": "Â", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false }, "padding_side": "right", "sep_token": { "__type": "AddedToken", "content": "Ã", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false }, "tokenizer_class": "PreTrainedTokenizerFast", "unk_token": { "__type": "AddedToken", "content": "À", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false } }