Spaces: No application file
Tatiana committed
Commit • 0d3411a
Parent(s): 08f4a85
init
Browse files
- .gitattributes +2 -0
- README.md +3 -3
- model/config.json +47 -0
- nlp_st.py +120 -0
- pictures/im1.png +0 -0
- requirements.txt +77 -0
- task2.py +43 -0
- task3.py +32 -0
- tokenizer/special_tokens_map.json +7 -0
- tokenizer/tokenizer_config.json +57 -0
- tokenizer/vocab.txt +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+model/model.safetensors filter=lfs diff=lfs merge=lfs -text
+hunter_generator.pt filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,8 +1,8 @@
 ---
 title: Nlp Project
-emoji:
-colorFrom:
-colorTo:
+emoji: 📉
+colorFrom: blue
+colorTo: yellow
 sdk: streamlit
 sdk_version: 1.29.0
 app_file: app.py
model/config.json
ADDED
@@ -0,0 +1,47 @@
+{
+  "_name_or_path": "DeepPavlov/rubert-base-cased",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3",
+    "4": "LABEL_4"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2,
+    "LABEL_3": 3,
+    "LABEL_4": 4
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.35.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 119547
+}
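Note that this config still carries the placeholder names LABEL_0…LABEL_4 in id2label/label2id, while the app decodes predictions with a separate LabelEncoder in task2.py. A minimal sketch (not part of the commit), assuming the training indices follow the alphabetical order that task2.py's LabelEncoder produces — this ordering should be verified against the actual training run — of writing readable topic names into the config so that model.config.id2label decodes predictions directly:

```python
from transformers import AutoConfig

# Assumed order: LabelEncoder in task2.py sorts its labels alphabetically.
topics = ['крипта', 'мода', 'спорт', 'технологии', 'финансы']

config = AutoConfig.from_pretrained("model")            # the config.json added above
config.id2label = {i: t for i, t in enumerate(topics)}
config.label2id = {t: i for i, t in enumerate(topics)}
config.save_pretrained("model")                         # rewrites model/config.json
```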
nlp_st.py
ADDED
@@ -0,0 +1,120 @@
+import torch
+import streamlit as st
+import io
+import imageio
+from PIL import Image
+import torch.nn as nn
+import time
+from task2 import predict_class
+from task3 import generate_text
+import tempfile
+import os
+from transformers import pipeline
+
+# Replace "your-user/your-new-repo" with your own path on Hugging Face
+model_path = "HaggiVaggi/nlp_project"
+generator = pipeline('text-generation', model=model_path)
+
+st.title('Обработка естественного языка • Natural Language Processing')
+
+with st.sidebar:
+    st.header('Выберите страницу')
+    page = st.selectbox("Выберите страницу", ["Главная", "Отзывы на рестораны",\
+                                              "Тематика новостей", "GPT by GPT-team", "Итоги"])
+
+if page == "Главная":
+    st.header('Выполнила команда "GPT":')
+    st.subheader('🦁Рома')
+    st.subheader('🐯Руслан')
+    st.subheader('🐱Тата')
+
+
+    st.header(" 🌟 " * 10)
+
+    st.header('Наши задачи:')
+    st.subheader('*Задача №1*: Классификация отзыва на рестораны')
+    st.subheader('*Задача №2*: Классификация тематики новостей из телеграм каналов')
+    st.subheader('*Задача №3*: Генерация текста GPT-моделью по пользовательскому prompt')
+
+
+elif page == "Отзывы на рестораны":
+    st.header("Отзывы на рестораны:")
+
+
+elif page == "Тематика новостей":
+    st.header("Тематика новостей:")
+
+    st.markdown(f"<span style='font-size:{30}px; color:purple'>{'Модель: DeepPavlov/rubert-base-cased'}</span>", unsafe_allow_html=True)
+    st.info('Модель основана на архитектуре BERT (Bidirectional Encoder Representations from Transformers), представленной в [статье](https://arxiv.org/abs/1810.04805)')
+    st.info('Rubert-base-cased: "cased" означает, что в этой модели сохранен регистр слов. Это важно для русского языка, где регистр может влиять на смысл слов.')
+    st.info('В библиотеке [Transformers от Hugging Face](https://huggingface.co/DeepPavlov/rubert-base-cased), слой классификации представляется в виде BertForSequenceClassification. Этот классификатор добавляется к основной модели BERT и обучается на конкретной задаче классификации текста.')
+    user_input = st.text_area('Введите текст поста и мы узнаем, к какой тематике его отнести:')
+    if st.button("Предсказать"):
+        pred = predict_class(user_input)
+        st.subheader("Это текст по теме:")
+        st.markdown(f'<span style="font-size:{25}px; color:pink">{pred}</span>', unsafe_allow_html=True)
+    st.subheader("Accuracy и Loss на 5 эпохах")
+    image_1 = imageio.imread('pictures/im1.png')[:, :, :]
+    st.image(image_1)
+
+
+
+elif page == "GPT by GPT-team":
+    st.header("GPT by GPT-team:")
+    st.markdown(f"<span style='font-size:{30}px; color:green'>{'Модель: GPT2LMHeadModel'}</span>", unsafe_allow_html=True)
+    st.info('[GPT2LMHeadModel](https://huggingface.co/docs/transformers/model_doc/gpt2) - это модель, способная генерировать текст, учитывая предшествующий контекст.')
+    st.info('[Sberbank-ai/rugpt3small_based_on_gpt2](https://huggingface.co/ai-forever/rugpt3small_based_on_gpt2): Это конкретная предобученная модель GPT-2, которая была дообучена на русском языке командой Sber AI.\
+            Она обладает способностью генерировать текст, принимая на вход текстовый контекст.')
+    user_input2 = st.text_area("Введите текст:", "")
+    if st.button("Сгенерировать"):
+        generated = generate_text(user_input2)
+        st.subheader("Сгенерированный текст:")
+        st.markdown(f'<span style="font-size:{25}px; color:green">{generated}</span>', unsafe_allow_html=True)
+
+
+
+    # st.subheader("- Модель: *ConvAutoencoder()*")
+    # st.subheader("- Количество эпох обучения: *100*")
+
+    # st.info('Расширение картинки должно быть в формате .jpg /.jpeg /.png')
+    # image_url2 = st.text_input("Введите URL изображения")
+    # start_time2 = time.time()
+
+    # if image_url2:
+    #     # Load the image from the URL
+    #     response2 = requests.get(image_url2)
+    #     image2 = Image.open(io.BytesIO(response2.content))
+    #     st.subheader('Ваше фото до обработки:')
+    #     st.image(image2)
+    #     prediction_result = predict_1(image2)
+
+    #     show_result_button3 = st.button("Показать результат", key="result_button_3")
+    #     if show_result_button3:
+    #         st.success("Ваш результат готов!")
+
+    #         st.subheader("Ваше фото после обработки:")
+    #         st.image(prediction_result, channels='GRAY')
+    #         st.subheader(f'Время предсказания: {round((time.time() - start_time2), 2)} сек.')
+    # st.header('🎈' * 10)
+
+
+
+
+
+elif page == "Итоги":
+    st.header('Результаты и выводы')
+    # st.subheader('*Задача №1*: Детектирование ветряных мельниц')
+
+    # st.subheader("Метрики из Clear ML")
+    # image_1 = Image.open("pictures/P_curve.png")
+    # image_2 = Image.open("pictures/PR_curve.png")
+    # image_3 = Image.open("pictures/R_curve.png")
+    # image_4 = Image.open("pictures/F1_curve.png")
+
+    # # Display the images in a single row
+    # st.image([image_1, image_2, image_3, image_4], caption=['Image 1 - P_curve', 'Image 2 - PR_curve', 'Image 3 - R_curve', 'Image 4 - F1_curve'], width=300)
+
+    # st.subheader("Результативные графики из Clear ML")
+    # image_5 = imageio.imread('pictures/plots.jpg')[:, :, :]
+    # st.image(image_5)
+
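One practical note: Streamlit re-executes nlp_st.py from top to bottom on every widget interaction, so the module-level `pipeline('text-generation', model=model_path)` call above is repeated each time. A minimal sketch (not part of the commit) of caching that load with `st.cache_resource`, which is available in the pinned streamlit==1.29.0; the model path simply mirrors the one used in the file:

```python
import streamlit as st
from transformers import pipeline

@st.cache_resource  # cache the pipeline object across Streamlit reruns
def load_generator(model_path: str):
    # Building a text-generation pipeline is expensive; do it once per process.
    return pipeline('text-generation', model=model_path)

generator = load_generator("HaggiVaggi/nlp_project")
```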
pictures/im1.png
ADDED
requirements.txt
ADDED
@@ -0,0 +1,77 @@
+altair==5.2.0
+attrs==23.1.0
+blinker==1.7.0
+cachetools==5.3.2
+certifi==2023.11.17
+charset-normalizer==3.3.2
+click==8.1.7
+filelock==3.13.1
+fsspec==2023.12.1
+gitdb==4.0.11
+GitPython==3.1.40
+huggingface-hub==0.19.4
+idna==3.6
+imageio==2.33.0
+importlib-metadata==6.11.0
+Jinja2==3.1.2
+joblib==1.3.2
+jsonschema==4.20.0
+jsonschema-specifications==2023.11.2
+markdown-it-py==3.0.0
+MarkupSafe==2.1.3
+mdurl==0.1.2
+mpmath==1.3.0
+networkx==3.2.1
+numpy==1.26.2
+nvidia-cublas-cu12==12.1.3.1
+nvidia-cuda-cupti-cu12==12.1.105
+nvidia-cuda-nvrtc-cu12==12.1.105
+nvidia-cuda-runtime-cu12==12.1.105
+nvidia-cudnn-cu12==8.9.2.26
+nvidia-cufft-cu12==11.0.2.54
+nvidia-curand-cu12==10.3.2.106
+nvidia-cusolver-cu12==11.4.5.107
+nvidia-cusparse-cu12==12.1.0.106
+nvidia-nccl-cu12==2.18.1
+nvidia-nvjitlink-cu12==12.3.101
+nvidia-nvtx-cu12==12.1.105
+packaging==23.2
+pandas==2.1.3
+Pillow==10.1.0
+protobuf==4.25.1
+pyarrow==14.0.1
+pydeck==0.8.1b0
+Pygments==2.17.2
+python-dateutil==2.8.2
+pytz==2023.3.post1
+PyYAML==6.0.1
+referencing==0.32.0
+regex==2023.10.3
+requests==2.31.0
+rich==13.7.0
+rpds-py==0.13.2
+safetensors==0.4.1
+scikit-learn==1.3.2
+scipy==1.11.4
+sentencepiece==0.1.99
+six==1.16.0
+smmap==5.0.1
+streamlit==1.29.0
+sympy==1.12
+tenacity==8.2.3
+threadpoolctl==3.2.0
+tokenizers==0.15.0
+toml==0.10.2
+toolz==0.12.0
+torch==2.1.1
+tornado==6.4
+tqdm==4.66.1
+transformers==4.35.2
+triton==2.1.0
+typing_extensions==4.8.0
+tzdata==2023.3
+tzlocal==5.2
+urllib3==2.1.0
+validators==0.22.0
+watchdog==3.0.0
+zipp==3.17.0
task2.py
ADDED
@@ -0,0 +1,43 @@
+from transformers import BertTokenizer, BertForSequenceClassification
+import torch
+from sklearn.preprocessing import LabelEncoder
+
+# Load the saved model and tokenizer for the Streamlit app
+loaded_model_path = "/home/tata/DS_bootcamp/ds-phase-2/10-nlp/project4/model"
+loaded_tokenizer_path = "/home/tata/DS_bootcamp/ds-phase-2/10-nlp/project4/tokenizer"
+
+loaded_model = BertForSequenceClassification.from_pretrained(loaded_model_path)
+loaded_tokenizer = BertTokenizer.from_pretrained(loaded_tokenizer_path)
+
+labels = ['мода', 'спорт', 'технологии', 'финансы', 'крипта']
+label_encoder = LabelEncoder()
+label_encoder.fit(labels)
+
+def predict_class(user_input, model=loaded_model, tokenizer=loaded_tokenizer, label_encoder=label_encoder, max_length=128):
+    if not user_input:
+        return "Введите текст"
+    def tokenize_text(text):
+        encoded_text = tokenizer.encode_plus(
+            text,
+            add_special_tokens=True,
+            max_length=max_length,
+            pad_to_max_length=True,
+            return_attention_mask=True,
+            return_tensors='pt'
+        )
+        return encoded_text
+
+    encoded_text = tokenize_text(user_input)
+    with torch.no_grad():
+        model.eval()
+        input_ids = encoded_text['input_ids']
+        attention_mask = encoded_text['attention_mask']
+        outputs = model(input_ids, attention_mask=attention_mask)
+        logits = outputs.logits
+        predicted_class_index = torch.argmax(logits, dim=1).item()
+
+    # Get the class name
+    predicted_class = label_encoder.classes_[predicted_class_index]
+    return predicted_class
+
+
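As committed, task2.py hard-codes absolute paths under /home/tata/..., which only exist on the author's machine; the model/ and tokenizer/ directories added in this commit sit next to task2.py in the repository (note that the model weights file itself is only LFS-registered in .gitattributes, not part of this commit's file list, so it would still need to be present). A minimal sketch, assuming that layout, of resolving the paths relative to the file instead — this is not what was committed:

```python
from pathlib import Path
from transformers import BertTokenizer, BertForSequenceClassification

# Assumed relative layout: "model" and "tokenizer" folders next to task2.py.
BASE_DIR = Path(__file__).resolve().parent
loaded_model = BertForSequenceClassification.from_pretrained(BASE_DIR / "model")
loaded_tokenizer = BertTokenizer.from_pretrained(BASE_DIR / "tokenizer")
```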
task3.py
ADDED
@@ -0,0 +1,32 @@
+import streamlit as st
+import torch
+from transformers import GPT2LMHeadModel, GPT2Tokenizer
+
+
+model_name_or_path = "sberbank-ai/rugpt3small_based_on_gpt2"
+tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path)
+model = GPT2LMHeadModel.from_pretrained(
+    model_name_or_path,
+    output_attentions=False,
+    output_hidden_states=False,
+)
+
+# Load the saved fine-tuned weights
+model_weights_path = "/home/tata/DS_bootcamp/ds-phase-2/10-nlp/project4/hunter_generator.pt"
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+model.load_state_dict(torch.load(model_weights_path, map_location=device))
+model.eval()
+def generate_text(user_input, model=model, tokenizer=tokenizer):
+    input_ids = tokenizer.encode(user_input, return_tensors="pt")
+    with torch.no_grad():
+        out = model.generate(
+            input_ids,
+            do_sample=True,
+            num_beams=3,
+            temperature=1.05,
+            top_p=.8,
+            max_length=50,
+        )
+    generated_text = list(map(tokenizer.decode, out))[0]
+    return generated_text
+
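The same caveat applies here: the checkpoint path points into the author's home directory, while hunter_generator.pt is registered for git-lfs in the .gitattributes change above (the checkpoint itself is not in this commit's file list). A minimal sketch, assuming the checkpoint sits at the repository root next to task3.py, of loading it with a relative path — again, not what was committed:

```python
from pathlib import Path
import torch
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("sberbank-ai/rugpt3small_based_on_gpt2")
# Assumed relative location of the LFS-tracked checkpoint referenced by .gitattributes.
weights = Path(__file__).resolve().parent / "hunter_generator.pt"
state_dict = torch.load(weights, map_location="cpu")  # CPU is sufficient for inference on a Space
model.load_state_dict(state_dict)
model.eval()
```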
tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
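special_tokens_map.json, tokenizer_config.json, and vocab.txt are the standard artifacts written by the slow BertTokenizer's save_pretrained, and they are what task2.py reloads. A minimal sketch of that round trip, with the directory name taken from this commit:

```python
from transformers import BertTokenizer

# Save: writes special_tokens_map.json, tokenizer_config.json, vocab.txt
# (plus added_tokens.json if custom tokens were added).
tokenizer = BertTokenizer.from_pretrained("DeepPavlov/rubert-base-cased")
tokenizer.save_pretrained("tokenizer")

# Reload from the committed directory, as task2.py does (there via an absolute path).
reloaded = BertTokenizer.from_pretrained("tokenizer")
```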
tokenizer/vocab.txt
ADDED
The diff for this file is too large to render. See the raw diff.