Update app.py
app.py
CHANGED
@@ -8,7 +8,7 @@ from sudachipy import dictionary
 from sudachipy import tokenizer as sudachi_tokenizer
 from transformers import AutoModelForCausalLM, PreTrainedTokenizer, T5Tokenizer
 
-
+
 model_dir = Path(__file__).parents[0] / "model"
 device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
 tokenizer = T5Tokenizer.from_pretrained(model_dir)
@@ -120,7 +120,9 @@ def create_highlighted_text(
     if mean_surprisal is None:
         highlighted_text = "<h2><b>" + label + "</b></h2>"
     else:
-        highlighted_text =
+        highlighted_text = (
+            "<h2><b>" + label + f"</b>(サプライザル平均値: {mean_surprisal:.3f})</h2>"
+        )
     for token, score in tokens2scores:
         highlighted_text += highlight_token(token, score)
     return highlighted_text
@@ -168,6 +170,7 @@ def main(input_text: str) -> Tuple[str, str, str]:
     offsets = calc_offsets(sudachi_tokenize(input_text))
     tokens2surprisal = aggregate_surprisals_by_offset(char2surprisal, offsets)
     tokens2surprisal = normalize_surprisals(tokens2surprisal)
+
     highlighted_text = create_highlighted_text(
         "学習後モデル", tokens2surprisal, mean_surprisal
     )
@@ -200,13 +203,13 @@ def main(input_text: str) -> Tuple[str, str, str]:
 if __name__ == "__main__":
     demo = gr.Interface(
         fn=main,
-        title="
-        description="
+        title="文章の読みやすさを自動評価するAI",
+        description="文章を入力すると、読みづらい表現は赤く、読みやすい表現は青くハイライトされて出力されます。",
         show_label=True,
         inputs=gr.Textbox(
             lines=5,
-            label="
-            placeholder="
+            label="文章",
+            placeholder="ここに文章を入力してください。",
         ),
         outputs=[
             gr.HTML(label="学習前モデル", show_label=True),
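
The diff consumes per-token surprisal values (char2surprisal, tokens2surprisal) that are produced elsewhere in app.py and are not shown here. As a minimal sketch, assuming the causal LM and tokenizer loaded at the top of the file, per-token surprisal can be computed as -log2 p(token | prefix); the helper name token_surprisals and the bit-based scaling are illustrative, not the Space's actual code.

```python
import torch
from math import log


def token_surprisals(text, tokenizer, model, device):
    """Hypothetical helper: per-token surprisal -log2 p(token | prefix), in bits."""
    enc = tokenizer(text, return_tensors="pt").to(device)
    input_ids = enc["input_ids"]
    with torch.no_grad():
        logits = model(input_ids=input_ids).logits  # (1, seq_len, vocab_size)
    # Logits at position i predict the token at position i + 1.
    log_probs = torch.log_softmax(logits[:, :-1, :], dim=-1)
    targets = input_ids[:, 1:]
    token_logp = log_probs.gather(-1, targets.unsqueeze(-1)).squeeze(-1).squeeze(0)
    surprisals = (-token_logp / log(2)).tolist()
    tokens = tokenizer.convert_ids_to_tokens(targets.squeeze(0))
    return list(zip(tokens, surprisals))
```

In main(), aggregate_surprisals_by_offset and normalize_surprisals then map such scores onto the SudachiPy word offsets before highlighting, as the context lines of the third hunk show.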
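The rest of the change is presentation. The else branch of create_highlighted_text now reports the mean surprisal (サプライザル平均値) in the section header, and the new gr.Interface strings give the app a Japanese title ("An AI that automatically evaluates how easy a text is to read"), a description ("enter a text and hard-to-read expressions are highlighted red, easy-to-read ones blue"), an input label ("Text"), and a placeholder ("Enter your text here."). The per-token colouring is delegated to highlight_token, whose body is not part of this diff; the sketch below shows one way such a helper could implement the red/blue mapping, assuming scores normalized to [0, 1]. The rgba mapping is an assumption, while the create_highlighted_text body simply restates the lines added above.

```python
def highlight_token(token: str, score: float) -> str:
    """Hypothetical colour mapping: high surprisal -> red, low surprisal -> blue."""
    score = min(max(score, 0.0), 1.0)  # assumed: scores already normalized to [0, 1]
    red, blue = int(255 * score), int(255 * (1.0 - score))
    return f'<span style="background-color: rgba({red}, 0, {blue}, 0.4)">{token}</span>'


def create_highlighted_text(label, tokens2scores, mean_surprisal=None):
    # Matches the changed else-branch: the header now includes the mean surprisal.
    if mean_surprisal is None:
        highlighted_text = "<h2><b>" + label + "</b></h2>"
    else:
        highlighted_text = (
            "<h2><b>" + label + f"</b>(サプライザル平均値: {mean_surprisal:.3f})</h2>"
        )
    for token, score in tokens2scores:
        highlighted_text += highlight_token(token, score)
    return highlighted_text
```

For example, create_highlighted_text("学習後モデル", [("今日", 0.1), ("は", 0.2), ("難解", 0.9)], mean_surprisal=0.4) returns the labelled header followed by three spans shading from blue to red.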