fix bugs
- 1_app.py +44 -0
- 1_textInput.py +18 -0
- BERT_inference.py +4 -203
- FZY3JW.cw127.pkl +0 -0
- FZY3JW.pkl +0 -0
- __pycache__/BERT_inference.cpython-39.pyc +0 -0
- __pycache__/classification.cpython-39.pyc +0 -0
- __pycache__/inference.cpython-39.pyc +0 -0
- __pycache__/outline.cpython-39.pyc +0 -0
- __pycache__/run.cpython-39.pyc +0 -0
- __pycache__/util.cpython-39.pyc +0 -0
- app.py +18 -9
- classification.py +2 -2
- inference.py +3 -17
- outline.py +117 -18
- run.py +4 -5
- test.txt +32 -0
- textInput.py +102 -4
- train.py +217 -0
- util.py +10 -3
1_app.py
ADDED
@@ -0,0 +1,44 @@
import numpy as np
import gradio as gr
import textInput

output = []
keys = []

# css = ".output {min-height: 500px}"


with gr.Blocks(css=".output {min-height: 500px}") as demo:
    # Page header, written with Markdown
    gr.Markdown("# 文本分类系统")
    gr.Markdown("请选择要输入的文件或填入文本")
    topic_num = gr.Number(label="主题个数")
    max_length = gr.Number(label="摘要最大长度")
    with gr.Tabs():
        with gr.Tab("文本输入"):
            text_input = gr.TextArea(lines=10)
            text_button = gr.Button("生成")

        with gr.Tab("文件输入"):
            gr.Markdown("目前支持的格式有PDF、Word、txt")
            file_input = gr.File(file_types=["text", ".pdf", ".docx"])
            file_button = gr.Button("生成")
    # Output tab panels
    with gr.Tabs():
        with gr.Tab("分类页"):
            text_keys_output = gr.TextArea(lines=30)

        with gr.Tab("摘要页",):
            # Blocks-specific container; child components are laid out horizontally
            text_ab_output = gr.TextArea(lines=30)

        with gr.Tab("下载页"):
            file_txt_output = gr.File(label="txt格式")
            file_docx_output = gr.File(label="docx格式")
            file_pdf_output = gr.File(label="pdf格式")
    # with gr.Accordion("Open for More!"):
    #     gr.Markdown("Look at me...")
    text_button.click(textInput.text_dump_to_lines, inputs=[text_input, topic_num, max_length], outputs=[text_keys_output, text_ab_output, file_txt_output, file_docx_output, file_pdf_output])
    file_button.click(textInput.file_dump_to_lines, inputs=[file_input, topic_num, max_length], outputs=[text_keys_output, text_ab_output, file_txt_output, file_docx_output, file_pdf_output])

demo.launch()
1_textInput.py
ADDED
@@ -0,0 +1,18 @@
import docx
from docx.oxml.ns import qn
from docx.shared import Pt, RGBColor
import fitz
import os
from fpdf import FPDF
import run

def text_dump_to_lines(text, topic_num, max_length):
    lines = [x.strip() for x in text.split("\n") if x.strip() != '']
    sentences = run.texClear(lines)
    keys, output = run.textToAb(sentences, lines, int(topic_num), int(max_length))
    keysText = "\n".join(keys)
    outputText = "\n".join(output)
    # text = "\n".join(lines)
    # return text, text, dump_to_txt(lines), dump_to_docx(lines), dump_to_pdf(lines)
    return keysText, outputText, dump_to_txt(output), dump_to_docx(output), dump_to_pdf(output)
BERT_inference.py
CHANGED
@@ -1,217 +1,18 @@
The 217-line training script is reduced to the model definition: the removed dataset splitting, the encoder and load_data helpers, the dev and train loops, and the training entry point now live in the new train.py (shown further below). Only the BertClassificationModel class, which inference.py and textInput.py now import from this module, stays here. The resulting file:

import transformers
import torch.nn as nn

class BertClassificationModel(nn.Module):
    def __init__(self):
        super(BertClassificationModel, self).__init__()
        pretrained_weights = "bert-base-chinese"
        self.bert = transformers.BertModel.from_pretrained(pretrained_weights)
        for param in self.bert.parameters():
            param.requires_grad = True
        self.dense = nn.Linear(768, 3)

    def forward(self, input_ids, token_type_ids, attention_mask):
        bert_output = self.bert(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
        bert_cls_hidden_state = bert_output[1]
        linear_output = self.dense(bert_cls_hidden_state)
        return linear_output
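For a quick sanity check of the slimmed-down module, something along these lines should work. This is a sketch, not part of the commit; it downloads bert-base-chinese on first use, and the three logits correspond to the neutral/entailment/contradiction labels defined in train.py:

import torch
from transformers import BertTokenizer
from BERT_inference import BertClassificationModel

tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
# Same prompt shape that inference.py feeds the classifier
batch = tokenizer(["主题A 是否包含 主题B"], padding=True, truncation=True, max_length=150, return_tensors="pt")
model = BertClassificationModel()
model.eval()
with torch.no_grad():
    logits = model(batch["input_ids"], batch["token_type_ids"], batch["attention_mask"])
print(logits.shape)  # torch.Size([1, 3])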
FZY3JW.cw127.pkl
ADDED
Binary file (518 Bytes).

FZY3JW.pkl
ADDED
Binary file (140 kB).

__pycache__/BERT_inference.cpython-39.pyc
ADDED
Binary file (1.08 kB).

__pycache__/classification.cpython-39.pyc
CHANGED
Binary files a/__pycache__/classification.cpython-39.pyc and b/__pycache__/classification.cpython-39.pyc differ

__pycache__/inference.cpython-39.pyc
CHANGED
Binary files a/__pycache__/inference.cpython-39.pyc and b/__pycache__/inference.cpython-39.pyc differ

__pycache__/outline.cpython-39.pyc
CHANGED
Binary files a/__pycache__/outline.cpython-39.pyc and b/__pycache__/outline.cpython-39.pyc differ

__pycache__/run.cpython-39.pyc
CHANGED
Binary files a/__pycache__/run.cpython-39.pyc and b/__pycache__/run.cpython-39.pyc differ

__pycache__/util.cpython-39.pyc
CHANGED
Binary files a/__pycache__/util.cpython-39.pyc and b/__pycache__/util.cpython-39.pyc differ
app.py
CHANGED
@@ -5,31 +5,40 @@ import textInput
 output = []
 keys = []
 
+# css = ".output {min-height: 500px}"
 
+
+with gr.Blocks(css=".output {min-height: 500px}") as demo:
     # Page header, written with Markdown
     gr.Markdown("# 文本分类系统")
     gr.Markdown("请选择要输入的文件或填入文本")
-    topic_num = gr.Number(label
+    topic_num = gr.Number(label="主题个数")
     max_length = gr.Number(label="摘要最大长度")
     with gr.Tabs():
         with gr.Tab("文本输入"):
-            text_input = gr.
+            text_input = gr.TextArea(lines=10)
            text_button = gr.Button("生成")

         with gr.Tab("文件输入"):
             gr.Markdown("目前支持的格式有PDF、Word、txt")
-            file_input = gr.File()
+            file_input = gr.File(file_types=["text", ".pdf", ".docx"])
+            file_button = gr.Button("生成")
     # Output tab panels
     with gr.Tabs():
         with gr.Tab("分类页"):
-            text_keys_output = gr.
+            text_keys_output = gr.TextArea(lines=30)
 
-        with gr.Tab("摘要页"):
+        with gr.Tab("摘要页",):
             # Blocks-specific container; child components are laid out horizontally
-            text_ab_output = gr.
+            text_ab_output = gr.TextArea(lines=30)
+
+        with gr.Tab("下载页"):
+            file_txt_output = gr.File(label="txt格式")
+            file_docx_output = gr.File(label="docx格式")
+            file_pdf_output = gr.File(label="pdf格式")
     # with gr.Accordion("Open for More!"):
     #     gr.Markdown("Look at me...")
-    text_button.click(textInput.
-
+    text_button.click(textInput.text_dump_to_lines, inputs=[text_input, topic_num, max_length], outputs=[text_keys_output, text_ab_output, file_txt_output, file_docx_output, file_pdf_output])
+    file_button.click(textInput.file_dump_to_lines, inputs=[file_input, topic_num, max_length], outputs=[text_keys_output, text_ab_output, file_txt_output, file_docx_output, file_pdf_output])
+
 demo.launch()
classification.py
CHANGED
@@ -12,13 +12,13 @@ def classify_by_topic(articles, central_topics):
 def compute_similarity(articles, central_topics):
 
     model = AutoModel.from_pretrained("distilbert-base-multilingual-cased")
-    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-multilingual-cased"
+    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-multilingual-cased")
 
     def sentence_to_vector(sentence, context):
 
         sentence = context[0]+context[1]+sentence*4+context[2]+context[3]
         tokens = tokenizer.encode_plus(
-            sentence, add_special_tokens=True, return_tensors="pt")
+            sentence, add_special_tokens=True, return_tensors="pt", max_length=512, truncation=True)
 
         outputs = model(**tokens)
         hidden_states = outputs.last_hidden_state
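For context on the second change: sentence_to_vector repeats the sentence four times and concatenates its context, and DistilBERT, like other BERT-family encoders, only has 512 token positions, so over-long inputs previously failed inside the model. A standalone sketch of the fixed call, with an illustrative input:

from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-multilingual-cased")
model = AutoModel.from_pretrained("distilbert-base-multilingual-cased")

long_text = "这是一个很长的句子。" * 200  # deliberately longer than 512 tokens

# Without max_length/truncation the tokenizer returns the full sequence and the
# encoder fails beyond its 512 position embeddings; with the fix it is clipped.
tokens = tokenizer.encode_plus(long_text, add_special_tokens=True,
                               return_tensors="pt", max_length=512, truncation=True)
outputs = model(**tokens)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 512, 768])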
inference.py
CHANGED
@@ -5,6 +5,7 @@ import torch
 import torch.nn as nn
 from torch import cuda
 from transformers import BertTokenizer
+from BERT_inference import BertClassificationModel
 
 
 def encoder(max_len,text):
@@ -35,31 +36,16 @@ def predict(model,device,text):
     # print(probs)
     return probs[0][1]
 
-
-class BertClassificationModel(nn.Module):
-    def __init__(self):
-        super(BertClassificationModel, self).__init__()
-        pretrained_weights="bert-base-chinese"
-        self.bert = transformers.BertModel.from_pretrained(pretrained_weights)
-        for param in self.bert.parameters():
-            param.requires_grad = True
-        self.dense = nn.Linear(768, 3)
-
-    def forward(self, input_ids,token_type_ids,attention_mask):
-        bert_output = self.bert(input_ids=input_ids,token_type_ids=token_type_ids, attention_mask=attention_mask)
-        bert_cls_hidden_state = bert_output[1]
-        linear_output = self.dense(bert_cls_hidden_state)
-        return linear_output
 
 def inference_matrix(topics):
     device = torch.device('cuda' if cuda.is_available() else 'cpu')
-    load_path = "bert_model.pkl"
+    load_path = "TSA/bert_model.pkl"
     model = torch.load(load_path, map_location=torch.device(device))
     matrix = np.zeros([len(topics), len(topics)], dtype=float)
     for i, i_text in enumerate(topics):
         for j, j_text in enumerate(topics):
             if (i == j):
-                matrix[i][j] =
+                matrix[i][j] = 0
             else:
                 test = i_text + " 是否包含 " + j_text
                 outputs = predict(model, device, test)
outline.py
CHANGED
@@ -1,38 +1,137 @@
The previous implementation grouped sentences with hierarchical clustering (linkage/fcluster over the matrix); it is replaced by a forest built directly from the pairwise probability matrix. The resulting file:

import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
import matplotlib.pyplot as plt
import numpy as np

def find_parent(matrix, node):
    # All candidate parents of this node and their probabilities
    parents = matrix[:, node]
    # Take the parent with the largest probability
    max_parent = np.argmax(parents)
    # If that probability is non-zero, return the parent
    if parents[max_parent] > 0:
        return max_parent
    # Otherwise the node has no parent
    else:
        return None


def find_tree(matrix, node, depth=1, children=[], max_depth=1, visited=set()):
    # Collected parent/child edges
    result = []
    # Most likely parent of this node
    parent = find_parent(matrix, node)
    # If a parent exists and has not been visited yet
    if parent is not None and parent not in visited:
        # Record the parent -> node edge
        result.append([parent, node])
        # Record node -> child edges for every known child
        for child in children:
            result.append([node, child])
        # Recurse upwards while below the maximum depth
        if depth < max_depth:
            # Mark the current node as visited
            visited.add(node)
            # Continue building the tree from the parent
            result.extend(find_tree(matrix, parent, depth + 1, visited=visited))
    # Return the list of edges
    return result

# Probability of a tree: the product of its edge probabilities
def find_prob(tree, matrix):
    prob = 1
    # Multiply the probability of every parent/child edge
    for parent, child in tree:
        prob *= matrix[parent][child]
    return prob

# Given the probability matrix (and a k value), return the most likely forest and its probability
def find_forests(matrix, k):
    # Candidate trees and their accumulated probabilities
    forests = {}
    # Visit every node
    for i in range(len(matrix)):
        # Probabilities of all possible children of this node
        children = matrix[i]
        # Collect the children with non-zero probability
        child_list = []
        for j in range(len(children)):
            if children[j] > 0:
                child_list.append(j)
        # Build the tree around node i
        tree = find_tree(matrix, i, children=child_list)
        tree = tuple([tuple(x) for x in tree])  # make the edge list hashable
        if tree:
            prob = find_prob(tree, matrix)
            # Accumulate the probability if the same tree was produced before
            if tuple(tree) in forests:
                forests[tuple(tree)] += prob
            # Otherwise register the tree for the first time
            else:
                forests[tuple(tree)] = prob
    # Sort the trees by probability, descending
    sorted_forests = sorted(forests.items(), key=lambda x: x[1], reverse=True)
    # Keep the most likely forest and its probability
    forest, prob = sorted_forests[0]
    # Rebuild it as a parent -> [children] mapping
    result = {}
    for parent, child in forest:
        if parent in result:
            result[parent].append(child)
        else:
            result[parent] = [child]
    # Return the hierarchical structure and its probability
    return result, prob

def passage_outline(matrix, sentences):
    # Z = linkage(matrix, method="average")
    # median = np.median(matrix)
    # print(median)
    # print('yes')
    # labels = fcluster(Z, t=median, criterion="distance")
    # print(labels)
    # Build the most likely forest from the probability matrix (maximum depth 1)
    result, prob = find_forests(matrix, 1)
    print(result, prob)
    # Assemble the outline from the forest and the topic sentences
    structure = {}
    for each in result.keys():
        structure[each] = [sentences[i] for i in result[each]]
    # for label, sentence in zip(result.keys(), sentences):
    #     if label not in structure:
    #         structure[label] = []
    #     structure[label].append(sentence)
    outline = ""
    outline_list = []
    for key in sorted(structure.keys()):
        outline_list.append(f"主题:")
        outline = outline + f"主题:\n"
        for sentence in structure[key]:
            outline_list.append(sentence)
            outline = outline + f"- {sentence}\n"
    return outline, outline_list

if __name__ == "__main__":
    matrix = np.array([[0.0, 0.02124888, 0.10647043, 0.09494194, 0.0689209],
                       [0.01600688, 0.0, 0.05879448, 0.0331325, 0.0155093],
                       [0.01491911, 0.01652437, 0.0, 0.04714563, 0.04577385],
                       [0.01699071, 0.0313585, 0.040299, 0.0, 0.014933],
                       [0.02308992, 0.02791895, 0.06547201, 0.08517842, 0.0]])
    sentences = ["主题句子1", "主题句子2", "主题句子3", "主题句子4", "主题句子5"]
    print(passage_outline(matrix, sentences)[0])
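To see how the new parent selection behaves, here is a small self-contained check mirroring find_parent's argmax-over-column logic. The 3x3 matrix is hypothetical; entry [i][j] is read as the probability that topic i contains topic j:

import numpy as np

m = np.array([
    [0.0, 0.8, 0.1],
    [0.1, 0.0, 0.7],
    [0.2, 0.1, 0.0],
])

# The parent of a node is the row with the largest value in that node's column.
for node in range(3):
    parent = int(np.argmax(m[:, node]))
    print(node, "->", parent if m[parent, node] > 0 else None)
# Prints: 0 -> 2, 1 -> 0, 2 -> 1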
run.py
CHANGED
@@ -25,6 +25,8 @@ from inference import BertClassificationModel
 
 def texClear(article):
     sentencesCleared = [util.clean_text(sentence) for sentence in article]
+    sentencesCleared = [string for string in sentencesCleared if string != '']
+    # print(sentencesCleared)
     return sentencesCleared
 
 def textToAb(sentences, article, topic_num, max_length):
@@ -34,14 +36,11 @@ def textToAb(sentences, article, topic_num, max_length):
     title_dict,title = util.generation(groups, max_length)
     # ans:
     # {Ai_abstruct:(main_sentence,paragraph)}
-
+    print(title)
     matrix = inference.inference_matrix(title)
 
     _,outline_list = outline.passage_outline(matrix,title)
 
     output = util.formate_text(title_dict,outline_list)
-    keys = []
-    for key in title.keys():
-        keys.append(key)
 
-    return
+    return title, output
test.txt
ADDED
@@ -0,0 +1,32 @@
计算机科学与技术在现代教育中的应用
1. 增强效率
计算机科学与技术的应用可以在现代教育中大大增强教学和学习的效率。从教学方面来看,教师不仅可以利用电脑和互联网以及多种课程支持软件,提供丰富多彩的课堂内容,而且可以通过投影仪、多媒体幻灯片和电视等技术来辅助教学、展示多样性以及改进课堂氛围,以帮助提高学生的学习兴趣。在学习方面,学生可以通过网络课程或通过互联网查阅各种信息来获取知识,大大增强学习的效率和效果;此外,学生也可以利用图形配文工具,进一步帮助让文字表达更加明确,使推理更加简单和便捷。考试方面,计算机科学与技术也可以发挥独特的作用,比如,通过计算机在线考试系统来更好地实施考试,使考试结果更准确、及时;而且,在考试中加入计算机图形,有助于将考试难度提高,从而更好地测试学生的思维能力。综上所述,计算机科学与技术已在现代教育中发挥着重要作用,有助于大幅提高教育的效率。
1.1 方便高效的教学
计算机科学是认知科学研究的主要领域之一,它主要研究信息处理和控制的机制,而技术代表了手段、方式和手段。由此可见,计算机科学与技术对于现代教育显然是至关重要的,它们不仅具有极大的潜力,而且可以为教育活动提供有效、高效的支持。
计算机技术可以用于从基础到较高水平的各种学习成果的测量。例如,学校可以利用电脑系统开发和管理学生的作业和测验,提供准确的评估机制和工具,并且能够帮助学生更好地学习和理解学科知识。不仅如此,计算机技术还可以通过开放式教育、网络教学形式等方式改进常规的教学与学习模式,从而提升教育质量。
带有计算机技术的教室可以更好地提升学生的学习体验,让学生能够了解复杂的概念,从而对学科有更深入的理解。同时,计算机科学可以为学校和家庭提供有效的学习渠道,让学生从最舒适的环境中接受教育,激发学生的更多想象力和创造力。
总之,计算机科学和技术在现代教育中发挥着至关重要的作用。它们不仅改变了现代教育的局面,提供了更为有效和高效的教学办法,而且同时可以为学生和家长带来更多种可能性,提高全社会的教育素质。在未来,计算机科学与技术将继续发挥重要作用,促进发展学科教育,推动教育革新。
1.2 提高教学质量
在当今的社会,计算机科学与技术已成为教育的一个重要组成部分,甚至有助于提高当今教育的质量。对于那些精通计算机技术的教师而言,他们可以更好地把这些知识和技术纳入到课堂教学中,帮助学生更好地理解这些难以把握的概念。使用计算机技术,教师可以为学生提供更多有趣的学习方式,丰富课程、带来新的学习方式。例如,一些计算机科学和技术的软件可以帮助学生可视化地理解这些复杂的课程和概念,他们可以以一种全新的方法去接受这些知识,也可以使他们更容易地掌握新内容。此外,在现代教育中使用技术可以更快地从一个概念转到另一个概念,教师们可以更快更有效地完成他们的课程计划。
在教学中,计算机科学与技术可以使教学变得生动有趣自然,尤其是对于跨学科的课程,而且教师可以更轻松地与学生进行交流,使学生了解知识的更多细节,并为学生提出困惑的问题和解决可能存在的问题。例如,可以使用在线视频来进行实时互动,也可以使用一个特定的计算机程序来解决数学问题,使学生更容易掌握数学概念。此外,教师可以使用网络资源来激发学生的学习兴趣,从而提高学生对课程的学习兴趣。在学校里,也可以使用计算机技术支持学生的作业管理,教师可以节省时间来更有效地完成诸多任务,同时学生也可以使用聊天机器人等软件来了解和学习课程的内容。
因此,计算机科学与技术对当今的教育有着重要的意义和影响。它可以为教师提供更有效的教学工具,促进他们教授新内容,同时也为学生提供新有趣的学习方式,帮助他们更好地理解它们。此外,教师也可以使用计算机技术来节省自己的工作时间,减少课堂的压力,提高教学的质量和效率。因此,在当今快速发展的教育环境中,计算机科学与技术的应用已经成为必不可少的有效途径。
2. 拓展视野
计算机科学与技术在现代教育中已经广泛应用,从初中到高等教育都可以看出其影响的足迹。光影现代化的信息技术越来越成为教学及学习的重要工具,成为一个不可缺少的教育基础设施,深刻改变了传统教育模式,开发了教学资源和学习资源,让学习更便捷、趣味性增强,并且让课堂及学习活动更加紧凑有效,更有利于学习者们的认知过程的发展。
计算机技术在教育中的拓展视野和使用方式变得越来越广泛。计算机可以作为通讯工具,连接教师和学生之间,通过电子邮件、网络学习平台以及虚拟互动空间等在线社区来实现教学过程的自动化,为新的教育理念提供技术支持。在线教学平台可以实现远程教学,远程教学可以不受地域限制,跨国受教,这样一个跨境学习者网络就可以形成,让教师和学生有机会正向互动,极大地拓展了学习者们的认知视野。
计算机科学与技术不仅仅只是改变教育理念,还可以帮助学习者们更好的执行学习计划,学习分析
2.1 扩展课程范围
计算机科学与技术(CST)在现代教育中占据着重要的地位,它可以帮助学生提高他们的数学能力,掌握现代技术,从而帮助他们牢牢把握未来社会发展的主动权。在现代课程体系中,CST有着广泛的应用。
首先,CST可以帮助学生增强数学素养。在学校里,机器学习(ML)课程可以帮助学生在使用技术的同时加强基础的数学能力,加深学生对数学的理解。ML还可以让学生在开发数学方面得到更多的练习机会。
此外,CST还可以帮助学生学习现代技术。现在,许多社会都在使用一些前沿技术,如机器人,人工智能,大数据分析等,因此要掌握这些技术,掌握CST就变得更加重要。在学校的课程中,学生可以学习有关编程语言,系统管理,网络架构等内容,帮助他们更好地掌握相关技术。
最后,CST还可以帮助学生牢牢把握未来社会发展的主动权。现代社会,基于信息技术的环境正在变得越来越广泛,因此对于学生来说,学习CST成为至关重要。CST不仅可以让他们掌握基本技术,也可以让他们熟悉信息技术产业和社会热点,为他们深入学习未来技术提供强有力的支持。
总之,CST在现代教育中占据着重要的地位,可以帮助学生提高数学能力,掌握现代技术,从而牢牢把握未来社会发展的主动权。在学校的课程体系里,针对不同的学生层次,可以提供机器学习,编程,系统管理,网络架构等知识的相关课程。当学生拥有这样的学习能力时,他们就可以跟上社会的变化,并使自己更好地适应未来发展的趋势。
2.2 探索技术
计算机科学与技术已经成为现代教育领域中越来越重要的部分。随着新世纪到来,各种形式的技术不断提高,人们都在探索如何最好地使用新技术和知识来更好地教育我们的学生。在21世纪,计算机科学和技术在教育领域发挥着重要作用,它可以改善学习环境,使学习变得更有效率,并为学生提供独特的机会来拓宽知识面。
计算机科学和技术可以用来支持传统的教育方法,从而改善传统的学习方式。它可以使学生针对个性化的教学目标进行自主学习。计算机科学和技术也可以用来提高学生的技能。例如,它可以帮助学生学习新技能,包括计算机编程,图形设计,信息技术以及许多其他领域。这些新技能将令学生能够更好地发挥创新的潜力,并有助于将他们的职业发展到新的高度。
计算机科学和技术还可以为学生带来更加丰富的学习体验。它可以通过科技手段激发学习的兴趣,通过虚拟实践提高学习的效果,使得学习者更好地理解课程内容,增加学习的效率。此外,计算机科学技术还可以通过社交媒体大大节省学习者与教师之间的交流时间,使教师可以精准地帮助学生解决学习中遇到的难题。
总而言之,计算机科学和技术为现代教育领域提供了无限的机遇,能够有效地改善教育水平,提高学生的学习能力以及技能,帮助他们获得更广阔的知识面。此外,计算机科学技术还可以为学生带来可持续的学习体验,以及更遥远的跨文化交流机会。让我们共同努力,使计算机科学和技术发挥最大的社会效应,为人类发展服务。
textInput.py
CHANGED
@@ -1,13 +1,111 @@
The old 13-line stub is expanded into the full input/output module: sentence segmentation for pasted text, file parsing for txt/docx/pdf uploads, and export helpers for the three download formats. The resulting file:

import run
import util
import docx
from docx.oxml.ns import qn
from docx.shared import Pt, RGBColor
import fitz
import os
from fpdf import FPDF
import run
from BERT_inference import BertClassificationModel


def text_dump_to_json(text, topic_num, max_length):
    lines = util.seg(text)
    sentences = run.texClear(lines)
    print(sentences)
    keys, output = run.textToAb(sentences, lines, int(topic_num), int(max_length))
    keysText = "\n".join(keys)
    outputText = "\n".join(output)
    print(keys, output)
    return keysText, outputText, dump_to_txt(output), dump_to_docx(output), dump_to_pdf(output)

def file_dump_to_lines(file, topic_num, max_length):
    lines = []
    # print(file.name)
    fileFormat = file.name.split(".")[-1]
    # print(fileFormat)
    if fileFormat == "txt":
        with open(file.name, encoding='utf-8') as f:
            content = f.read()
        lines = [x.strip() for x in content.split("\n") if x.strip() != '']
    elif fileFormat == "docx":
        doc = docx.Document(file.name)
        paragraphs = doc.paragraphs
        lines = [par.text for par in paragraphs]
    elif fileFormat == "pdf":
        pdf = fitz.open(file.name)
        for page in pdf:
            pageText = page.get_text("text")
            lines.extend([x.strip() for x in pageText.split("\n") if x.strip() != ''])
    # print(lines)
    # text = "\n".join(lines)
    sentences = run.texClear(lines)
    keys, output = run.textToAb(sentences, lines, int(topic_num), int(max_length))
    keysText = "\n".join(keys)
    outputText = "\n".join(output)
    # text = "\n".join(lines)
    # return text, text, dump_to_txt(lines), dump_to_docx(lines), dump_to_pdf(lines)
    return keysText, outputText, dump_to_txt(output), dump_to_docx(output), dump_to_pdf(output)

def dump_to_txt(lines):
    text = "\n".join(lines)
    with open('temp.txt', mode="w", encoding="utf-8") as f:
        f.write(text)
    path = os.path.abspath('temp.txt')
    return path

def dump_to_docx(lines):
    document = docx.Document()
    document.styles['Normal'].font.name = u'宋体'
    document.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')
    document.styles['Normal'].font.size = Pt(14)
    document.styles['Normal'].font.color.rgb = RGBColor(0, 0, 0)

    paragraph = document.add_paragraph()
    run = paragraph.add_run()
    # run.font.name = 'Times New Roman'
    run.font.name = u'Cambria'
    run.font.color.rgb = RGBColor(0, 0, 0)
    run._element.rPr.rFonts.set(qn('w:eastAsia'), u'Cambria')

    for line in lines:
        document.add_paragraph(line)

    document.save(r'temp.docx')
    path = os.path.abspath('temp.docx')

    return path

def dump_to_pdf(lines):
    pdf = FPDF()
    # Load the bundled CJK font
    pdf.add_font('FZY3JW', '', 'TSA/FZY3JW.TTF', True)
    pdf.add_page()
    # Set the PDF font size
    pdf.set_font("FZY3JW", size=12)
    try:
        # Write the lines one by one
        for line in lines:
            str = line
            num = len(str)
            temp = 45  # wrap so that each PDF line holds at most 45 characters
            for j in range(0, num, temp):
                if (j + temp < num):
                    data = str[j:j + temp]
                else:
                    data = str[j:num]
                pdf.cell(0, 5, data, ln=1)
    except Exception as e:
        print(e)
    pdf.output("temp.pdf")
    path = os.path.abspath('temp.pdf')
    return path

if __name__ == "__main__":
    with open('TSA/test.txt', 'r', encoding='utf-8') as f:
        data = f.read()
    # print(data)
    text_dump_to_json(data, 10, 50)
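The 45-character wrapping inside dump_to_pdf is plain fixed-width slicing of each line; a quick illustration of the same loop on a throwaway string:

line = "x" * 100
chunks = []
for j in range(0, len(line), 45):
    chunks.append(line[j:j + 45] if j + 45 < len(line) else line[j:len(line)])
print([len(c) for c in chunks])  # [45, 45, 10]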
train.py
ADDED
@@ -0,0 +1,217 @@
# %%
import numpy as np
import pandas as pd
import csv
import torch.nn as nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import TensorDataset, DataLoader
from transformers import BertTokenizer, BertConfig, AdamW
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from tqdm import tqdm
import torch
import transformers
from torch.utils.data import Dataset, DataLoader

# %%

class MyDataSet(Dataset):
    def __init__(self, loaded_data):
        self.data = loaded_data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

Data_path = "/kaggle/input/inference/train.csv"
Totle_data = pd.read_csv(Data_path)
Totle_data = Totle_data.sample(frac=0.1)
Totle_data = Totle_data.dropna(axis=0, subset=["2"])
custom_dataset = MyDataSet(Totle_data)
# Split into train/validation/test by ratio
train_size = int(len(custom_dataset) * 0.6)
validate_size = int(len(custom_dataset) * 0.1)
test_size = len(custom_dataset) - validate_size - train_size
train_dataset, validate_dataset, test_dataset = torch.utils.data.random_split(custom_dataset, [train_size, validate_size, test_size])

# Output paths for the splits
train_data_path = "Bert_Try.csv"
dev_data_path = "Bert_Dev.csv"
test_data_path = "Bert_Test.csv"

train_dataset = Totle_data.iloc[train_dataset.indices]
validate_dataset = Totle_data.iloc[validate_dataset.indices]
test_dataset = Totle_data.iloc[test_dataset.indices]

# index=False drops the row index; header=True keeps the column names
train_dataset.to_csv(train_data_path, index=False, header=True)
validate_dataset.to_csv(dev_data_path, index=False, header=True)
test_dataset.to_csv(test_data_path, index=False, header=True)

# %%
data = pd.read_csv(train_data_path)
data.head

# %%

class BertClassificationModel(nn.Module):
    def __init__(self):
        super(BertClassificationModel, self).__init__()
        # Load the pretrained encoder
        pretrained_weights = "bert-base-chinese"
        self.bert = transformers.BertModel.from_pretrained(pretrained_weights)
        for param in self.bert.parameters():
            param.requires_grad = True
        # Linear classification head
        self.dense = nn.Linear(768, 3)

    def forward(self, input_ids, token_type_ids, attention_mask):
        # Run the encoder
        bert_output = self.bert(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
        # Pooled [CLS] representation from the pretrained model
        bert_cls_hidden_state = bert_output[1]
        # Project the 768-dimensional vector onto the class logits
        linear_output = self.dense(bert_cls_hidden_state)
        return linear_output

# %%

def encoder(max_len, vocab_path, text_list):
    # Turn text_list into the input tensors the BERT model expects
    # Load the tokenizer
    tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
    tokenizer = tokenizer(
        text_list,
        padding=True,
        truncation=True,
        max_length=max_len,
        return_tensors='pt'  # return PyTorch tensors
    )
    input_ids = tokenizer['input_ids']
    token_type_ids = tokenizer['token_type_ids']
    attention_mask = tokenizer['attention_mask']
    return input_ids, token_type_ids, attention_mask

# %%
labels2dict = {"neutral": 0, "entailment": 1, "contradiction": 2}
def load_data(path):
    csvFileObj = open(path)
    readerObj = csv.reader(csvFileObj)
    text_list = []
    labels = []
    for row in readerObj:
        # Skip the header row
        if readerObj.line_num == 1:
            continue
        # Adjust the index below to wherever the label column actually sits
        label = int(labels2dict[row[0]])
        text = row[1]
        text_list.append(text)
        labels.append(label)
    # Build the three BERT input tensors
    input_ids, token_type_ids, attention_mask = encoder(max_len=150, vocab_path="/root/Bert/bert-base-chinese/vocab.txt", text_list=text_list)
    labels = torch.tensor(labels)
    # Wrap the encoder outputs and the labels in a TensorDataset
    data = TensorDataset(input_ids, token_type_ids, attention_mask, labels)
    return data

# %%
# Batch size
batch_size = 16
# Data paths
train_data_path = "Bert_Try.csv"
dev_data_path = "Bert_Dev.csv"
test_data_path = "Bert_Test.csv"
# Load the splits as TensorDatasets
train_data = load_data(train_data_path)
dev_data = load_data(dev_data_path)
test_data = load_data(test_data_path)
# Wrap the training and evaluation data in DataLoaders
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
dev_loader = DataLoader(dataset=dev_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)

# %%
def dev(model, dev_loader):
    model.to(device)
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for step, (input_ids, token_type_ids, attention_mask, labels) in tqdm(enumerate(dev_loader), desc='Dev Itreation:'):
            input_ids, token_type_ids, attention_mask, labels = input_ids.to(device), token_type_ids.to(device), attention_mask.to(device), labels.to(device)
            out_put = model(input_ids, token_type_ids, attention_mask)
            _, predict = torch.max(out_put.data, 1)
            correct += (predict == labels).sum().item()
            total += labels.size(0)
        res = correct / total
        return res

# %%

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def train(model, train_loader, dev_loader):
    model.to(device)
    model.train()
    criterion = nn.CrossEntropyLoss()
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]

    optimizer_params = {'lr': 1e-5, 'eps': 1e-6, 'correct_bias': False}
    optimizer = AdamW(optimizer_grouped_parameters, **optimizer_params)
    scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.5, min_lr=1e-7, patience=5, verbose=True, threshold=0.0001, eps=1e-08)
    t_total = len(train_loader)

    total_epochs = 10
    bestAcc = 0
    correct = 0
    total = 0
    print('Training and verification begin!')
    for epoch in range(total_epochs):
        for step, (input_ids, token_type_ids, attention_mask, labels) in enumerate(train_loader):
            optimizer.zero_grad()
            input_ids, token_type_ids, attention_mask, labels = input_ids.to(device), token_type_ids.to(device), attention_mask.to(device), labels.to(device)
            out_put = model(input_ids, token_type_ids, attention_mask)
            loss = criterion(out_put, labels)
            _, predict = torch.max(out_put.data, 1)
            correct += (predict == labels).sum().item()
            total += labels.size(0)
            loss.backward()
            optimizer.step()
            # Log every 10 steps
            if (step + 1) % 10 == 0:
                train_acc = correct / total
                print("Train Epoch[{}/{}],step[{}/{}],tra_acc{:.6f} %,loss:{:.6f}".format(epoch + 1, total_epochs, step + 1, len(train_loader), train_acc * 100, loss.item()))
            # Validate every 200 steps
            if (step + 1) % 200 == 0:
                train_acc = correct / total
                # Evaluate on the dev set and keep the checkpoint whenever accuracy improves
                acc = dev(model, dev_loader)
                if bestAcc < acc:
                    bestAcc = acc
                    # Checkpoint path
                    path = 'bert_model.pkl'
                    torch.save(model, path)
                print("DEV Epoch[{}/{}],step[{}/{}],tra_acc{:.6f} %,bestAcc{:.6f}%,dev_acc{:.6f} %,loss:{:.6f}".format(epoch + 1, total_epochs, step + 1, len(train_loader), train_acc * 100, bestAcc * 100, acc * 100, loss.item()))
                scheduler.step(bestAcc)

# %%

path = '/kaggle/input/inference/bert_model.pkl'
# model = torch.load(path)
# Instantiate the model
model = BertClassificationModel()
# Train and validate
train(model, train_loader, dev_loader)
util.py
CHANGED
@@ -8,18 +8,20 @@ import time
 
 @backoff.on_exception(backoff.expo, requests.exceptions.RequestException)
 def post_url(url, headers, payload):
-    time.sleep(
+    time.sleep(1)
     response = requests.request("POST", url, headers=headers, data=payload)
     return response
 
 
 def seg(text):
-
+    text = text.replace('\n', " ")
+    sentences = re.split(r'(?<=[。!?.!?:])\s*', text)
+    sentences = [string for string in sentences if string != '']
     return sentences
 
 
 def clean_text(text):
-    text = text.replace('\n', "
+    text = text.replace('\n', "")
     text = re.sub(r"-", " ", text)
     text = re.sub(r"\d+/\d+/\d+", "", text)  # dates
     text = re.sub(r"[0-2]?[0-9]:[0-6][0-9]", "", text)  # times
@@ -73,6 +75,11 @@ def generation(para, max_length):
     response = post_url(url, headers, payload)
     text_dict = json.loads(response.text)
     # print(text_dict)
+    while('summary' not in text_dict.keys()):
+        response = post_url(url, headers, payload)
+        text_dict = json.loads(response.text)
+        print("ReTrying")
+
     topic[text_dict['summary']] = (j, k)
     Ai_abstract.append(text_dict['summary'])
     return topic,Ai_abstract
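A quick check of what the new seg helper returns (the input string is illustrative; Python 3.7+ is assumed, since the pattern relies on splitting at zero-width lookbehind matches):

import re

text = "第一句。第二句!第三句? English sentence. 结尾没有标点"
sentences = re.split(r'(?<=[。!?.!?:])\s*', text)
sentences = [string for string in sentences if string != '']
print(sentences)
# ['第一句。', '第二句!', '第三句?', 'English sentence.', '结尾没有标点']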