viklofg committed on
Commit 93d3903
1 parent: 5cbf007

Upload 3 files

Files changed (3)
  1. app.py +89 -0
  2. post_ocr.py +218 -0
  3. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,89 @@
+ import streamlit as st
+ from transformers import AutoTokenizer, T5ForConditionalGeneration
+ import post_ocr
+
+
+ # Load model
+ @st.cache_resource
+ def load_model():
+     return T5ForConditionalGeneration.from_pretrained('viklofg/swedish-ocr-correction')
+ model = load_model()
+
+
+ # Load tokenizer
+ @st.cache_resource
+ def load_tokenizer():
+     return AutoTokenizer.from_pretrained('google/byt5-small')
+ tokenizer = load_tokenizer()
+
+
+ # Set model and tokenizer
+ post_ocr.set_model(model, tokenizer)
+
+
+ # Title
+ st.title(':memo: Swedish OCR correction')
+ # Input and output areas
+ tab1, tab2 = st.tabs(["Text input", "From file"])
+
+
+ def clean_inputs():
+     st.session_state.inputs = {'tab1': None, 'tab2': None}
+
+ if 'inputs' not in st.session_state:
+     clean_inputs()
+
+
+ def clean_outputs():
+     st.session_state.outputs = {'tab1': None, 'tab2': None}
+
+ if 'outputs' not in st.session_state:
+     clean_outputs()
+
+
+ # Sidebar (settings)
+ with st.sidebar:
+     st.header('Settings')
+     n_candidates = st.number_input('Overlap', help='A higher value may lead to better quality, but takes longer', value=1, min_value=1, max_value=7, step=2, on_change=clean_inputs)
+
+     st.header('Output')
+     show_changes = st.toggle('Show changes')
+
+
+ def handle_input(input_, id_):
+
+     with st.container(border=True):
+         st.caption('Output')
+
+         # Only update the output if the input has changed
+         if input_ and st.session_state.inputs[id_] != input_:
+             st.session_state.inputs[id_] = input_
+             with st.spinner('Generating...'):
+                 output = post_ocr.process(input_, n_candidates)
+                 st.session_state.outputs[id_] = output
+
+         # Display output
+         output = st.session_state.outputs[id_]
+         if output is not None:
+             st.write(post_ocr.diff(input_, output) if show_changes else output)
+
+
+ # Manual entry tab
+ with tab1:
+     typed_input = st.text_area('Input OCR', placeholder='Enter OCR generated text', label_visibility='collapsed')
+     handle_input(typed_input, 'tab1')
+
+
+ # File upload tab
+ with tab2:
+     uploaded_file = st.file_uploader('Choose a file', type='.txt')
+
+     if uploaded_file is not None:
+         text = uploaded_file.getvalue().decode('utf-8')
+
+         # Display file content
+         with st.container(border=True):
+             st.caption('File content')
+             st.write(text)
+
+         handle_input(text, 'tab2')
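
For reference, here is a minimal sketch of how these pieces fit together outside Streamlit. It loads the same two checkpoints as app.py; the Swedish sample string is invented for illustration.

```python
from transformers import AutoTokenizer, T5ForConditionalGeneration
import post_ocr

# Same checkpoints that app.py loads
model = T5ForConditionalGeneration.from_pretrained('viklofg/swedish-ocr-correction')
tokenizer = AutoTokenizer.from_pretrained('google/byt5-small')
post_ocr.set_model(model, tokenizer)

# n_candidates corresponds to the sidebar's 'Overlap' setting (odd values, 1 to 7)
print(post_ocr.process('Den fiirsta bokstaven iir fel.', n_candidates=3))
```
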
post_ocr.py ADDED
@@ -0,0 +1,218 @@
+ import jiwer
+
+
+ _MODEL = None
+ _TOKENIZER = None
+
+
+ def bytelen(string):
+     """Return the length of `string` in UTF-8 bytes"""
+     return len(bytes(string, encoding='utf-8', errors='ignore'))
+
+
+ def take_bytes(words: list[str], n_bytes: int) -> tuple[list[str], list[str]]:
+     """
+     Take at most `n_bytes` worth of words from the list `words`
+
+     Arguments:
+         words: A list of words
+         n_bytes: Maximum size of words to take (in bytes)
+
+     Returns:
+         A tuple (head, tail) where `head` holds at most `n_bytes`
+         bytes of words and `tail` holds the remaining words
+     """
+     current_n_bytes = 0
+     for i, word in enumerate(words):
+         if current_n_bytes + bytelen(word) > n_bytes:
+             return words[:max(i, 1)], words[max(i, 1):]  # take at least one word to guarantee progress
+         current_n_bytes += bytelen(word) + 1  # add 1 to account for the space between words
+     return words, []
+
+
+ def split(text, max_len, offset=0):
+     """Split `text` into chunks of at most `max_len` UTF-8 bytes"""
+     words = text.split(' ')
+     chunks = []
+
+     if offset:
+         chunk, words = take_bytes(words, offset)
+         chunks.append(' '.join(chunk))
+
+     while words:
+         chunk, words = take_bytes(words, max_len)
+         chunks.append(' '.join(chunk))
+
+     return chunks
+
+
+ class TextMerger:
+     """Merge candidate corrections of a text by majority vote."""
+
+     EMPTY = '🗌'
+     SPACE = '_'
+
+     def __init__(self, original: str, char_level=False):
+         """
+         Arguments:
+             original: The original text.
+             char_level: Merge at character level instead of word level.
+         """
+         self.char_level = char_level
+         self.word_level = not char_level
+         self.original = self._process_incoming(original)
+         self.original_padded = self._pad_between_words(self.original)
+         self.candidates = [[] for _ in self.original_padded]
+         self.candidate_texts = []
+         self.alignments = []
+
+     def _pad_between_words(self, string: str) -> list[str]:
+         """
+         Insert the `EMPTY` marker between the words in `string`.
+         Used for aligning suggested insertions.
+
+         Example:
+             'Hello world' -> [EMPTY, 'Hello', EMPTY, 'world', EMPTY]
+         """
+         words = string.split(' ')
+         padded = [self.EMPTY]
+         for word in words:
+             padded.append(word)
+             padded.append(self.EMPTY)
+         return padded
+
+     def _process_incoming(self, text):
+         if self.char_level:
+             return ' '.join(text.replace(' ', self.SPACE))
+         return text.replace('\n', '\n ')
+
+     def _process_outgoing(self, words: list[str]):
+         if self.char_level:
+             return ''.join(words).replace(self.SPACE, ' ')
+         return ' '.join(words).replace('\n ', '\n')
+
+     def add_candidate_texts(self, texts: list[str]):
+         for text in texts:
+             self.add_candidate_text(text)
+
+     def add_candidate_text(self, text: str):
+         """
+         Add `text` as a candidate correction of the original text
+         """
+         # Bookkeeping
+         self.candidate_texts.append(text)
+         text = self._process_incoming(text)
+         jiwer_result = jiwer.process_words(
+             self.original,
+             text,
+             reference_transform=jiwer.Compose([jiwer.ReduceToListOfListOfWords()]),
+             hypothesis_transform=jiwer.Compose([jiwer.ReduceToListOfListOfWords()]))
+
+         self.alignments.append(jiwer_result)
+
+         # Work through the jiwer results and fill in the candidates list
+         text = jiwer_result.hypotheses[0]
+         for chunk in jiwer_result.alignments[0]:
+             x0, x1 = chunk.ref_start_idx, chunk.ref_end_idx
+             y0, y1 = chunk.hyp_start_idx, chunk.hyp_end_idx
+
+             if chunk.type == 'substitute':
+                 # Append the suggested substitution to the candidate list
+                 for i in range(x1 - x0):
+                     self.candidates[2*(x0+i)+1].extend(text[y0+i:y0+i+1])
+
+             # Attach the suggested insertion as a candidate for
+             # the `EMPTY` slot between words in the original
+             elif chunk.type == 'insert':
+                 if self.char_level:
+                     self.candidates[2*x0].append(''.join(text[y0:y1]))
+                 else:
+                     self.candidates[2*x0].append(' '.join(text[y0:y1]))
+
+             # This word is suggested to be deleted; append EMPTY as a candidate
+             elif chunk.type == 'delete':
+                 for i in range(x1 - x0):
+                     self.candidates[2*(x0+i)+1].append(self.EMPTY)
+
+
+     def combine(self) -> str:
+         """
+         Combine the current candidate texts
+         """
+         out = []
+         for original, candidates in zip(self.original_padded, self.candidates):
+             correction_candidate = self._best_candidate(candidates, original)
+             out.append(correction_candidate)
+         out = [word for word in out if word != self.EMPTY]
+         return self._process_outgoing(out)
+
+
+     def _best_candidate(self, candidates, original):
+         """
+         Return the best candidate out of `candidates` by majority vote,
+         falling back to `original` when no candidate wins.
+
+         Example: _best_candidate(['Hello', 'Hello', 'Hallå'], 'Helo') -> 'Hello'
+         """
+         if len(candidates) < self._majority():
+             return original
+
+         if len(set(candidates)) == 1:
+             return candidates[0]
+
+         if self.word_level:
+             # No unanimous vote: retry the merge at character level
+             tm = TextMerger(original, char_level=True)
+             tm.add_candidate_texts(candidates)
+             return tm.combine()
+
+         candidate, n_votes = max(((candidate, candidates.count(candidate)) for candidate in candidates), key=lambda x: x[1])
+         return candidate if n_votes >= self._majority() else original
+
+     def _majority(self):
+         return 1 + len(self.candidate_texts) // 2
+
+
+ def process(text: str, n_candidates: int = 1):
+     """Correct `text`, merging the predictions of `n_candidates` overlapping windows."""
+     if n_candidates == 1:
+         splits = split(text, 127)
+         return ' '.join(generate(splits))
+
+     combiner = TextMerger(text)
+     splits = [split(text, 127, 127 * i // n_candidates) for i in range(n_candidates)]
+     outputs = [generate(lines) for lines in splits]
+     for output in outputs:
+         combiner.add_candidate_text(' '.join(output))
+     return combiner.combine()
+
+
+ def generate(texts):
+     inputs = _TOKENIZER(texts, padding=True, truncation=True, return_tensors='pt')
+     output_ids = _MODEL.generate(**inputs)
+     return _TOKENIZER.batch_decode(output_ids, skip_special_tokens=True)
+
+
+ def diff(old: str, new: str):
+     """Return a Streamlit-markdown string highlighting the changes from `old` to `new`"""
+     result = jiwer.process_characters(old, new)
+     output = ''
+     for chunk in result.alignments[0]:
+         old_chars = old[chunk.ref_start_idx:chunk.ref_end_idx]
+         new_chars = new[chunk.hyp_start_idx:chunk.hyp_end_idx]
+
+         if chunk.type == 'equal':
+             output += old_chars
+             continue
+
+         if old_chars and not old_chars.isspace():
+             output += f':red[~~{old_chars.strip()}~~]'
+
+         output += f':green[{new_chars}]'
+     return output
+
+
+ def set_model(model, tokenizer):
+     global _MODEL, _TOKENIZER
+     _MODEL = model
+     _TOKENIZER = tokenizer
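
To make the majority-vote merge concrete, here is a small hand-made illustration of `TextMerger`; the OCR line and the candidate corrections are invented.

```python
from post_ocr import TextMerger

tm = TextMerger('Thc quick brwn fox')   # invented OCR output
tm.add_candidate_texts([
    'The quick brown fox',  # fixes both errors
    'The quick brwn fox',   # fixes only the first error
    'The quick brown fox',
])

# A word is replaced only when a majority (here 2 of 3 candidate texts)
# agrees on the replacement; otherwise the original word is kept.
print(tm.combine())  # -> 'The quick brown fox'
```

When word-level voting is inconclusive, `_best_candidate` retries the vote with a character-level `TextMerger`, which is what the `SPACE` marker exists for.
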
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ transformers
+ jiwer