Datasets: Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed:
- README.md (+1, -0)
- wmt20_mlqe_task3.py (+90, -57)
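For context, the updated loading script is consumed through the usual datasets API. A minimal usage sketch, assuming the dataset can be loaded by its canonical name without an explicit config name (an assumption, not stated in this commit):

# Illustrative usage sketch (not part of the commit). The split choice and the
# absence of a config name are assumptions made for illustration.
from datasets import load_dataset

ds = load_dataset("wmt20_mlqe_task3", split="train")
print(ds[0]["document_id"], len(ds[0]["mt_segments"]))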
README.md

@@ -1,4 +1,5 @@
 ---
+pretty_name: WMT20 - MultiLingual Quality Estimation (MLQE) Task3
 annotations_creators:
 - expert-generated
 - machine-generated
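The only README change adds a pretty_name entry to the YAML metadata header of the dataset card. A deliberately naive sketch of pulling that field back out of the front matter, assuming the "---"-delimited layout shown above (illustration only, not how the Hub parses cards):

# Illustration only: naive front-matter parsing of the README header shown above.
readme = """---
pretty_name: WMT20 - MultiLingual Quality Estimation (MLQE) Task3
annotations_creators:
- expert-generated
- machine-generated
---
"""
header = readme.split("---")[1]
for line in header.strip().splitlines():
    if line.startswith("pretty_name:"):
        print(line.split(":", 1)[1].strip())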
wmt20_mlqe_task3.py

@@ -16,7 +16,6 @@
 
 
 import csv
-import glob
 import os
 
 import datasets
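glob appears to be dropped because the rewritten script no longer walks an extracted directory tree: it now streams members straight out of the downloaded archives via dl_manager.iter_archive, which yields (path-inside-archive, file-object) pairs. A rough stand-alone sketch of that access pattern using only the standard library (the archive and member names are made up):

# Stand-alone sketch of the streaming access pattern the updated script relies on:
# iterate over (member_path, file_object) pairs from a tar archive instead of
# glob-ing files extracted to disk. "task3_train_dev.tar.gz" is a placeholder name.
import tarfile

def iter_tar_members(archive_path):
    with tarfile.open(archive_path) as tar:
        for member in tar.getmembers():
            if member.isfile():
                yield member.name, tar.extractfile(member)

for path, f in iter_tar_members("task3_train_dev.tar.gz"):
    if path.endswith("source.segments"):
        print(path, len(f.read().decode("utf-8").splitlines()))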
@@ -155,87 +154,121 @@ class Wmt20MlqeTask3(datasets.GeneratorBasedBuilder):

[Old side of the hunk: the previous gen_kwargs entries in _split_generators and the previous _generate_examples implementation are deleted; the updated versions follow.]
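The new _generate_examples (further below) streams archive members in order, groups them by their per-document folder, and yields one example each time the folder changes. A minimal stand-alone sketch of that grouping idea, with invented member names and contents:

# Minimal sketch of the "flush on folder change" grouping used by the new
# _generate_examples below; member names and contents are invented.
members = [
    ("task3/train/doc0/source.segments", b"src a\nsrc b"),
    ("task3/train/doc0/mt.segments", b"mt a\nmt b"),
    ("task3/train/doc1/source.segments", b"src c"),
]

prev_folder, doc = None, {}
for path, data in members:
    folder = "/".join(path.split("/")[:3])        # task3/train/<doc id>
    if prev_folder is not None and prev_folder != folder:
        print(prev_folder, doc)                   # one example per document folder
        doc = {}
    prev_folder = folder
    doc[path.split("/")[-1]] = data.decode("utf-8").splitlines()
if prev_folder is not None:
    print(prev_folder, doc)                       # flush the last document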
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_files = dl_manager.download(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "main_dir": "task3/train",
                    "split": "train",
                    "files": dl_manager.iter_archive(downloaded_files["train+dev"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "main_dir": "test-blind",
                    "split": "test",
                    "files": dl_manager.iter_archive(downloaded_files["test"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "main_dir": "task3/dev",
                    "split": "dev",
                    "files": dl_manager.iter_archive(downloaded_files["train+dev"]),
                },
            ),
        ]
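The gen_kwargs above index downloaded_files with the keys "train+dev" and "test", so the module-level _URLs mapping (not shown in this hunk) presumably has that shape. A sketch with placeholder URLs, purely for illustration:

# Assumed shape of the module-level _URLs dict referenced in _split_generators;
# the real download URLs are not part of this hunk, so these are placeholders.
_URLs = {
    "train+dev": "https://example.org/wmt20-mlqe/task3_train_dev.tar.gz",  # placeholder
    "test": "https://example.org/wmt20-mlqe/task3_test.tar.gz",            # placeholder
}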

    def _generate_examples(self, main_dir, split, files):
        """Yields examples."""

        prev_folder = None
        source_segments, source_tokenized, mt_segments, mt_tokenized = [None] * 4
        token_index, total_words, annotations, token_annotations = [], [], [], []
        for path, f in files:
            if path.startswith(main_dir):
                # each document lives in its own sub-folder of main_dir
                dir_name = path.split("/")[main_dir.count("/") + 1]
                folder = main_dir + "/" + dir_name

                # a new folder means the previous document is complete: emit it and reset the buffers
                if prev_folder is not None and prev_folder != folder:
                    yield prev_folder, {
                        "document_id": os.path.basename(prev_folder),
                        "source_segments": source_segments,
                        "source_tokenized": source_tokenized,
                        "mt_segments": mt_segments,
                        "mt_tokenized": mt_tokenized,
                        "annotations": annotations,
                        "token_annotations": token_annotations,
                        "token_index": token_index,
                        "total_words": total_words,
                    }
                    source_segments, source_tokenized, mt_segments, mt_tokenized = [None] * 4
                    token_index, total_words, annotations, token_annotations = [], [], [], []

                prev_folder = folder

                source_segments_path = "/".join([folder, "source.segments"])
                source_tokenized_path = "/".join([folder, "source.tokenized"])
                mt_segments_path = "/".join([folder, "mt.segments"])
                mt_tokenized_path = "/".join([folder, "mt.tokenized"])
                total_words_path = "/".join([folder, "total_words"])
                token_index_path = "/".join([folder, "token_index"])

                # plain line-oriented UTF-8 text files
                if path == source_segments_path:
                    source_segments = f.read().decode("utf-8").splitlines()
                elif path == source_tokenized_path:
                    source_tokenized = f.read().decode("utf-8").splitlines()
                elif path == mt_segments_path:
                    mt_segments = f.read().decode("utf-8").splitlines()
                elif path == mt_tokenized_path:
                    mt_tokenized = f.read().decode("utf-8").splitlines()
                elif path == total_words_path:
                    total_words = f.read().decode("utf-8").splitlines()[0]
                elif path == token_index_path:
                    # tab-separated groups of space-separated token ids per line
                    token_index = [
                        [idx.split(" ") for idx in line.split("\t")]
                        for line in f.read().decode("utf-8").splitlines()
                        if line != ""
                    ]

                # gold annotations only ship with the train and dev splits
                if split in ["train", "dev"]:
                    annotations_path = "/".join([folder, "annotations.tsv"])
                    token_annotations_path = "/".join([folder, "token_annotations.tsv"])

                    if path == annotations_path:
                        lines = (line.decode("utf-8") for line in f)
                        reader = csv.DictReader(lines, delimiter="\t")
                        annotations = [
                            {
                                "segment_id": row["segment_id"].split(" "),
                                "annotation_start": row["annotation_start"].split(" "),
                                "annotation_length": row["annotation_length"].split(" "),
                                "severity": row["severity"],
                                "severity_weight": row["severity_weight"],
                                "category": row["category"],
                            }
                            for row in reader
                        ]
                    elif path == token_annotations_path:
                        lines = (line.decode("utf-8") for line in f)
                        reader = csv.DictReader(lines, delimiter="\t")
                        token_annotations = [
                            {
                                "segment_id": row["segment_id"].split(" "),
                                "first_token": row["first_token"].replace("-", "-1").split(" "),
                                "last_token": row["last_token"].replace("-", "-1").split(" "),
                                "token_after_gap": row["token_after_gap"].replace("-", "-1").split(" "),
                                "severity": row["severity"],
                                "category": row["category"],
                            }
                            for row in reader
                        ]
        # emit the last buffered document once the stream of archive members is exhausted
        if prev_folder is not None:
            yield prev_folder, {
                "document_id": os.path.basename(prev_folder),
                "source_segments": source_segments,
                "source_tokenized": source_tokenized,
                "mt_segments": mt_segments,