Commit 98d425e
Parent(s): 8e45719

Support streaming (#4)

- Host data file (4696d9fc2f7f678cd27880718b9f102cefe74496)
- Update and refactor code (46790a05ffbb5a4277937ec95f08970c396398b7)

Files changed:
- data.zip (+3 -0)
- sofc_materials_articles.py (+12 -28)
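The net effect of the two sub-commits: the corpus archive is hosted in this dataset repository and the loading script is refactored so examples can be generated lazily. A minimal usage sketch, assuming the dataset id matches the script name:

```python
from datasets import load_dataset

# Stream examples on the fly instead of downloading and extracting the
# whole corpus up front. The id "sofc_materials_articles" is an
# assumption here, taken from the script's filename.
ds = load_dataset("sofc_materials_articles", split="train", streaming=True)
print(next(iter(ds)))
```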
data.zip ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5430c7563887db6067e75bcfbad99dfa83b02b38834dbf5cfd355919d84554ec
+size 3627968
```
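What was added is not the archive bytes themselves but a Git LFS pointer: the repository tracks only the object's `oid` and `size` (about 3.6 MB here), while the real zip lives in LFS storage. A small sketch, not part of the commit, of how such a pointer file decomposes:

```python
def parse_lfs_pointer(path: str) -> dict:
    """Split a Git LFS pointer file into its space-separated key/value fields."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# For this commit's data.zip this would yield:
# {"version": "https://git-lfs.github.com/spec/v1",
#  "oid": "sha256:5430c7563887db6067e75bcfbad99dfa83b02b38834dbf5cfd355919d84554ec",
#  "size": "3627968"}
```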
sofc_materials_articles.py CHANGED

```diff
@@ -45,7 +45,7 @@ _HOMEPAGE = "https://arxiv.org/abs/2006.03039"
 
 _LICENSE = ""
 
-_URL = "
+_URL = "data.zip"
 
 
 class SOFCMaterialsArticles(datasets.GeneratorBasedBuilder):
```
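This one-line change is what ties the script to the hosted file: a relative `_URL` is resolved against the dataset repository itself rather than an external server. Schematically (the old URL is cut off in the diff above; its GitHub form is inferred from the old extraction path in the next hunk, and the resolved hub URL is hypothetical):

```python
# Before (roughly): an absolute URL to an external GitHub archive,
# ".../sofc-exp_textmining_resources/archive/master.zip"
# After: a repo-relative path, which the download manager resolves to
# something like "https://huggingface.co/datasets/<repo_id>/resolve/main/data.zip"
_URL = "data.zip"
```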
```diff
@@ -232,33 +232,14 @@ class SOFCMaterialsArticles(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-
-
-        data_dir = dl_manager.download_and_extract(my_urls)
-
-        data_dir = os.path.join(data_dir, "sofc-exp_textmining_resources-master/sofc-exp-corpus")
-
-        metadata = pd.read_csv(os.path.join(data_dir, "SOFC-Exp-Metadata.csv"), sep="\t")
-
-        text_base_path = os.path.join(data_dir, "texts")
-
-        text_files_available = [
-            os.path.split(i.rstrip(".txt"))[-1] for i in glob.glob(os.path.join(text_base_path, "*.txt"))
-        ]
-
-        metadata = metadata[metadata["name"].map(lambda x: x in text_files_available)]
-
-        names = {}
-        splits = ["train", "test", "dev"]
-        for split in splits:
-            names[split] = metadata[metadata["set"] == split]["name"].tolist()
+        data_dir = dl_manager.download_and_extract(_URL)
+        data_dir = os.path.join(data_dir, "sofc-exp-corpus")
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "names": names["train"],
                     "data_dir": data_dir,
                     "split": "train",
                 },
```
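Two things leave `_split_generators` in this hunk: all filesystem access (`pd.read_csv`, `glob.glob`) and the precomputed per-split `names` lists in `gen_kwargs`. That matters for streaming because split generation must not depend on the archive's contents being extracted; `glob` over a remote, unextracted zip has nothing to walk. The contract this relies on, shown as a hypothetical minimal builder (not the real script):

```python
import datasets

class MinimalBuilder(datasets.GeneratorBasedBuilder):
    """Hypothetical builder illustrating how gen_kwargs reach _generate_examples."""

    def _info(self):
        return datasets.DatasetInfo(features=datasets.Features({"text": datasets.Value("string")}))

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Passed verbatim to _generate_examples, so the dict keys
                # must match that method's parameter names exactly.
                gen_kwargs={"data_dir": "data", "split": "train"},
            )
        ]

    def _generate_examples(self, data_dir, split):
        # Yield (key, example) pairs; a real loader opens files here.
        yield 0, {"text": f"{split} example from {data_dir}"}
```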
```diff
@@ -266,21 +247,26 @@ class SOFCMaterialsArticles(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={
+                gen_kwargs={
+                    "data_dir": data_dir,
+                    "split": "test",
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "names": names["dev"],
                     "data_dir": data_dir,
-                    "split": "
+                    "split": "dev",
                 },
             ),
         ]
 
-    def _generate_examples(self, names, data_dir, split):
+    def _generate_examples(self, data_dir, split):
         """Yields examples."""
+        metadata = pd.read_csv(os.path.join(data_dir, "SOFC-Exp-Metadata.csv"), sep="\t")
+        names = metadata[metadata["set"] == split]["name"].tolist()
+
         # The dataset consists of the original article text as well as annotations
         textfile_base_path = os.path.join(data_dir, "texts")
         annotations_base_path = os.path.join(data_dir, "annotations")
```
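The counterpart of that deletion: `_generate_examples` now receives only `data_dir` and `split`, and derives the article names itself from the metadata TSV at generation time. Condensed into a standalone helper for illustration (the real script does this inline, as in the hunk above):

```python
import os
import pandas as pd

def names_for_split(data_dir: str, split: str) -> list:
    """Article names belonging to one split, per SOFC-Exp-Metadata.csv.

    The metadata file is tab-separated despite the .csv extension; its
    "set" column holds the split label ("train", "test" or "dev").
    """
    metadata = pd.read_csv(os.path.join(data_dir, "SOFC-Exp-Metadata.csv"), sep="\t")
    return metadata[metadata["set"] == split]["name"].tolist()
```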
```diff
@@ -308,7 +294,6 @@ class SOFCMaterialsArticles(datasets.GeneratorBasedBuilder):
         # For each text file, we'll load all of the
         # associated annotation files
         for id_, name in enumerate(sorted(names)):
-
             # Load the main source text
             textfile_path = os.path.join(textfile_base_path, name + ".txt")
             text = open(textfile_path, encoding="utf-8").read()
```
```diff
@@ -383,7 +368,6 @@ class SOFCMaterialsArticles(datasets.GeneratorBasedBuilder):
         # Iterate through the spans data
         spans = []
         for span in spans_raw:
-
             # Split out the elements in each tab-delimited line
             _, span_id, entity_label_or_exp, sentence_id, begin_char_offset, end_char_offset = span.split("\t")
 
```
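For reference, each record in the spans annotation data carries six tab-separated fields, unpacked exactly as in the loop above. A toy record (the field values are made up; only the field names come from the script):

```python
# Hypothetical annotation record; the real ones ship inside data.zip.
span = "span\tT1\tMATERIAL\t3\t45\t52"
_, span_id, entity_label_or_exp, sentence_id, begin_char_offset, end_char_offset = span.split("\t")
# Offsets arrive as strings; they would be cast before use:
begin, end = int(begin_char_offset), int(end_char_offset)
```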