Datasets:

Languages:
English
License:
phlobo committed on
Commit
0cc98b3
1 Parent(s): 38ff03d

Update drugprot based on git version 64f6de0

Browse files
Files changed (1) hide show
  1. drugprot.py +44 -28
drugprot.py CHANGED
@@ -22,7 +22,7 @@ https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vii/track-1/
22
  """
23
  import collections
24
  from pathlib import Path
25
- from typing import Dict, Iterator, Tuple
26
 
27
  import datasets
28
 
@@ -30,7 +30,7 @@ from .bigbiohub import kb_features
30
  from .bigbiohub import BigBioConfig
31
  from .bigbiohub import Tasks
32
 
33
- _LANGUAGES = ['English']
34
  _PUBMED = True
35
  _LOCAL = False
36
  _CITATION = """\
@@ -55,9 +55,11 @@ between them corresponding to a specific set of biologically relevant relation t
55
 
56
  _HOMEPAGE = "https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vii/track-1/"
57
 
58
- _LICENSE = 'Creative Commons Attribution 4.0 International'
59
 
60
- _URLS = {_DATASETNAME: "https://zenodo.org/record/5119892/files/drugprot-training-development-test-background.zip?download=1"}
 
 
61
 
62
  _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
63
 
@@ -139,32 +141,44 @@ class DrugProtDataset(datasets.GeneratorBasedBuilder):
139
  return [
140
  datasets.SplitGenerator(
141
  name=datasets.Split.TRAIN,
142
- gen_kwargs={"data_dir": data_dir, "split": "training"},
 
 
 
 
143
  ),
144
  datasets.SplitGenerator(
145
  name=datasets.Split.VALIDATION,
146
- gen_kwargs={"data_dir": data_dir, "split": "development"},
 
 
 
 
 
 
 
 
 
 
 
 
147
  ),
148
  ]
149
 
150
- def _generate_examples(self, data_dir: Path, split: str) -> Iterator[Tuple[str, Dict]]:
151
  if self.config.name == "drugprot_source":
152
- documents = self._read_source_examples(data_dir, split)
153
  for document_id, document in documents.items():
154
  yield document_id, document
155
 
156
  elif self.config.name == "drugprot_bigbio_kb":
157
- documents = self._read_source_examples(data_dir, split)
158
  for document_id, document in documents.items():
159
  yield document_id, self._transform_source_to_kb(document)
160
 
161
- def _read_source_examples(self, input_dir: Path, split: str) -> Dict:
162
  """ """
163
- split_dir = input_dir / split
164
- abstracts_file = split_dir / f"drugprot_{split}_abstracs.tsv"
165
- entities_file = split_dir / f"drugprot_{split}_entities.tsv"
166
- relations_file = split_dir / f"drugprot_{split}_relations.tsv"
167
-
168
  document_to_entities = collections.defaultdict(list)
169
  for line in entities_file.read_text().splitlines():
170
  columns = line.split("\t")
@@ -180,20 +194,22 @@ class DrugProtDataset(datasets.GeneratorBasedBuilder):
180
  )
181
 
182
  document_to_relations = collections.defaultdict(list)
183
- for line in relations_file.read_text().splitlines():
184
- columns = line.split("\t")
185
- document_id = columns[0]
186
-
187
- document_relations = document_to_relations[document_id]
188
 
189
- document_relations.append(
190
- {
191
- "id": document_id + "_" + str(len(document_relations)),
192
- "type": columns[1],
193
- "arg1_id": document_id + "_" + columns[2][5:],
194
- "arg2_id": document_id + "_" + columns[3][5:],
195
- }
196
- )
 
 
 
 
 
 
 
197
 
198
  document_to_source = {}
199
  for line in abstracts_file.read_text().splitlines():
 
22
  """
23
  import collections
24
  from pathlib import Path
25
+ from typing import Dict, Iterator, Tuple, Optional
26
 
27
  import datasets
28
 
 
30
  from .bigbiohub import BigBioConfig
31
  from .bigbiohub import Tasks
32
 
33
+ _LANGUAGES = ["English"]
34
  _PUBMED = True
35
  _LOCAL = False
36
  _CITATION = """\
 
55
 
56
  _HOMEPAGE = "https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vii/track-1/"
57
 
58
+ _LICENSE = "CC_BY_4p0"
59
 
60
+ _URLS = {
61
+ _DATASETNAME: "https://zenodo.org/record/5119892/files/drugprot-training-development-test-background.zip?download=1"
62
+ }
63
 
64
  _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
65
 
 
141
  return [
142
  datasets.SplitGenerator(
143
  name=datasets.Split.TRAIN,
144
+ gen_kwargs={
145
+ "abstracts_file": data_dir / "training" / "drugprot_training_abstracs.tsv",
146
+ "entities_file": data_dir / "training" / "drugprot_training_entities.tsv",
147
+ "relations_file": data_dir / "training" / "drugprot_training_relations.tsv",
148
+ },
149
  ),
150
  datasets.SplitGenerator(
151
  name=datasets.Split.VALIDATION,
152
+ gen_kwargs={
153
+ "abstracts_file": data_dir / "development" / "drugprot_development_abstracs.tsv",
154
+ "entities_file": data_dir / "development" / "drugprot_development_entities.tsv",
155
+ "relations_file": data_dir / "development" / "drugprot_development_relations.tsv",
156
+ },
157
+ ),
158
+ datasets.SplitGenerator(
159
+ name=datasets.Split("test_background"),
160
+ gen_kwargs={
161
+ "abstracts_file": data_dir / "test-background" / "test_background_abstracts.tsv",
162
+ "entities_file": data_dir / "test-background" / "test_background_entities.tsv",
163
+ "relations_file": None,
164
+ },
165
  ),
166
  ]
167
 
168
+ def _generate_examples(self, **kwargs) -> Iterator[Tuple[str, Dict]]:
169
  if self.config.name == "drugprot_source":
170
+ documents = self._read_source_examples(**kwargs)
171
  for document_id, document in documents.items():
172
  yield document_id, document
173
 
174
  elif self.config.name == "drugprot_bigbio_kb":
175
+ documents = self._read_source_examples(**kwargs)
176
  for document_id, document in documents.items():
177
  yield document_id, self._transform_source_to_kb(document)
178
 
179
+ def _read_source_examples(self, abstracts_file: Path, entities_file: Path, relations_file: Optional[Path]) -> Dict:
180
  """ """
181
+ # Note: The split "test-background" does not contain any relations
 
 
 
 
182
  document_to_entities = collections.defaultdict(list)
183
  for line in entities_file.read_text().splitlines():
184
  columns = line.split("\t")
 
194
  )
195
 
196
  document_to_relations = collections.defaultdict(list)
 
 
 
 
 
197
 
198
+ if relations_file is not None:
199
+ for line in relations_file.read_text().splitlines():
200
+ columns = line.split("\t")
201
+ document_id = columns[0]
202
+
203
+ document_relations = document_to_relations[document_id]
204
+
205
+ document_relations.append(
206
+ {
207
+ "id": document_id + "_" + str(len(document_relations)),
208
+ "type": columns[1],
209
+ "arg1_id": document_id + "_" + columns[2][5:],
210
+ "arg2_id": document_id + "_" + columns[3][5:],
211
+ }
212
+ )
213
 
214
  document_to_source = {}
215
  for line in abstracts_file.read_text().splitlines():