Datasets:

Languages:
English
License:

Upload adjusted drugprot.py to include "test-background" split

#1
Files changed (1)
  1. drugprot.py +39 -25
drugprot.py CHANGED
@@ -22,7 +22,7 @@ https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vii/track-1/
22
  """
23
  import collections
24
  from pathlib import Path
25
- from typing import Dict, Iterator, Tuple
26
 
27
  import datasets
28
 
@@ -139,32 +139,44 @@ class DrugProtDataset(datasets.GeneratorBasedBuilder):
139
  return [
140
  datasets.SplitGenerator(
141
  name=datasets.Split.TRAIN,
142
- gen_kwargs={"data_dir": data_dir, "split": "training"},
 
 
 
 
143
  ),
144
  datasets.SplitGenerator(
145
  name=datasets.Split.VALIDATION,
146
- gen_kwargs={"data_dir": data_dir, "split": "development"},
 
 
 
 
 
 
 
 
 
 
 
 
147
  ),
148
  ]
149
 
150
- def _generate_examples(self, data_dir: Path, split: str) -> Iterator[Tuple[str, Dict]]:
151
  if self.config.name == "drugprot_source":
152
- documents = self._read_source_examples(data_dir, split)
153
  for document_id, document in documents.items():
154
  yield document_id, document
155
 
156
  elif self.config.name == "drugprot_bigbio_kb":
157
- documents = self._read_source_examples(data_dir, split)
158
  for document_id, document in documents.items():
159
  yield document_id, self._transform_source_to_kb(document)
160
 
161
- def _read_source_examples(self, input_dir: Path, split: str) -> Dict:
162
  """ """
163
- split_dir = input_dir / split
164
- abstracts_file = split_dir / f"drugprot_{split}_abstracs.tsv"
165
- entities_file = split_dir / f"drugprot_{split}_entities.tsv"
166
- relations_file = split_dir / f"drugprot_{split}_relations.tsv"
167
-
168
  document_to_entities = collections.defaultdict(list)
169
  for line in entities_file.read_text().splitlines():
170
  columns = line.split("\t")
@@ -180,20 +192,22 @@ class DrugProtDataset(datasets.GeneratorBasedBuilder):
180
  )
181
 
182
  document_to_relations = collections.defaultdict(list)
183
- for line in relations_file.read_text().splitlines():
184
- columns = line.split("\t")
185
- document_id = columns[0]
186
 
187
- document_relations = document_to_relations[document_id]
188
-
189
- document_relations.append(
190
- {
191
- "id": document_id + "_" + str(len(document_relations)),
192
- "type": columns[1],
193
- "arg1_id": document_id + "_" + columns[2][5:],
194
- "arg2_id": document_id + "_" + columns[3][5:],
195
- }
196
- )
 
 
 
 
 
197
 
198
  document_to_source = {}
199
  for line in abstracts_file.read_text().splitlines():
 
22
  """
23
  import collections
24
  from pathlib import Path
25
+ from typing import Dict, Iterator, Tuple, Optional
26
 
27
  import datasets
28
 
 
139
  return [
140
  datasets.SplitGenerator(
141
  name=datasets.Split.TRAIN,
142
+ gen_kwargs={
143
+ "abstracts_file": data_dir / "training" / "drugprot_training_abstracs.tsv",
144
+ "entities_file": data_dir / "training" / "drugprot_training_entities.tsv",
145
+ "relations_file": data_dir / "training" / "drugprot_training_relations.tsv",
146
+ },
147
  ),
148
  datasets.SplitGenerator(
149
  name=datasets.Split.VALIDATION,
150
+ gen_kwargs={
151
+ "abstracts_file": data_dir / "development" / "drugprot_development_abstracs.tsv",
152
+ "entities_file": data_dir / "development" / "drugprot_development_entities.tsv",
153
+ "relations_file": data_dir / "development" / "drugprot_development_relations.tsv",
154
+ },
155
+ ),
156
+ datasets.SplitGenerator(
157
+ name=datasets.Split("test_background"),
158
+ gen_kwargs={
159
+ "abstracts_file": data_dir / "test-background" / "test_background_abstracts.tsv",
160
+ "entities_file": data_dir / "test-background" / "test_background_entities.tsv",
161
+ "relations_file": None,
162
+ },
163
  ),
164
  ]
165
 
166
+ def _generate_examples(self, **kwargs) -> Iterator[Tuple[str, Dict]]:
167
  if self.config.name == "drugprot_source":
168
+ documents = self._read_source_examples(**kwargs)
169
  for document_id, document in documents.items():
170
  yield document_id, document
171
 
172
  elif self.config.name == "drugprot_bigbio_kb":
173
+ documents = self._read_source_examples(**kwargs)
174
  for document_id, document in documents.items():
175
  yield document_id, self._transform_source_to_kb(document)
176
 
177
+ def _read_source_examples(self, abstracts_file: Path, entities_file: Path, relations_file: Optional[Path]) -> Dict:
178
  """ """
179
+ # Note: The split "test-background" does not contain any relations
 
 
 
 
180
  document_to_entities = collections.defaultdict(list)
181
  for line in entities_file.read_text().splitlines():
182
  columns = line.split("\t")
 
192
  )
193
 
194
  document_to_relations = collections.defaultdict(list)
 
 
 
195
 
196
+ if relations_file is not None:
197
+ for line in relations_file.read_text().splitlines():
198
+ columns = line.split("\t")
199
+ document_id = columns[0]
200
+
201
+ document_relations = document_to_relations[document_id]
202
+
203
+ document_relations.append(
204
+ {
205
+ "id": document_id + "_" + str(len(document_relations)),
206
+ "type": columns[1],
207
+ "arg1_id": document_id + "_" + columns[2][5:],
208
+ "arg2_id": document_id + "_" + columns[3][5:],
209
+ }
210
+ )
211
 
212
  document_to_source = {}
213
  for line in abstracts_file.read_text().splitlines():