Commit e917b46 (1 parent: 4530e42), committed by gabrielaltay

upload hubscripts/tmvar_v3_hub.py to hub from bigbio repo

Files changed (1)
  1. tmvar_v3.py +307 -0
tmvar_v3.py ADDED
@@ -0,0 +1,307 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This dataset contains 500 PubMed articles manually annotated with mutation
mentions of various kinds and dbSNP normalizations for each of them. In
addition, it contains variant normalization options such as allele-specific
identifiers from the ClinGen Allele Registry. It can be used for NER and
NED tasks. This dataset does NOT have splits.
"""
import itertools

import datasets
from bioc import pubtator

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_CITATION = """\
@misc{https://doi.org/10.48550/arxiv.2204.03637,
  title = {tmVar 3.0: an improved variant concept recognition and normalization tool},
  author = {
    Wei, Chih-Hsuan and Allot, Alexis and Riehle, Kevin and Milosavljevic,
    Aleksandar and Lu, Zhiyong
  },
  year = 2022,
  publisher = {arXiv},
  doi = {10.48550/ARXIV.2204.03637},
  url = {https://arxiv.org/abs/2204.03637},
  copyright = {Creative Commons Attribution 4.0 International},
  keywords = {
    Computation and Language (cs.CL), FOS: Computer and information sciences,
    FOS: Computer and information sciences
  }
}

"""
_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False

_DATASETNAME = "tmvar_v3"
_DISPLAYNAME = "tmVar v3"

_DESCRIPTION = """\
This dataset contains 500 PubMed articles manually annotated with mutation \
mentions of various kinds and dbSNP normalizations for each of them. In \
addition, it contains variant normalization options such as allele-specific \
identifiers from the ClinGen Allele Registry. It can be used for NER and \
NED tasks. This dataset does NOT have splits.
"""

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/research/bionlp/Tools/tmvar/"

_LICENSE = 'License information unavailable'

_URLS = {_DATASETNAME: "ftp://ftp.ncbi.nlm.nih.gov/pub/lu/tmVar3/tmVar3Corpus.txt"}
_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
_SOURCE_VERSION = "3.0.0"
_BIGBIO_VERSION = "1.0.0"
logger = datasets.utils.logging.get_logger(__name__)


class TmvarV3Dataset(datasets.GeneratorBasedBuilder):
    """
    This dataset contains 500 PubMed articles manually annotated with mutation
    mentions of various kinds and various normalizations for each of them.
    """

    DEFAULT_CONFIG_NAME = "tmvar_v3_source"
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
    BUILDER_CONFIGS = []
    BUILDER_CONFIGS.append(
        BigBioConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        )
    )
    BUILDER_CONFIGS.append(
        BigBioConfig(
            name=f"{_DATASETNAME}_bigbio_kb",
            version=BIGBIO_VERSION,
            description=f"{_DATASETNAME} BigBio schema",
            schema="bigbio_kb",
            subset_id=f"{_DATASETNAME}",
        )
    )

    def _info(self) -> datasets.DatasetInfo:
        type_to_db_mapping = {
            "CorrespondingGene": "NCBI Gene",
            "tmVar": "tmVar",
            "dbSNP": "dbSNP",
            "VariantGroup": "VariantGroup",
            "NCBI Taxonomy": "NCBI Taxonomy",
        }
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "pmid": datasets.Value("string"),
                    "passages": [
                        {
                            "type": datasets.Value("string"),
                            "text": datasets.Sequence(datasets.Value("string")),
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                        }
                    ],
                    "entities": [
                        {
                            "text": datasets.Sequence(datasets.Value("string")),
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                            "semantic_type_id": datasets.Sequence(
                                datasets.Value("string")
                            ),
                            "normalized": {
                                key: datasets.Sequence(datasets.Value("string"))
                                for key in type_to_db_mapping.keys()
                            },
                        }
                    ],
                }
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        url = _URLS[_DATASETNAME]
        test_filepath = dl_manager.download(url)
        # The corpus ships as a single file with no train/dev partition,
        # so everything is exposed under the TEST split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_filepath,
                },
            )
        ]

    def get_normalizations(self, mention_id, mention_type, doc_id):
        """
        Given a mention type and its (possibly composite) normalization id
        string, return a dictionary mapping each normalization database to
        its list of ids.
        """
        base_dict = {
            key: []
            for key in [
                "tmVar",
                "CorrespondingGene",
                "dbSNP",
                "VariantGroup",
                "NCBI Taxonomy",
            ]
        }
        ids = mention_id.split(";")
        if mention_type in ["CellLine", "Species"]:
            id_vals = ids[0].split(",")
            base_dict["NCBI Taxonomy"] = id_vals
        elif mention_type == "Gene":
            id_vals = ids[0].split(",")
            base_dict["CorrespondingGene"] = id_vals
        else:
            for norm_id in ids:
                if "|" in norm_id:
                    base_dict["tmVar"].append(norm_id)
                elif norm_id[:2] == "rs":
                    base_dict["dbSNP"].append(norm_id[2:])
                elif ":" in norm_id:
                    db_name, db_id = norm_id.split(":")
                    if db_name == "RS#":
                        db_name = "dbSNP"
                    # Hacky fix below for doc ID: 18272172
                    elif db_name == "Va1iantGroup":
                        db_name = "VariantGroup"
                    elif db_name == "Gene":
                        db_name = "CorrespondingGene"
                    elif db_name == "Disease":
                        continue
                    db_ids = db_id.split(",")
                    base_dict[db_name].extend(db_ids)
                else:
                    logger.info(
                        f"Malformed normalization in document {doc_id}. Type: {mention_type}, id: {norm_id}"
                    )
                    continue
        return base_dict
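
    # Illustrative example (the composite id is hypothetical, not quoted from
    # the corpus): for a ProteinMutation mention with the id string
    # "RS#:121913529;CorrespondingGene:3845", get_normalizations returns
    # {"tmVar": [], "CorrespondingGene": ["3845"], "dbSNP": ["121913529"],
    #  "VariantGroup": [], "NCBI Taxonomy": []}.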

    def pubtator_to_source(self, filepath):
        """
        Converts pubtator to source schema
        """
        with open(filepath, "r", encoding="utf8") as fstream:
            for doc in pubtator.iterparse(fstream):
                document = {}
                document["pmid"] = doc.pmid
                title = doc.title
                abstract = doc.abstract
                document["passages"] = [
                    {"type": "title", "text": [title], "offsets": [[0, len(title)]]},
                    {
                        "type": "abstract",
                        "text": [abstract],
                        "offsets": [[len(title) + 1, len(title) + len(abstract) + 1]],
                    },
                ]
                document["entities"] = [
                    {
                        "offsets": [[mention.start, mention.end]],
                        "text": [mention.text],
                        "semantic_type_id": [mention.type],
                        "normalized": self.get_normalizations(
                            mention.id,
                            mention.type,
                            doc.pmid,
                        ),
                    }
                    for mention in doc.annotations
                ]
                yield document
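
    # Note on offsets: the passage offsets above treat the document text as
    # the title and abstract joined by a single separator character, which is
    # why the abstract passage starts at len(title) + 1; the annotation
    # offsets from the corpus are relative to that same concatenated text.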

    def pubtator_to_bigbio_kb(self, filepath):
        """
        Converts pubtator to bigbio_kb schema
        """
        with open(filepath, "r", encoding="utf8") as fstream:
            uid = itertools.count(0)
            for doc in pubtator.iterparse(fstream):
                document = {}
                title = doc.title
                abstract = doc.abstract
                document["id"] = next(uid)
                document["document_id"] = doc.pmid
                document["passages"] = [
                    {
                        "id": next(uid),
                        "type": "title",
                        "text": [title],
                        "offsets": [[0, len(title)]],
                    },
                    {
                        "id": next(uid),
                        "type": "abstract",
                        "text": [abstract],
                        "offsets": [[len(title) + 1, len(title) + len(abstract) + 1]],
                    },
                ]
                document["entities"] = [
                    {
                        "id": next(uid),
                        "offsets": [[mention.start, mention.end]],
                        "text": [mention.text],
                        # the bigbio_kb schema expects a plain string here,
                        # not a single-element list
                        "type": mention.type,
                        "normalized": self.get_normalizations(
                            mention.id, mention.type, doc.pmid
                        ),
                    }
                    for mention in doc.annotations
                ]
                db_id_mapping = {
                    "dbSNP": "dbSNP",
                    "CorrespondingGene": "NCBI Gene",
                    "tmVar": "dbSNP",
                }
                for entity in document["entities"]:
                    normalized_bigbio_kb = []
                    for key, id_list in entity["normalized"].items():
                        if key in db_id_mapping.keys():
                            normalized_bigbio_kb.extend(
                                [
                                    {"db_name": db_id_mapping[key], "db_id": norm_id}
                                    for norm_id in id_list
                                ]
                            )
                    entity["normalized"] = normalized_bigbio_kb
                document["relations"] = []
                document["events"] = []
                document["coreferences"] = []
                yield document

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        if self.config.schema == "source":
            for source_example in self.pubtator_to_source(filepath):
                yield source_example["pmid"], source_example
        elif self.config.schema == "bigbio_kb":
            for bigbio_example in self.pubtator_to_bigbio_kb(filepath):
                yield bigbio_example["document_id"], bigbio_example
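
For reference, a minimal sketch of loading this dataset once the script is on the hub. The repo id bigbio/tmvar_v3 is an assumption based on the commit context, and trust_remote_code applies to datasets versions that gate loading scripts behind it:

from datasets import load_dataset

# Assumed hub repo id; the two config names come from BUILDER_CONFIGS above.
source = load_dataset(
    "bigbio/tmvar_v3", name="tmvar_v3_source", trust_remote_code=True
)
kb = load_dataset(
    "bigbio/tmvar_v3", name="tmvar_v3_bigbio_kb", trust_remote_code=True
)

# The corpus has no train/dev partition, so the only split is "test".
print(kb["test"][0]["document_id"])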