Upload dataloading script and README.md
- README.md +565 -0
- knowledge_net.py +368 -0
README.md
ADDED
@@ -0,0 +1,565 @@
---
annotations_creators:
- expert-generated
language:
- en
language_creators:
- found
license: []
multilinguality:
- monolingual
pretty_name: KnowledgeNet is a dataset for automatically populating a knowledge base
size_categories:
- 10K<n<100K
source_datasets: []
tags:
- knowledgenet
task_categories:
- text-classification
task_ids:
- multi-class-classification
- entity-linking-classification
dataset_info:
- config_name: knet
  features:
  - name: fold
    dtype: int32
  - name: documentId
    dtype: string
  - name: source
    dtype: string
  - name: documentText
    dtype: string
  - name: passages
    sequence:
    - name: passageId
      dtype: string
    - name: passageStart
      dtype: int32
    - name: passageEnd
      dtype: int32
    - name: passageText
      dtype: string
    - name: exhaustivelyAnnotatedProperties
      sequence:
      - name: propertyId
        dtype: string
      - name: propertyName
        dtype: string
      - name: propertyDescription
        dtype: string
    - name: facts
      sequence:
      - name: factId
        dtype: string
      - name: propertyId
        dtype: string
      - name: humanReadable
        dtype: string
      - name: annotatedPassage
        dtype: string
      - name: subjectStart
        dtype: int32
      - name: subjectEnd
        dtype: int32
      - name: subjectText
        dtype: string
      - name: subjectUri
        dtype: string
      - name: objectStart
        dtype: int32
      - name: objectEnd
        dtype: int32
      - name: objectText
        dtype: string
      - name: objectUri
        dtype: string
  splits:
  - name: train
    num_bytes: 10161415
    num_examples: 3977
  download_size: 14119313
  dataset_size: 10161415
- config_name: knet_tokenized
  features:
  - name: doc_id
    dtype: string
  - name: passage_id
    dtype: string
  - name: fact_id
    dtype: string
  - name: tokens
    sequence: string
  - name: subj_start
    dtype: int32
  - name: subj_end
    dtype: int32
  - name: subj_type
    dtype:
      class_label:
        names:
          '0': O
          '1': PER
          '2': ORG
          '3': LOC
          '4': DATE
  - name: subj_uri
    dtype: string
  - name: obj_start
    dtype: int32
  - name: obj_end
    dtype: int32
  - name: obj_type
    dtype:
      class_label:
        names:
          '0': O
          '1': PER
          '2': ORG
          '3': LOC
          '4': DATE
  - name: obj_uri
    dtype: string
  - name: relation
    dtype:
      class_label:
        names:
          '0': NO_RELATION
          '1': DATE_OF_BIRTH
          '2': DATE_OF_DEATH
          '3': PLACE_OF_RESIDENCE
          '4': PLACE_OF_BIRTH
          '5': NATIONALITY
          '6': EMPLOYEE_OR_MEMBER_OF
          '7': EDUCATED_AT
          '8': POLITICAL_AFFILIATION
          '9': CHILD_OF
          '10': SPOUSE
          '11': DATE_FOUNDED
          '12': HEADQUARTERS
          '13': SUBSIDIARY_OF
          '14': FOUNDED_BY
          '15': CEO
  splits:
  - name: train
    num_bytes: 4511963
    num_examples: 10895
  download_size: 14119313
  dataset_size: 4511963
- config_name: knet_re
  features:
  - name: documentId
    dtype: string
  - name: passageId
    dtype: string
  - name: factId
    dtype: string
  - name: passageText
    dtype: string
  - name: humanReadable
    dtype: string
  - name: annotatedPassage
    dtype: string
  - name: subjectStart
    dtype: int32
  - name: subjectEnd
    dtype: int32
  - name: subjectText
    dtype: string
  - name: subjectType
    dtype:
      class_label:
        names:
          '0': O
          '1': PER
          '2': ORG
          '3': LOC
          '4': DATE
  - name: subjectUri
    dtype: string
  - name: objectStart
    dtype: int32
  - name: objectEnd
    dtype: int32
  - name: objectText
    dtype: string
  - name: objectType
    dtype:
      class_label:
        names:
          '0': O
          '1': PER
          '2': ORG
          '3': LOC
          '4': DATE
  - name: objectUri
    dtype: string
  - name: relation
    dtype:
      class_label:
        names:
          '0': NO_RELATION
          '1': DATE_OF_BIRTH
          '2': DATE_OF_DEATH
          '3': PLACE_OF_RESIDENCE
          '4': PLACE_OF_BIRTH
          '5': NATIONALITY
          '6': EMPLOYEE_OR_MEMBER_OF
          '7': EDUCATED_AT
          '8': POLITICAL_AFFILIATION
          '9': CHILD_OF
          '10': SPOUSE
          '11': DATE_FOUNDED
          '12': HEADQUARTERS
          '13': SUBSIDIARY_OF
          '14': FOUNDED_BY
          '15': CEO
  splits:
  - name: train
    num_bytes: 6098219
    num_examples: 10895
  download_size: 14119313
  dataset_size: 6098219
---

# Dataset Card for "KnowledgeNet"

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description
- **Repository:** [knowledge-net](https://github.com/diffbot/knowledge-net)
- **Paper:** [KnowledgeNet: A Benchmark Dataset for Knowledge Base Population](https://aclanthology.org/D19-1069/)
- **Size of downloaded dataset files:** 12.59 MB
- **Size of the generated dataset:** 6.1 MB

### Dataset Summary
KnowledgeNet is a benchmark dataset for the task of automatically populating a knowledge base (Wikidata) with facts
expressed in natural language text on the web. KnowledgeNet provides text exhaustively annotated with facts, thus
enabling the holistic end-to-end evaluation of knowledge base population systems as a whole, unlike previous benchmarks
that are more suitable for the evaluation of individual subcomponents (e.g., entity linking, relation extraction).

For instance, the dataset contains text expressing the fact (Gennaro Basile; RESIDENCE; Moravia), in the passage:
"Gennaro Basile was an Italian painter, born in Naples but active in the German-speaking countries. He settled at Brünn,
in Moravia, and lived about 1756..."

For a description of the dataset and baseline systems, please refer to the authors'
[EMNLP paper](https://github.com/diffbot/knowledge-net/blob/master/knowledgenet-emnlp-cameraready.pdf).

Note: This dataset reader currently only supports the `train` split and does not contain negative examples.
In addition to the original format, this repository also provides two versions (`knet_re`, `knet_tokenized`) that are
easier to use for simple relation extraction. You can load them with
`datasets.load_dataset("DFKI-SLT/knowledge_net", name="<config>")`, as sketched below.
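
A minimal loading sketch (note: because this dataset ships a loading script, newer versions of `datasets` may
additionally require `trust_remote_code=True`):

```python
from datasets import load_dataset

# Default config ("knet"): one example per document, with nested passages and facts.
knet = load_dataset("DFKI-SLT/knowledge_net", name="knet", split="train")

# Flattened variant: one example per annotated fact, convenient for relation extraction.
knet_re = load_dataset("DFKI-SLT/knowledge_net", name="knet_re", split="train")

print(knet[0]["documentId"])
print(knet_re[0]["humanReadable"])
```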

### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Languages
The language in the dataset is English.

## Dataset Structure

### Data Instances

#### knet
- **Size of downloaded dataset files:** 12.59 MB
- **Size of the generated dataset:** 10.16 MB

An example of 'train' looks as follows:
```json
{
  "fold": 2,
  "documentId": "8313",
  "source": "DBpedia Abstract",
  "documentText": "Gennaro Basile\n\nGennaro Basile was an Italian painter, born in Naples but active in the German-speaking countries. He settled at Brünn, in Moravia, and lived about 1756. His best picture is the altar-piece in the chapel of the chateau at Seeberg, in Salzburg. Most of his works remained in Moravia.",
  "passages": [
    {
      "passageId": "8313:16:114",
      "passageStart": 16,
      "passageEnd": 114,
      "passageText": "Gennaro Basile was an Italian painter, born in Naples but active in the German-speaking countries.",
      "exhaustivelyAnnotatedProperties": [
        {
          "propertyId": "12",
          "propertyName": "PLACE_OF_BIRTH",
          "propertyDescription": "Describes the relationship between a person and the location where she/he was born."
        }
      ],
      "facts": [
        {
          "factId": "8313:16:30:63:69:12",
          "propertyId": "12",
          "humanReadable": "<Gennaro Basile> <PLACE_OF_BIRTH> <Naples>",
          "annotatedPassage": "<Gennaro Basile> was an Italian painter, born in <Naples> but active in the German-speaking countries.",
          "subjectStart": 16,
          "subjectEnd": 30,
          "subjectText": "Gennaro Basile",
          "subjectUri": "http://www.wikidata.org/entity/Q19517888",
          "objectStart": 63,
          "objectEnd": 69,
          "objectText": "Naples",
          "objectUri": "http://www.wikidata.org/entity/Q2634"
        }
      ]
    },
    {
      "passageId": "8313:115:169",
      "passageStart": 115,
      "passageEnd": 169,
      "passageText": "He settled at Brünn, in Moravia, and lived about 1756.",
      "exhaustivelyAnnotatedProperties": [
        {
          "propertyId": "11",
          "propertyName": "PLACE_OF_RESIDENCE",
          "propertyDescription": "Describes the relationship between a person and the location where she/he lives/lived."
        },
        {
          "propertyId": "12",
          "propertyName": "PLACE_OF_BIRTH",
          "propertyDescription": "Describes the relationship between a person and the location where she/he was born."
        }
      ],
      "facts": [
        {
          "factId": "8313:115:117:129:134:11",
          "propertyId": "11",
          "humanReadable": "<He> <PLACE_OF_RESIDENCE> <Brünn>",
          "annotatedPassage": "<He> settled at <Brünn>, in Moravia, and lived about 1756.",
          "subjectStart": 115,
          "subjectEnd": 117,
          "subjectText": "He",
          "subjectUri": "http://www.wikidata.org/entity/Q19517888",
          "objectStart": 129,
          "objectEnd": 134,
          "objectText": "Brünn",
          "objectUri": "http://www.wikidata.org/entity/Q14960"
        },
        {
          "factId": "8313:115:117:139:146:11",
          "propertyId": "11",
          "humanReadable": "<He> <PLACE_OF_RESIDENCE> <Moravia>",
          "annotatedPassage": "<He> settled at Brünn, in <Moravia>, and lived about 1756.",
          "subjectStart": 115,
          "subjectEnd": 117,
          "subjectText": "He",
          "subjectUri": "http://www.wikidata.org/entity/Q19517888",
          "objectStart": 139,
          "objectEnd": 146,
          "objectText": "Moravia",
          "objectUri": "http://www.wikidata.org/entity/Q43266"
        }
      ]
    }
  ]
}
```

#### knet_re
- **Size of downloaded dataset files:** 12.59 MB
- **Size of the generated dataset:** 6.1 MB

An example of 'train' looks as follows:
```json
{
  "documentId": "7",
  "passageId": "7:23:206",
  "factId": "7:23:44:138:160:1",
  "passageText": "Tata Chemicals Europe (formerly Brunner Mond (UK) Limited) is a UK-based chemicals company that is a subsidiary of Tata Chemicals Limited, itself a part of the India-based Tata Group.",
  "humanReadable": "<Tata Chemicals Europe> <SUBSIDIARY_OF> <Tata Chemicals Limited>",
  "annotatedPassage": "<Tata Chemicals Europe> (formerly Brunner Mond (UK) Limited) is a UK-based chemicals company that is a subsidiary of <Tata Chemicals Limited>, itself a part of the India-based Tata Group.",
  "subjectStart": 0,
  "subjectEnd": 21,
  "subjectText": "Tata Chemicals Europe",
  "subjectType": 2,
  "subjectUri": "",
  "objectStart": 115,
  "objectEnd": 137,
  "objectText": "Tata Chemicals Limited",
  "objectType": 2,
  "objectUri": "http://www.wikidata.org/entity/Q2331365",
  "relation": 13
}
```

#### knet_tokenized
- **Size of downloaded dataset files:** 12.59 MB
- **Size of the generated dataset:** 4.5 MB

An example of 'train' looks as follows:
```json
{
  "doc_id": "7",
  "passage_id": "7:23:206",
  "fact_id": "7:162:168:183:205:1",
  "tokens": ["Tata", "Chemicals", "Europe", "(", "formerly", "Brunner", "Mond", "(", "UK", ")", "Limited", ")", "is", "a", "UK", "-", "based", "chemicals", "company", "that", "is", "a", "subsidiary", "of", "Tata", "Chemicals", "Limited", ",", "itself", "a", "part", "of", "the", "India", "-", "based", "Tata", "Group", "."],
  "subj_start": 28,
  "subj_end": 29,
  "subj_type": 2,
  "subj_uri": "http://www.wikidata.org/entity/Q2331365",
  "obj_start": 33,
  "obj_end": 38,
  "obj_type": 2,
  "obj_uri": "http://www.wikidata.org/entity/Q331715",
  "relation": 13
}
```

### Data Fields

#### knet
- `fold`: the fold, an `int` feature.
- `documentId`: the document id, a `string` feature.
- `source`: the source, a `string` feature.
- `documentText`: the document text, a `string` feature.
- `passages`: the list of passages, a `list` of `dict`.
  - `passageId`: the passage id, a `string` feature.
  - `passageStart`: the passage start, an `int` feature.
  - `passageEnd`: the passage end, an `int` feature.
  - `passageText`: the passage text, a `string` feature.
  - `exhaustivelyAnnotatedProperties`: the list of exhaustively annotated properties, a `list` of `dict`.
    - `propertyId`: the property id, a `string` feature.
    - `propertyName`: the property name, a `string` feature.
    - `propertyDescription`: the property description, a `string` feature.
  - `facts`: the list of facts, a `list` of `dict`.
    - `factId`: the fact id, a `string` feature.
    - `propertyId`: the property id, a `string` feature.
    - `humanReadable`: the human-readable annotation, a `string` feature.
    - `annotatedPassage`: the annotated passage, a `string` feature.
    - `subjectStart`: the subject start, an `int` feature.
    - `subjectEnd`: the subject end, an `int` feature.
    - `subjectText`: the subject text, a `string` feature.
    - `subjectUri`: the subject uri, a `string` feature.
    - `objectStart`: the object start, an `int` feature.
    - `objectEnd`: the object end, an `int` feature.
    - `objectText`: the object text, a `string` feature.
    - `objectUri`: the object uri, a `string` feature.
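
In this config the subject/object offsets are character offsets into `documentText` (end exclusive), so every mention
can be recovered by slicing. A small sanity-check sketch, assuming `knet` was loaded as shown above:

```python
doc = knet[0]
for passage in doc["passages"]:
    for fact in passage["facts"]:
        # Offsets are document-level and end-exclusive.
        assert doc["documentText"][fact["subjectStart"]:fact["subjectEnd"]] == fact["subjectText"]
        assert doc["documentText"][fact["objectStart"]:fact["objectEnd"]] == fact["objectText"]
```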

#### knet_re
- `documentId`: the document id, a `string` feature.
- `passageId`: the passage id, a `string` feature.
- `passageText`: the passage text, a `string` feature.
- `factId`: the fact id, a `string` feature.
- `humanReadable`: the human-readable annotation, a `string` feature.
- `annotatedPassage`: the annotated passage, a `string` feature.
- `subjectStart`: the index of the start character of the relation subject mention, an `int` feature.
- `subjectEnd`: the index of the end character of the relation subject mention, exclusive, an `int` feature.
- `subjectText`: the text of the subject mention, a `string` feature.
- `subjectType`: the NER type of the subject mention, a classification label.

```json
{"O": 0, "PER": 1, "ORG": 2, "LOC": 3, "DATE": 4}
```

- `subjectUri`: the Wikidata URI of the subject mention, a `string` feature.
- `objectStart`: the index of the start character of the relation object mention, an `int` feature.
- `objectEnd`: the index of the end character of the relation object mention, exclusive, an `int` feature.
- `objectText`: the text of the object mention, a `string` feature.
- `objectType`: the NER type of the object mention, a classification label.

```json
{"O": 0, "PER": 1, "ORG": 2, "LOC": 3, "DATE": 4}
```

- `objectUri`: the Wikidata URI of the object mention, a `string` feature.
- `relation`: the relation label of this instance, a classification label.

```json
{"NO_RELATION": 0, "DATE_OF_BIRTH": 1, "DATE_OF_DEATH": 2, "PLACE_OF_RESIDENCE": 3, "PLACE_OF_BIRTH": 4, "NATIONALITY": 5, "EMPLOYEE_OR_MEMBER_OF": 6, "EDUCATED_AT": 7, "POLITICAL_AFFILIATION": 8, "CHILD_OF": 9, "SPOUSE": 10, "DATE_FOUNDED": 11, "HEADQUARTERS": 12, "SUBSIDIARY_OF": 13, "FOUNDED_BY": 14, "CEO": 15}
```
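
`subjectType`, `objectType`, and `relation` are `ClassLabel` features, so examples store the integers shown above; the
mappings can be recovered from the features at runtime instead of being hard-coded. A minimal sketch for the `knet_re`
config:

```python
from datasets import load_dataset

knet_re = load_dataset("DFKI-SLT/knowledge_net", name="knet_re", split="train")
example = knet_re[0]

# ClassLabel.int2str maps stored integers back to label names.
print(knet_re.features["subjectType"].int2str(example["subjectType"]))  # e.g. "ORG"
print(knet_re.features["relation"].int2str(example["relation"]))  # e.g. "SUBSIDIARY_OF"
```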

#### knet_tokenized
- `doc_id`: the document id, a `string` feature.
- `passage_id`: the passage id, a `string` feature.
- `fact_id`: the fact id, a `string` feature.
- `tokens`: the list of tokens of this passage, obtained with spaCy, a `list` of `string` features.
- `subj_start`: the index of the start token of the relation subject mention, an `int` feature.
- `subj_end`: the index of the end token of the relation subject mention, exclusive, an `int` feature.
- `subj_type`: the NER type of the subject mention, a classification label.

```json
{"O": 0, "PER": 1, "ORG": 2, "LOC": 3, "DATE": 4}
```

- `subj_uri`: the Wikidata URI of the subject mention, a `string` feature.
- `obj_start`: the index of the start token of the relation object mention, an `int` feature.
- `obj_end`: the index of the end token of the relation object mention, exclusive, an `int` feature.
- `obj_type`: the NER type of the object mention, a classification label.

```json
{"O": 0, "PER": 1, "ORG": 2, "LOC": 3, "DATE": 4}
```

- `obj_uri`: the Wikidata URI of the object mention, a `string` feature.
- `relation`: the relation label of this instance, a classification label.

```json
{"NO_RELATION": 0, "DATE_OF_BIRTH": 1, "DATE_OF_DEATH": 2, "PLACE_OF_RESIDENCE": 3, "PLACE_OF_BIRTH": 4, "NATIONALITY": 5, "EMPLOYEE_OR_MEMBER_OF": 6, "EDUCATED_AT": 7, "POLITICAL_AFFILIATION": 8, "CHILD_OF": 9, "SPOUSE": 10, "DATE_FOUNDED": 11, "HEADQUARTERS": 12, "SUBSIDIARY_OF": 13, "FOUNDED_BY": 14, "CEO": 15}
```
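
Because `subj_start`/`subj_end` and `obj_start`/`obj_end` are end-exclusive token indices, the argument mentions can be
recovered by slicing `tokens`. A minimal sketch for the `knet_tokenized` config:

```python
from datasets import load_dataset

knet_tok = load_dataset("DFKI-SLT/knowledge_net", name="knet_tokenized", split="train")
example = knet_tok[0]

# Token spans are end-exclusive, so plain slicing recovers the mention tokens.
subj_tokens = example["tokens"][example["subj_start"]:example["subj_end"]]
obj_tokens = example["tokens"][example["obj_start"]:example["obj_end"]]
print(" ".join(subj_tokens), "|", " ".join(obj_tokens))
```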
516 |
+
### Data Splits
|
517 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
518 |
+
## Dataset Creation
|
519 |
+
### Curation Rationale
|
520 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
521 |
+
### Source Data
|
522 |
+
#### Initial Data Collection and Normalization
|
523 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
524 |
+
#### Who are the source language producers?
|
525 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
526 |
+
### Annotations
|
527 |
+
#### Annotation process
|
528 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
529 |
+
are labeled as no_relation.
|
530 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
531 |
+
### Personal and Sensitive Information
|
532 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
533 |
+
## Considerations for Using the Data
|
534 |
+
### Social Impact of Dataset
|
535 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
536 |
+
### Discussion of Biases
|
537 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
538 |
+
### Other Known Limitations
|
539 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
540 |
+
## Additional Information
|
541 |
+
### Dataset Curators
|
542 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
543 |
+
### Licensing Information
|
544 |
+
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
545 |
+
### Citation Information
|
546 |
+
```
|
547 |
+
@inproceedings{mesquita-etal-2019-knowledgenet,
|
548 |
+
title = "{K}nowledge{N}et: A Benchmark Dataset for Knowledge Base Population",
|
549 |
+
author = "Mesquita, Filipe and
|
550 |
+
Cannaviccio, Matteo and
|
551 |
+
Schmidek, Jordan and
|
552 |
+
Mirza, Paramita and
|
553 |
+
Barbosa, Denilson",
|
554 |
+
booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
|
555 |
+
month = nov,
|
556 |
+
year = "2019",
|
557 |
+
address = "Hong Kong, China",
|
558 |
+
publisher = "Association for Computational Linguistics",
|
559 |
+
url = "https://aclanthology.org/D19-1069",
|
560 |
+
doi = "10.18653/v1/D19-1069",
|
561 |
+
pages = "749--758",}
|
562 |
+
```
|
563 |
+
|
564 |
+
### Contributions
|
565 |
+
Thanks to [@phucdev](https://github.com/phucdev) for adding this dataset.
|
knowledge_net.py
ADDED
@@ -0,0 +1,368 @@
# coding=utf-8
# Copyright 2022 The current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The KnowledgeNet dataset for automatically populating a knowledge base"""

import json
import re

import datasets

_CITATION = """\
@inproceedings{mesquita-etal-2019-knowledgenet,
    title = "{K}nowledge{N}et: A Benchmark Dataset for Knowledge Base Population",
    author = "Mesquita, Filipe  and
      Cannaviccio, Matteo  and
      Schmidek, Jordan  and
      Mirza, Paramita  and
      Barbosa, Denilson",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
    month = nov,
    year = "2019",
    address = "Hong Kong, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D19-1069",
    doi = "10.18653/v1/D19-1069",
    pages = "749--758",
}
"""

_DESCRIPTION = """\
KnowledgeNet is a benchmark dataset for the task of automatically populating a knowledge base (Wikidata) with facts
expressed in natural language text on the web. KnowledgeNet provides text exhaustively annotated with facts, thus
enabling the holistic end-to-end evaluation of knowledge base population systems as a whole, unlike previous benchmarks
that are more suitable for the evaluation of individual subcomponents (e.g., entity linking, relation extraction).

For instance, the dataset contains text expressing the fact (Gennaro Basile; RESIDENCE; Moravia), in the passage:
"Gennaro Basile was an Italian painter, born in Naples but active in the German-speaking countries. He settled at Brünn,
in Moravia, and lived about 1756..."

For a description of the dataset and baseline systems, please refer to the authors'
[EMNLP paper](https://github.com/diffbot/knowledge-net/blob/master/knowledgenet-emnlp-cameraready.pdf).

Note: This dataset reader currently only supports the `train` split and does not contain negative examples.
"""

_HOMEPAGE = "https://github.com/diffbot/knowledge-net"

_LICENSE = ""

# The HuggingFace datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method).
_URLS = {
    "train": "https://raw.githubusercontent.com/diffbot/knowledge-net/master/dataset/train.json",
    "test": "https://raw.githubusercontent.com/diffbot/knowledge-net/master/dataset/test-no-facts.json"
}

_VERSION = datasets.Version("1.1.0")

_CLASS_LABELS = [
    "NO_RELATION",
    "DATE_OF_BIRTH",
    "DATE_OF_DEATH",
    "PLACE_OF_RESIDENCE",
    "PLACE_OF_BIRTH",
    "NATIONALITY",
    "EMPLOYEE_OR_MEMBER_OF",
    "EDUCATED_AT",
    "POLITICAL_AFFILIATION",
    "CHILD_OF",
    "SPOUSE",
    "DATE_FOUNDED",
    "HEADQUARTERS",
    "SUBSIDIARY_OF",
    "FOUNDED_BY",
    "CEO"
]

_NER_CLASS_LABELS = [
    "O",
    "PER",
    "ORG",
    "LOC",
    "DATE"
]

|
98 |
+
if relation_label == "DATE_OF_BIRTH":
|
99 |
+
subj_type = "PER"
|
100 |
+
obj_type = "DATE"
|
101 |
+
elif relation_label == "DATE_OF_DEATH":
|
102 |
+
subj_type = "PER"
|
103 |
+
obj_type = "DATE"
|
104 |
+
elif relation_label == "PLACE_OF_RESIDENCE":
|
105 |
+
subj_type = "PER"
|
106 |
+
obj_type = "LOC"
|
107 |
+
elif relation_label == "PLACE_OF_BIRTH":
|
108 |
+
subj_type = "PER"
|
109 |
+
obj_type = "LOC"
|
110 |
+
elif relation_label == "NATIONALITY":
|
111 |
+
subj_type = "PER"
|
112 |
+
obj_type = "LOC"
|
113 |
+
elif relation_label == "EMPLOYEE_OR_MEMBER_OF":
|
114 |
+
subj_type = "PER"
|
115 |
+
obj_type = "ORG"
|
116 |
+
elif relation_label == "EDUCATED_AT":
|
117 |
+
subj_type = "PER"
|
118 |
+
obj_type = "ORG"
|
119 |
+
elif relation_label == "POLITICAL_AFFILIATION":
|
120 |
+
subj_type = "PER"
|
121 |
+
obj_type = "ORG"
|
122 |
+
elif relation_label == "CHILD_OF":
|
123 |
+
subj_type = "PER"
|
124 |
+
obj_type = "PER"
|
125 |
+
elif relation_label == "SPOUSE":
|
126 |
+
subj_type = "PER"
|
127 |
+
obj_type = "PER"
|
128 |
+
elif relation_label == "DATE_FOUNDED":
|
129 |
+
subj_type = "ORG"
|
130 |
+
obj_type = "DATE"
|
131 |
+
elif relation_label == "HEADQUARTERS":
|
132 |
+
subj_type = "ORG"
|
133 |
+
obj_type = "LOC"
|
134 |
+
elif relation_label == "SUBSIDIARY_OF":
|
135 |
+
subj_type = "ORG"
|
136 |
+
obj_type = "ORG"
|
137 |
+
elif relation_label == "FOUNDED_BY":
|
138 |
+
subj_type = "ORG"
|
139 |
+
obj_type = "PER"
|
140 |
+
elif relation_label == "CEO":
|
141 |
+
subj_type = "ORG"
|
142 |
+
obj_type = "PER"
|
143 |
+
else:
|
144 |
+
raise ValueError(f"Unknown relation label: {relation_label}")
|
145 |
+
return subj_type, obj_type
|
146 |
+
|
147 |
+
|
148 |
+
def remove_contiguous_whitespaces(text):
    # Record the span of every whitespace run; +1 keeps the first (regular) whitespace
    # of each run, since only the additional whitespaces are removed below.
    contiguous_whitespaces_indices = [(m.start(0) + 1, m.end(0)) for m in re.finditer(" +", text)]
    cleaned_text = re.sub(" +", " ", text)
    return cleaned_text, contiguous_whitespaces_indices


def fix_char_index(char_index, contiguous_whitespaces_indices):
    # Shift a character index from the original text to the cleaned text by
    # subtracting the number of whitespace characters removed before it.
    new_char_index = char_index
    offset = 0
    for ws_start, ws_end in contiguous_whitespaces_indices:
        if char_index >= ws_end:
            offset = offset + (ws_end - ws_start)
    new_char_index -= offset
    return new_char_index
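

# Illustration of how the two helpers above compose (values worked out by hand):
#   remove_contiguous_whitespaces("a  b c") -> ("a b c", [(2, 3), (5, 5)])
#   fix_char_index(3, [(2, 3), (5, 5)])     -> 2   # "b" shifts left past the removed space

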
class KnowledgeNet(datasets.GeneratorBasedBuilder):
    """The KnowledgeNet dataset for automatically populating a knowledge base"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="knet", version=_VERSION, description="The original KnowledgeNet format."
        ),
        datasets.BuilderConfig(
            name="knet_re", version=_VERSION, description="The original KnowledgeNet formatted for RE."
        ),
        datasets.BuilderConfig(
            name="knet_tokenized", version=_VERSION, description="KnowledgeNet tokenized and reformatted."
        ),
    ]

    DEFAULT_CONFIG_NAME = "knet"  # type: ignore

    def _info(self):
        if self.config.name == "knet_tokenized":
            features = datasets.Features(
                {
                    "doc_id": datasets.Value("string"),
                    "passage_id": datasets.Value("string"),
                    "fact_id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "subj_start": datasets.Value("int32"),
                    "subj_end": datasets.Value("int32"),
                    "subj_type": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                    "subj_uri": datasets.Value("string"),
                    "obj_start": datasets.Value("int32"),
                    "obj_end": datasets.Value("int32"),
                    "obj_type": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                    "obj_uri": datasets.Value("string"),
                    "relation": datasets.ClassLabel(names=_CLASS_LABELS),
                }
            )
        elif self.config.name == "knet_re":
            features = datasets.Features(
                {
                    "documentId": datasets.Value("string"),
                    "passageId": datasets.Value("string"),
                    "factId": datasets.Value("string"),
                    "passageText": datasets.Value("string"),
                    "humanReadable": datasets.Value("string"),
                    "annotatedPassage": datasets.Value("string"),
                    "subjectStart": datasets.Value("int32"),
                    "subjectEnd": datasets.Value("int32"),
                    "subjectText": datasets.Value("string"),
                    "subjectType": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                    "subjectUri": datasets.Value("string"),
                    "objectStart": datasets.Value("int32"),
                    "objectEnd": datasets.Value("int32"),
                    "objectText": datasets.Value("string"),
                    "objectType": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                    "objectUri": datasets.Value("string"),
                    "relation": datasets.ClassLabel(names=_CLASS_LABELS),
                }
            )
        else:
            features = datasets.Features(
                {
                    "fold": datasets.Value("int32"),
                    "documentId": datasets.Value("string"),
                    "source": datasets.Value("string"),
                    "documentText": datasets.Value("string"),
                    "passages": [{
                        "passageId": datasets.Value("string"),
                        "passageStart": datasets.Value("int32"),
                        "passageEnd": datasets.Value("int32"),
                        "passageText": datasets.Value("string"),
                        "exhaustivelyAnnotatedProperties": [{
                            "propertyId": datasets.Value("string"),
                            "propertyName": datasets.Value("string"),
                            "propertyDescription": datasets.Value("string"),
                        }],
                        "facts": [{
                            "factId": datasets.Value("string"),
                            "propertyId": datasets.Value("string"),
                            "humanReadable": datasets.Value("string"),
                            "annotatedPassage": datasets.Value("string"),
                            "subjectStart": datasets.Value("int32"),
                            "subjectEnd": datasets.Value("int32"),
                            "subjectText": datasets.Value("string"),
                            "subjectUri": datasets.Value("string"),
                            "objectStart": datasets.Value("int32"),
                            "objectEnd": datasets.Value("int32"),
                            "objectText": datasets.Value("string"),
                            "objectUri": datasets.Value("string"),
                        }],
                    }],
                }
            )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,  # The features are defined above because they differ between the configurations.
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # If several configurations are possible (listed in BUILDER_CONFIGS),
        # the configuration selected by the user is in self.config.name.

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type or nested list/dict and will give back the same structure with the URLs
        # replaced with paths to local files. By default the archives will be extracted and a path to a
        # cached folder where they are extracted is returned instead of the archive.

        downloaded_files = dl_manager.download_and_extract(_URLS)
        # The test file ships without fact annotations, so only the train split is exposed:
        # splits = [datasets.Split.TRAIN, datasets.Split.TEST]
        splits = [datasets.Split.TRAIN]
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepath": downloaded_files[str(i)], "split": i})
                for i in splits]

    def _generate_examples(self, filepath, split):
        """Yields examples."""
        # This method receives as arguments the `gen_kwargs` defined in the `_split_generators` method above.
        # It is in charge of opening the given file and yielding (key, example) tuples from the dataset.
        # The key is not important, it's mostly here for legacy reasons (legacy from tfds).
        if self.config.name == "knet_tokenized":
            from spacy.lang.en import English
            word_splitter = English()
        else:
            word_splitter = None
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                doc = json.loads(line)
                if self.config.name == "knet":
                    yield doc["documentId"], doc
                else:
                    for passage in doc["passages"]:
                        # Skip passages without facts right away
                        if len(passage["facts"]) == 0:
                            continue

                        text = passage["passageText"]
                        passage_start = passage["passageStart"]

                        if self.config.name == "knet_tokenized":
                            cleaned_text, contiguous_ws_indices = remove_contiguous_whitespaces(text)
                            spacy_doc = word_splitter(cleaned_text)
                            word_tokens = [t.text for t in spacy_doc]
                            for fact in passage["facts"]:
                                # Character offsets are document-level: shift them to passage level
                                # and correct for the removed whitespaces.
                                subj_start = fix_char_index(fact["subjectStart"] - passage_start, contiguous_ws_indices)
                                subj_end = fix_char_index(fact["subjectEnd"] - passage_start, contiguous_ws_indices)
                                obj_start = fix_char_index(fact["objectStart"] - passage_start, contiguous_ws_indices)
                                obj_end = fix_char_index(fact["objectEnd"] - passage_start, contiguous_ws_indices)
                                # Get exclusive token spans from char spans
                                subj_span = spacy_doc.char_span(subj_start, subj_end, alignment_mode="expand")
                                obj_span = spacy_doc.char_span(obj_start, obj_end, alignment_mode="expand")

                                # e.g. "<Gennaro Basile> <PLACE_OF_BIRTH> <Naples>" -> "PLACE_OF_BIRTH"
                                relation_label = fact["humanReadable"].split(">")[1][2:]
                                subj_type, obj_type = get_entity_types_from_relation(relation_label)
                                id_ = fact["factId"]

                                yield id_, {
                                    "doc_id": doc["documentId"],
                                    "passage_id": passage["passageId"],
                                    "fact_id": id_,
                                    "tokens": word_tokens,
                                    "subj_start": subj_span.start,
                                    "subj_end": subj_span.end,
                                    "subj_type": subj_type,
                                    "subj_uri": fact["subjectUri"],
                                    "obj_start": obj_span.start,
                                    "obj_end": obj_span.end,
                                    "obj_type": obj_type,
                                    "obj_uri": fact["objectUri"],
                                    "relation": relation_label
                                }
                        else:
                            for fact in passage["facts"]:
                                relation_label = fact["humanReadable"].split(">")[1][2:]
                                subj_type, obj_type = get_entity_types_from_relation(relation_label)
                                id_ = fact["factId"]
                                yield id_, {
                                    "documentId": doc["documentId"],
                                    "passageId": passage["passageId"],
                                    "passageText": passage["passageText"],
                                    "factId": id_,
                                    "humanReadable": fact["humanReadable"],
                                    "annotatedPassage": fact["annotatedPassage"],
                                    "subjectStart": fact["subjectStart"] - passage_start,
                                    "subjectEnd": fact["subjectEnd"] - passage_start,
                                    "subjectText": fact["subjectText"],
                                    "subjectType": subj_type,
                                    "subjectUri": fact["subjectUri"],
                                    "objectStart": fact["objectStart"] - passage_start,
                                    "objectEnd": fact["objectEnd"] - passage_start,
                                    "objectText": fact["objectText"],
                                    "objectType": obj_type,
                                    "objectUri": fact["objectUri"],
                                    "relation": relation_label
                                }