"""The KnowledgeNet dataset for automatically populating a knowledge base""" |
|
|
|
import json |
|
import re |
|
import datasets |
|
|
|
_CITATION = """\ |
|
@inproceedings{mesquita-etal-2019-knowledgenet, |
|
title = "{K}nowledge{N}et: A Benchmark Dataset for Knowledge Base Population", |
|
author = "Mesquita, Filipe and |
|
Cannaviccio, Matteo and |
|
Schmidek, Jordan and |
|
Mirza, Paramita and |
|
Barbosa, Denilson", |
|
booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
month = nov, |
|
year = "2019", |
|
address = "Hong Kong, China", |
|
publisher = "Association for Computational Linguistics", |
|
url = "https://aclanthology.org/D19-1069", |
|
doi = "10.18653/v1/D19-1069", |
|
pages = "749--758",} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
KnowledgeNet is a benchmark dataset for the task of automatically populating a knowledge base (Wikidata) with facts |
|
expressed in natural language text on the web. KnowledgeNet provides text exhaustively annotated with facts, thus |
|
enabling the holistic end-to-end evaluation of knowledge base population systems as a whole, unlike previous benchmarks |
|
that are more suitable for the evaluation of individual subcomponents (e.g., entity linking, relation extraction). |
|
|
|
For instance, the dataset contains text expressing the fact (Gennaro Basile; RESIDENCE; Moravia), in the passage: |
|
"Gennaro Basile was an Italian painter, born in Naples but active in the German-speaking countries. He settled at Brünn, |
|
in Moravia, and lived about 1756..." |
|
|
|
For a description of the dataset and baseline systems, please refer to their |
|
[EMNLP paper](https://github.com/diffbot/knowledge-net/blob/master/knowledgenet-emnlp-cameraready.pdf). |
|
|
|
Note: This Datasetreader currently only supports the `train` split and does not contain negative examples |
|
""" |
|
|
|
_HOMEPAGE = "https://github.com/diffbot/knowledge-net" |
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
_URLS = { |
|
"train": "https://raw.githubusercontent.com/diffbot/knowledge-net/master/dataset/train.json", |
|
"test": "https://raw.githubusercontent.com/diffbot/knowledge-net/master/dataset/test-no-facts.json" |
|
} |
|
|
|
_VERSION = datasets.Version("1.1.0") |
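
# A minimal usage sketch (the script path below is a hypothetical local path to
# this file; the `name` argument selects one of the builder configs defined on
# the KnowledgeNet class further down):
#
#     from datasets import load_dataset
#     ds = load_dataset("knowledge_net.py", name="knet_re", split="train")
#     print(ds[0]["humanReadable"])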
|
|
|
_CLASS_LABELS = [
    "NO_RELATION",
    "DATE_OF_BIRTH",
    "DATE_OF_DEATH",
    "PLACE_OF_RESIDENCE",
    "PLACE_OF_BIRTH",
    "NATIONALITY",
    "EMPLOYEE_OR_MEMBER_OF",
    "EDUCATED_AT",
    "POLITICAL_AFFILIATION",
    "CHILD_OF",
    "SPOUSE",
    "DATE_FOUNDED",
    "HEADQUARTERS",
    "SUBSIDIARY_OF",
    "FOUNDED_BY",
    "CEO",
]

_NER_CLASS_LABELS = [
    "O",
    "PER",
    "ORG",
    "LOC",
    "DATE",
]
|
|
|
|
|
# Each relation label determines the NER types of its subject and object.
_RELATION_ENTITY_TYPES = {
    "DATE_OF_BIRTH": ("PER", "DATE"),
    "DATE_OF_DEATH": ("PER", "DATE"),
    "PLACE_OF_RESIDENCE": ("PER", "LOC"),
    "PLACE_OF_BIRTH": ("PER", "LOC"),
    "NATIONALITY": ("PER", "LOC"),
    "EMPLOYEE_OR_MEMBER_OF": ("PER", "ORG"),
    "EDUCATED_AT": ("PER", "ORG"),
    "POLITICAL_AFFILIATION": ("PER", "ORG"),
    "CHILD_OF": ("PER", "PER"),
    "SPOUSE": ("PER", "PER"),
    "DATE_FOUNDED": ("ORG", "DATE"),
    "HEADQUARTERS": ("ORG", "LOC"),
    "SUBSIDIARY_OF": ("ORG", "ORG"),
    "FOUNDED_BY": ("ORG", "PER"),
    "CEO": ("ORG", "PER"),
}


def get_entity_types_from_relation(relation_label):
    """Return the (subject, object) entity types implied by a relation label."""
    try:
        return _RELATION_ENTITY_TYPES[relation_label]
    except KeyError:
        raise ValueError(f"Unknown relation label: {relation_label}") from None
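
# For example: get_entity_types_from_relation("CEO") -> ("ORG", "PER").
# "NO_RELATION" is deliberately absent from the mapping: it appears in
# _CLASS_LABELS for classification, but no gold fact carries it, so looking
# it up raises ValueError like any other unknown label.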
|
|
|
|
|
def remove_contiguous_whitespaces(text):
    """Collapse runs of spaces to a single space.

    Returns the cleaned text together with the (start, end) character indices
    of the removed surplus spaces in the original text, so that character
    offsets into the original text can be corrected afterwards.
    """
    # For a run of n spaces, the surplus span covers the last n - 1 of them
    # (empty for a single space, which needs no correction).
    contiguous_whitespaces_indices = [(m.start(0) + 1, m.end(0)) for m in re.finditer(" +", text)]
    cleaned_text = re.sub(" +", " ", text)
    return cleaned_text, contiguous_whitespaces_indices
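
# For instance (hypothetical input, 0-based indices):
#     remove_contiguous_whitespaces("a  b")  ->  ("a b", [(2, 3)])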
|
|
|
|
|
def fix_char_index(char_index, contiguous_whitespaces_indices):
    """Map a character index in the original text to the cleaned text."""
    new_char_index = char_index
    offset = 0
    # Every removed surplus-space span ending at or before the index shifts
    # it left by that span's length.
    for ws_start, ws_end in contiguous_whitespaces_indices:
        if char_index >= ws_end:
            offset = offset + (ws_end - ws_start)
    new_char_index -= offset
    return new_char_index
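
# Continuing the example above: "b" is at index 3 in "a  b" and at index 2 in
# the cleaned "a b":
#     fix_char_index(3, [(2, 3)])  ->  2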
|
|
|
|
|
class KnowledgeNet(datasets.GeneratorBasedBuilder):
    """The KnowledgeNet dataset for automatically populating a knowledge base"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="knet", version=_VERSION, description="The original KnowledgeNet in its document-level format."
        ),
        datasets.BuilderConfig(
            name="knet_re", version=_VERSION, description="The original KnowledgeNet formatted for RE."
        ),
        datasets.BuilderConfig(
            name="knet_tokenized", version=_VERSION, description="KnowledgeNet tokenized and reformatted."
        ),
    ]

    DEFAULT_CONFIG_NAME = "knet"
|
|
|
    def _info(self):
        if self.config.name == "knet_tokenized":
            features = datasets.Features(
                {
                    "doc_id": datasets.Value("string"),
                    "passage_id": datasets.Value("string"),
                    "fact_id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "subj_start": datasets.Value("int32"),
                    "subj_end": datasets.Value("int32"),
                    "subj_type": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                    "subj_uri": datasets.Value("string"),
                    "obj_start": datasets.Value("int32"),
                    "obj_end": datasets.Value("int32"),
                    "obj_type": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                    "obj_uri": datasets.Value("string"),
                    "relation": datasets.ClassLabel(names=_CLASS_LABELS),
                }
            )
        elif self.config.name == "knet_re":
            features = datasets.Features(
                {
                    "documentId": datasets.Value("string"),
                    "passageId": datasets.Value("string"),
                    "factId": datasets.Value("string"),
                    "passageText": datasets.Value("string"),
                    "humanReadable": datasets.Value("string"),
                    "annotatedPassage": datasets.Value("string"),
                    "subjectStart": datasets.Value("int32"),
                    "subjectEnd": datasets.Value("int32"),
                    "subjectText": datasets.Value("string"),
                    "subjectType": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                    "subjectUri": datasets.Value("string"),
                    "objectStart": datasets.Value("int32"),
                    "objectEnd": datasets.Value("int32"),
                    "objectText": datasets.Value("string"),
                    "objectType": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                    "objectUri": datasets.Value("string"),
                    "relation": datasets.ClassLabel(names=_CLASS_LABELS),
                }
            )
        else:
            # The "knet" config keeps the original document-level JSON structure.
            features = datasets.Features(
                {
                    "fold": datasets.Value("int32"),
                    "documentId": datasets.Value("string"),
                    "source": datasets.Value("string"),
                    "documentText": datasets.Value("string"),
                    "passages": [
                        {
                            "passageId": datasets.Value("string"),
                            "passageStart": datasets.Value("int32"),
                            "passageEnd": datasets.Value("int32"),
                            "passageText": datasets.Value("string"),
                            "exhaustivelyAnnotatedProperties": [
                                {
                                    "propertyId": datasets.Value("string"),
                                    "propertyName": datasets.Value("string"),
                                    "propertyDescription": datasets.Value("string"),
                                }
                            ],
                            "facts": [
                                {
                                    "factId": datasets.Value("string"),
                                    "propertyId": datasets.Value("string"),
                                    "humanReadable": datasets.Value("string"),
                                    "annotatedPassage": datasets.Value("string"),
                                    "subjectStart": datasets.Value("int32"),
                                    "subjectEnd": datasets.Value("int32"),
                                    "subjectText": datasets.Value("string"),
                                    "subjectUri": datasets.Value("string"),
                                    "objectStart": datasets.Value("int32"),
                                    "objectEnd": datasets.Value("int32"),
                                    "objectText": datasets.Value("string"),
                                    "objectUri": datasets.Value("string"),
                                }
                            ],
                        }
                    ],
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
|
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_files = dl_manager.download_and_extract(_URLS)

        # Only the train split is exposed; the downloaded test file carries no
        # gold facts (see _URLS above).
        splits = [datasets.Split.TRAIN]
        return [
            datasets.SplitGenerator(name=i, gen_kwargs={"filepath": downloaded_files[str(i)], "split": i})
            for i in splits
        ]
|
|
|
    def _generate_examples(self, filepath, split):
        """Yields examples."""
        if self.config.name == "knet_tokenized":
            # Tokenization is only needed for this config; spaCy's rule-based
            # English tokenizer suffices, no statistical model is required.
            from spacy.lang.en import English

            word_splitter = English()
        else:
            word_splitter = None
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                doc = json.loads(line)
                if self.config.name == "knet":
                    yield doc["documentId"], doc
                else:
                    for passage in doc["passages"]:
                        # Skip passages without annotated facts.
                        if len(passage["facts"]) == 0:
                            continue

                        text = passage["passageText"]
                        passage_start = passage["passageStart"]

                        if self.config.name == "knet_tokenized":
                            cleaned_text, contiguous_ws_indices = remove_contiguous_whitespaces(text)
                            spacy_doc = word_splitter(cleaned_text)
                            word_tokens = [t.text for t in spacy_doc]
                            for fact in passage["facts"]:
                                # Character offsets are document-level; shift them to be
                                # passage-relative and correct for the removed whitespace.
                                subj_start = fix_char_index(fact["subjectStart"] - passage_start, contiguous_ws_indices)
                                subj_end = fix_char_index(fact["subjectEnd"] - passage_start, contiguous_ws_indices)
                                obj_start = fix_char_index(fact["objectStart"] - passage_start, contiguous_ws_indices)
                                obj_end = fix_char_index(fact["objectEnd"] - passage_start, contiguous_ws_indices)

                                # Snap character spans to token boundaries, expanding to the
                                # surrounding tokens where they do not align exactly.
                                subj_span = spacy_doc.char_span(subj_start, subj_end, alignment_mode="expand")
                                obj_span = spacy_doc.char_span(obj_start, obj_end, alignment_mode="expand")

                                # "humanReadable" looks like "<subj> <RELATION> <obj>"; the
                                # relation label is the second angle-bracketed field.
                                relation_label = fact["humanReadable"].split(">")[1][2:]
                                subj_type, obj_type = get_entity_types_from_relation(relation_label)
                                id_ = fact["factId"]

                                yield id_, {
                                    "doc_id": doc["documentId"],
                                    "passage_id": passage["passageId"],
                                    "fact_id": id_,
                                    "tokens": word_tokens,
                                    "subj_start": subj_span.start,
                                    "subj_end": subj_span.end,
                                    "subj_type": subj_type,
                                    "subj_uri": fact["subjectUri"],
                                    "obj_start": obj_span.start,
                                    "obj_end": obj_span.end,
                                    "obj_type": obj_type,
                                    "obj_uri": fact["objectUri"],
                                    "relation": relation_label,
                                }
                        else:
                            for fact in passage["facts"]:
                                relation_label = fact["humanReadable"].split(">")[1][2:]
                                subj_type, obj_type = get_entity_types_from_relation(relation_label)
                                id_ = fact["factId"]
                                yield id_, {
                                    "documentId": doc["documentId"],
                                    "passageId": passage["passageId"],
                                    "passageText": passage["passageText"],
                                    "factId": id_,
                                    "humanReadable": fact["humanReadable"],
                                    "annotatedPassage": fact["annotatedPassage"],
                                    "subjectStart": fact["subjectStart"] - passage_start,
                                    "subjectEnd": fact["subjectEnd"] - passage_start,
                                    "subjectText": fact["subjectText"],
                                    "subjectType": subj_type,
                                    "subjectUri": fact["subjectUri"],
                                    "objectStart": fact["objectStart"] - passage_start,
                                    "objectEnd": fact["objectEnd"] - passage_start,
                                    "objectText": fact["objectText"],
                                    "objectType": obj_type,
                                    "objectUri": fact["objectUri"],
                                    "relation": relation_label,
                                }
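

# A minimal smoke test, assuming the installed `datasets` version can still
# load script-based datasets from a local path (the `knet_tokenized` config
# additionally requires `spacy`):
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, name="knet_re", split="train")
    print(dataset[0]["humanReadable"])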
|
|