|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import ast
import json
import os

import datasets
|
|
|
_CITATION = ''' |
|
@inproceedings{Ammanabrolu2020AAAI, |
|
title={Story Realization: Expanding Plot Events into Sentences}, |
|
author={Prithviraj Ammanabrolu and Ethan Tien and Wesley Cheung and Zhaochen Luo and William Ma and Lara J. Martin and Mark O. Riedl}, |
|
journal={Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)}, |
|
year={2020}, |
|
volume={34}, |
|
number={05}, |
|
url={https://ojs.aaai.org//index.php/AAAI/article/view/6232} |
|
} |
|
''' |
|
|
|
_DESCRIPTION = 'Loading script for the science fiction TV show plot dataset.' |
|
|
|
_URLS = {'Scifi_TV_Shows': "https://huggingface.co/datasets/lara-martin/Scifi_TV_Shows/resolve/main/scifiTVshows.zip"} |
|
|
|
|
|
class Scifi_TV_Shows(datasets.GeneratorBasedBuilder):
    """Dataset builder for science-fiction TV show plot summaries.

    Each example is one line of a '|||'-delimited text file pairing an
    abstracted story event (and a generated variant) with the original and
    generated sentences, plus an entity-mapping string.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=datasets.Version('1.1.0'),
            name="Scifi_TV_Shows",
            # Plain string: the original used an f-string with no placeholders.
            description='Science fiction TV show plot summaries.',
        )
    ]

    def _info(self):
        """Return dataset metadata: feature schema, homepage, license, citation."""
        features = datasets.Features({
            'story_num': datasets.Value('int16'),
            'story_line': datasets.Value('int16'),
            'event': datasets.Sequence(datasets.Value('string')),
            'gen_event': datasets.Sequence(datasets.Value('string')),
            'sent': datasets.Value('string'),
            'gen_sent': datasets.Value('string'),
            'entities': datasets.Value('string'),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage='https://github.com/rajammanabrolu/StoryRealization',
            license='The Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then declare the three splits.

        Args:
            dl_manager: datasets download manager used to fetch the zip.

        Returns:
            A list of SplitGenerators for train, test, and validation.
        """
        my_urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepath': os.path.join(data_dir, "scifi-train.txt")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={'filepath': os.path.join(data_dir, "scifi-test.txt")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={'filepath': os.path.join(data_dir, "scifi-val.txt")},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (id, example) pairs from one '|||'-delimited split file.

        Args:
            filepath: path to a split file with seven '|||'-separated fields
                per line.

        Yields:
            Tuples of (line index, example dict matching the _info features).
        """
        with open(filepath, encoding="utf-8") as f:
            # Iterate the file lazily; readlines() would load it all into memory.
            for id_, line in enumerate(f):
                story_num, line_num, event, gen_event, sent, gen_sent, entities = line.strip().split("|||")
                yield id_, {
                    'story_num': story_num,
                    'story_line': line_num,
                    # literal_eval safely parses the Python-list-formatted
                    # event fields; eval() would execute arbitrary code from
                    # the downloaded data.
                    'event': ast.literal_eval(event),
                    'gen_event': ast.literal_eval(gen_event),
                    'sent': sent,
                    'gen_sent': gen_sent,
                    'entities': entities,
                }
|
|
|
|
|
|