"""we're testin'""" |
|
|
|
import datasets |
|
import json |
|
|
|
class TestDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for Test Dataset for testing HF parsing."""

    def __init__(
        self,
        text_features,
        foo="foo",
        process_label=lambda x: x,
        **kwargs,
    ):
"""BuilderConfig for TestDatset. |
|
|
|
Args: |
|
text_features: `dict[string, string]`, map from the name of the feature |
|
dict for each text field to the name of the column in the tsv file |
|
label_column: `string`, name of the column in the tsv file corresponding |
|
to the label |
|
data_dir: `string`, the path to the folder containing the tsv files in the |
|
downloaded zip |
|
label_classes: `list[string]`, the list of classes if the label is |
|
categorical. If not provided, then the label will be of type |
|
`datasets.Value('float32')`. |
|
process_label: `Function[string, any]`, function taking in the raw value |
|
of the label and processing it to the form required by the label feature |
|
**kwargs: keyword arguments forwarded to super. |
|
""" |
        super(TestDatasetConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.text_features = text_features
        self.foo = foo
        self.process_label = process_label


class TestDatasetEvals(datasets.GeneratorBasedBuilder):
    """A test dataset builder for unit tests that integrate HF datasets."""

    BUILDER_CONFIGS = [
        TestDatasetConfig(
            name="juggernaut",
            description="this is a test dataset for our unit test integrating HF datasets",
            text_features={"context": "context", "continuation": "continuation"},
            data_dir="heroes",
        ),
        TestDatasetConfig(
            name="invoker",
            description="this is a test dataset for our unit test integrating HF datasets",
            text_features={"quas": "quas", "wex": "wex", "exort": "exort", "spell": "spell"},
            data_dir="heroes",
        ),
    ]

    def _info(self):
        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features}
        features["idx"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
        )

    def _split_generators(self, dl_manager):
        constructed_filepath = self.construct_filepath()
        data_file = dl_manager.download(constructed_filepath)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": data_file,
                },
            ),
        ]

    def construct_filepath(self):
        return self.config.name + "/data.jsonl"

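    # Expected on-disk format (a sketch inferred from `_generate_examples` below):
    # each line of data.jsonl is a standalone JSON object containing the column
    # names listed in `text_features`, e.g. for the "juggernaut" config a row
    # could look like {"context": "...", "continuation": "..."}.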
    def _generate_examples(self, data_file):
        with open(data_file, encoding="utf8") as f:
            for n, row in enumerate(f):
                data = json.loads(row)
                example = {feat: data[col] for feat, col in self.config.text_features.items()}
                example["idx"] = n
                yield example["idx"], example
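

# A minimal usage sketch, not part of the builder itself. It assumes a
# `juggernaut/data.jsonl` file exists next to this script and that the installed
# `datasets` version still supports loading local builder scripts; the exact
# `load_dataset` keyword arguments (e.g. `trust_remote_code`) depend on that version.
if __name__ == "__main__":
    import os

    script_path = os.path.abspath(__file__)
    # Build the "juggernaut" config and materialize its single TEST split.
    ds = datasets.load_dataset(script_path, "juggernaut", split="test")
    print(ds[0])  # e.g. {"context": ..., "continuation": ..., "idx": 0}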