# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Artwork Images: a dataset for predicting the century in which an artwork was created."""

import pandas as pd

import datasets
from datasets.tasks import ImageClassification

_HOMEPAGE = "https://huggingface.co/datasets/AIPI540/test2/tree/main"

# Citation carried over from the ImageNet-Sketch template this script was adapted from.
_CITATION = """\
@inproceedings{wang2019learning,
  title={Learning Robust Global Representations by Penalizing Local Predictive Power},
  author={Wang, Haohan and Ge, Songwei and Lipton, Zachary and Xing, Eric P},
  booktitle={Advances in Neural Information Processing Systems},
  pages={10506--10518},
  year={2019}
}
"""

_DESCRIPTION = """\
Artwork Images, for predicting the century in which an artwork was created.
"""

_URL = "https://huggingface.co/datasets/AIPI540/Art2/resolve/main/final_art_data.parquet"

# The original script referenced an undefined `classes` name (the template's
# `from .classes import IMAGENET2012_CLASSES` import was commented out).
# The actual century label names must be supplied here, or imported from a
# companion module, e.g.:
#     from .classes import ARTWORK_CLASSES as classes
classes = []  # TODO: fill in the list of century label names


class Artwork(datasets.GeneratorBasedBuilder):
    """Artwork Images - a dataset of artwork images labeled by century of creation."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "label": datasets.features.ClassLabel(names=classes),
                    "image_data": datasets.Value("binary"),
                }
            ),
            # supervised_keys is (input, output): the image bytes are the
            # input and the class label is the target.
            supervised_keys=("image_data", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image_data", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        # The whole dataset lives in a single parquet file; `download` is
        # enough since there is no archive to extract.
        data_file = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_file},
            ),
        ]

    def _generate_examples(self, filepath):
        # Read the parquet file inside the generator so that
        # _split_generators only handles the download.
        df = pd.read_parquet(filepath, engine="pyarrow")
        for idx, row in enumerate(df.itertuples()):
            # _generate_examples must yield (key, example) pairs; the original
            # script yielded bare dicts and left debug prints in the loop.
            yield idx, {
                # Assumes `row.label` is an integer index into `classes`.
                "label": classes[row.label],
                "image_data": row.image_data,
            }
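

# Usage sketch (not part of the original script): once `classes` is filled in,
# the dataset can be loaded through this script with `datasets.load_dataset`.
# The image decoding below assumes `image_data` holds encoded image bytes
# (e.g. JPEG/PNG) that PIL can open; adjust if the parquet stores raw arrays.
if __name__ == "__main__":
    import io

    from PIL import Image
    from datasets import load_dataset

    # Point load_dataset at this script file (or at the hosting repo id).
    ds = load_dataset(__file__, split="train")
    example = ds[0]
    # ClassLabel stores integers; int2str recovers the century name.
    print(ds.features["label"].int2str(example["label"]))
    image = Image.open(io.BytesIO(example["image_data"]))
    print(image.size)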