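# Hugging Face `datasets` loading script: globs the tar archives in this
# dataset repository, streams their image files, and yields examples of
# {"image", "post_id"}.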
import os
import posixpath
import datasets
from datasets import DatasetInfo, DownloadManager
from fsspec.core import url_to_fs

# Image file extensions to extract from the tar archives.
_EXTENSION = [".png", ".jpg", ".jpeg", ".webp", ".bmp"]


class DanbooruDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        # add number before name for sorting
        datasets.BuilderConfig(name="full"),
    ]

    def _info(self) -> DatasetInfo:
        features = {
            "image": datasets.Image(),
            "post_id": datasets.Value("int64"),
        }
        info = datasets.DatasetInfo(
            features=datasets.Features(features),
            supervised_keys=None,
            citation="",
        )
        return info

    def _split_generators(self, dl_manager: DownloadManager):
        # Rewrite the resolved endpoint URL into an hf:// URL that fsspec can
        # glob, e.g. https://huggingface.co/datasets/user/repo/resolve/main
        # becomes hf://datasets/user/repo@main. `_base_path` is a private
        # DownloadManager attribute.
        base_path = dl_manager._base_path
        if base_path.startswith(datasets.config.HF_ENDPOINT):
            base_path = base_path[len(datasets.config.HF_ENDPOINT):].replace("/resolve/", "@", 1)
            base_path = "hf://" + base_path.lstrip("/")
        fs, path = url_to_fs(base_path)
        # Find every tar archive in the repository, download them, and open
        # each one as an iterable of (path, file object) pairs.
        urls = fs.glob(posixpath.join(path, "**/*.tar"), detail=False)
        archives = dl_manager.download(["hf://" + url for url in urls])
        archives = [dl_manager.iter_archive(archive) for archive in archives]
        return [datasets.SplitGenerator(name="train", gen_kwargs={"archives": archives})]

    def _generate_examples(self, archives):
        for archive in archives:
            for path, f in archive:
                path_root, path_ext = os.path.splitext(path)
                if path_ext.lower() not in _EXTENSION:
                    continue
                # Archive members are named by Danbooru post id, e.g. 123456.png.
                post_id = int(os.path.basename(path_root))
                yield path, {"image": {"bytes": f.read()}, "post_id": post_id}
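

# Usage sketch (not part of the builder): assumes this script is uploaded as the
# dataset's loading script on the Hub; "user/danbooru" is a hypothetical repo id.
if __name__ == "__main__":
    from datasets import load_dataset

    # Streaming avoids downloading every tar archive before iterating.
    ds = load_dataset("user/danbooru", "full", split="train",
                      streaming=True, trust_remote_code=True)
    sample = next(iter(ds))
    print(sample["post_id"], sample["image"].size)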