Datasets: Update flickr30k-fa.py
Browse files — flickr30k-fa.py (+9 −14)
flickr30k-fa.py
CHANGED
@@ -12,8 +12,7 @@ _DESCRIPTION = """Flickr30k filtered and translated to Persian made by Sajjad Ay
|
|
12 |
_DOWNLOAD_URLS = {
|
13 |
"train": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/annotations_train.csv",
|
14 |
"test": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/annotations_test.csv",
|
15 |
-
"
|
16 |
-
"test_dataset": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/flickr30k-fa_test.zip",
|
17 |
}
|
18 |
|
19 |
|
@@ -50,26 +49,22 @@ class Flickr30kFa(datasets.GeneratorBasedBuilder):
|
|
50 |
|
51 |
train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
|
52 |
test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])
|
53 |
-
|
54 |
-
|
55 |
-
train_extracted_path = dl_manager.extract(archive_path) if not dl_manager.is_streaming else ""
|
56 |
-
|
57 |
-
archive_path = dl_manager.download(_DOWNLOAD_URLS["test_dataset"])
|
58 |
-
test_extracted_path = dl_manager.extract(archive_path) if not dl_manager.is_streaming else ""
|
59 |
|
60 |
return [
|
61 |
datasets.SplitGenerator(
|
62 |
-
name=datasets.Split.TRAIN, gen_kwargs={"
|
63 |
),
|
64 |
datasets.SplitGenerator(
|
65 |
-
name=datasets.Split.TEST, gen_kwargs={"
|
66 |
),
|
67 |
]
|
68 |
|
69 |
-
def _generate_examples(self,
|
70 |
-
logger.info("⏳ Generating examples from = %s",
|
71 |
|
72 |
-
with open(
|
73 |
csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)
|
74 |
|
75 |
# Skip header
|
@@ -77,5 +72,5 @@ class Flickr30kFa(datasets.GeneratorBasedBuilder):
|
|
77 |
|
78 |
for id_, row in enumerate(csv_reader):
|
79 |
label, filename = row
|
80 |
-
image_path = os.path.join(
|
81 |
yield id_, {"image_path": image_path, "label": label}
|
|
|
12 |
_DOWNLOAD_URLS = {
|
13 |
"train": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/annotations_train.csv",
|
14 |
"test": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/annotations_test.csv",
|
15 |
+
"data": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/images.zip",
|
|
|
16 |
}
|
17 |
|
18 |
|
|
|
49 |
|
50 |
train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
|
51 |
test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])
|
52 |
+
archive_path = dl_manager.download(_DOWNLOAD_URLS["data"])
|
53 |
+
images_dir = dl_manager.extract(archive_path) if not dl_manager.is_streaming else ""
|
|
|
|
|
|
|
|
|
54 |
|
55 |
return [
|
56 |
datasets.SplitGenerator(
|
57 |
+
name=datasets.Split.TRAIN, gen_kwargs={"annotations_file": train_path, "images_dir": images_dir}
|
58 |
),
|
59 |
datasets.SplitGenerator(
|
60 |
+
name=datasets.Split.TEST, gen_kwargs={"annotations_file": test_path, "images_dir": images_dir}
|
61 |
),
|
62 |
]
|
63 |
|
64 |
def _generate_examples(self, annotations_file, images_dir):
    """Yield ``(index, example)`` pairs for one split.

    Reads the split's annotation CSV — one ``label, filename`` pair per
    row — and joins each file name onto *images_dir* to build the
    example's image path.
    """
    logger.info("⏳ Generating examples from = %s", annotations_file)

    with open(annotations_file, encoding="utf-8") as fh:
        reader = csv.reader(fh, quotechar='"', skipinitialspace=True)

        # Skip header
        # NOTE(review): the header-skip statement falls between the diff
        # hunks shown here; presumably next(reader) — confirm against the
        # full file.
        next(reader)

        for idx, (label, filename) in enumerate(reader):
            yield idx, {"image_path": os.path.join(images_dir, filename), "label": label}