Datasets:
Tasks:
Visual Question Answering
Formats:
parquet
Languages:
English
Size:
10K - 100K
ArXiv:
Tags:
medical
License:
flaviagiammarino
committed on
Commit
•
33455bf
1
Parent(s):
a97487c
Upload processing.py
Browse files- scripts/processing.py +6 -1
scripts/processing.py
CHANGED
@@ -2,6 +2,7 @@
|
|
2 |
creates an "imagefolder" dataset and pushes it to the Hugging Face Hub.
|
3 |
"""
|
4 |
|
|
|
5 |
import os
|
6 |
import shutil
|
7 |
import pickle
|
@@ -17,6 +18,11 @@ for split in ["train", "val", "test"]:
|
|
17 |
|
18 |
# drop the duplicate image-question-answer triplets
|
19 |
data = data.drop_duplicates(ignore_index=True)
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
# copy the images using unique file names
|
22 |
data.insert(0, "file_name", "")
|
@@ -32,4 +38,3 @@ for split in ["train", "val", "test"]:
|
|
32 |
# push the dataset to the hub
|
33 |
dataset = datasets.load_dataset("imagefolder", data_dir="data/")
|
34 |
dataset.push_to_hub("flaviagiammarino/path-vqa")
|
35 |
-
|
|
|
2 |
creates an "imagefolder" dataset and pushes it to the Hugging Face Hub.
|
3 |
"""
|
4 |
|
5 |
+
import re
|
6 |
import os
|
7 |
import shutil
|
8 |
import pickle
|
|
|
18 |
|
19 |
# drop the duplicate image-question-answer triplets
|
20 |
data = data.drop_duplicates(ignore_index=True)
|
21 |
+
|
22 |
+
# perform some basic data cleaning/normalization
|
23 |
+
f = lambda x: re.sub(' +', ' ', str(x).lower()).replace(" ?", "?").strip()
|
24 |
+
data["question"] = data["question"].apply(f)
|
25 |
+
data["answer"] = data["answer"].apply(f)
|
26 |
|
27 |
# copy the images using unique file names
|
28 |
data.insert(0, "file_name", "")
|
|
|
38 |
# push the dataset to the hub
|
39 |
dataset = datasets.load_dataset("imagefolder", data_dir="data/")
|
40 |
dataset.push_to_hub("flaviagiammarino/path-vqa")
|
|