ShixuanAn committed on
Commit 86e85e9
1 Parent(s): 2caf7f1

Update hugging_face.py

Files changed (1)
  1. hugging_face.py +20 -18
hugging_face.py CHANGED
@@ -33,6 +33,7 @@ _HOMEPAGE = ""
 # TODO: Add the licence for the dataset here if you can find it
 _LICENSE = ""
 
+
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
@@ -42,7 +43,6 @@ _LICENSE = ""
 class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
     """TODO: Short description of my dataset."""
 
-    _URLS = _URLS
     VERSION = datasets.Version("1.1.0")
 
     def _info(self):
@@ -58,7 +58,7 @@ class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
                 "depth": datasets.Value("int32"),
             }),
             "image_path": datasets.Value("string"),
-            #"pics_array": datasets.Array3D(shape=(None, None, 3), dtype="uint8"),
+            # "pics_array": datasets.Array3D(shape=(None, None, 3), dtype="uint8"),
             "crack_type": datasets.Sequence(datasets.Value("string")),
             "crack_coordinates": datasets.Sequence(datasets.Features({
                 "x_min": datasets.Value("int32"),
@@ -74,41 +74,43 @@ class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
 
         urls_to_download = {
-            "dataset": "https://huggingface.co/datasets/ShixuanAn/RDD2020/resolve/main/RDD2020.zip"
+            "train": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/resolve/main/train.zip",
+            "test": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/resolve/main/test.zip",
+            "validation": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/resolve/main/validation.zip",
         }
 
-        # Download and extract the dataset using the dl_manager
-        downloaded_files = dl_manager.download_and_extract(urls_to_download["dataset"])
-
-        # Assuming the ZIP file extracts to a folder named 'RDD2020'
-        extracted_path = os.path.join(downloaded_files, "RDD2020")
+        downloaded_files = {
+            name: dl_manager.download_and_extract(url)
+            for name, url in urls_to_download.items()
+        }
+
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": os.path.join(extracted_path, "train"),
+                    "filepath": downloaded_files["train"],
                     "split": "train",
                 }
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": os.path.join(extracted_path, "test"),
-                    "split": "test"
+                    "filepath": downloaded_files["test"],
+                    "split": "test",
                 }
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.Validation,
+                name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath": os.path.join(extracted_path, "validation"),
-                    "split": "validation"
+                    "filepath": downloaded_files["validation"],
+                    "split": "validation",
                 }
-            )
+            ),
         ]
 
     def _generate_examples(self, filepath, split):
-
+
         # Iterate over each country directory
         for country_dir in ['Czech', 'India', 'Japan']:
            images_dir = f"{filepath}/{country_dir}/images"
@@ -120,7 +122,7 @@ class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
                 continue
 
                 image_id = f"{image_file.split('.')[0]}"
-
+
                 image_path = os.path.join(images_dir, image_file)
                 if annotations_dir:
                     annotation_file = image_id + '.xml'
@@ -152,4 +154,4 @@ class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
                 "image_path": image_path,
                 "crack_type": crack_type,
                 "crack_coordinates": crack_coordinates,
-            }
+            }
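
The updated _split_generators downloads one archive per split and hands each extracted path to gen_kwargs. A quick end-to-end check of the builder could look like the sketch below; it is not part of the commit, the repository id and field names are taken from the diff above, and it assumes a datasets 2.x release where loading a script-based dataset may require trust_remote_code=True.

    # Minimal smoke test for the updated builder (sketch, assumptions noted above).
    from datasets import load_dataset

    ds = load_dataset("ShixuanAn/RDD_2020", split="train", trust_remote_code=True)
    example = ds[0]
    print(example["image_path"])                                 # path to the source image
    print(example["crack_type"], example["crack_coordinates"])   # per-image annotations

As a design note, DownloadManager.download_and_extract also accepts a dict of URLs and returns a dict of extracted paths under the same keys, so the per-split comprehension introduced here could be collapsed into a single downloaded_files = dl_manager.download_and_extract(urls_to_download) call with the same result.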