lisawen0707 committed on
Commit
13de6d0
1 Parent(s): 8da06c8

Add README and dataset script

Files changed (1)
  1. soybean_dataset.py +102 -24
soybean_dataset.py CHANGED
@@ -21,44 +21,52 @@ import os
from typing import List
import datasets
import logging


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {A great new dataset},
- author={huggingface, Inc.
- },
- year={2020}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
- _URLS = {
-     "train": _URL + "train-v1.1.json",
-     "dev": _URL + "dev-v1.1.json",
- }


# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class SquadDataset(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

-     _URLS = _URLS
    VERSION = datasets.Version("1.1.0")

    def _info(self):
@@ -67,22 +75,92 @@ class SquadDataset(datasets.GeneratorBasedBuilder):
            description=_DESCRIPTION,
            features=datasets.Features(
                {
-                     "id": datasets.Value("string"),
-                     "title": datasets.Value("string"),
-                     "context": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "answers": datasets.features.Sequence(
-                         {"text": datasets.Value("string"), "answer_start": datasets.Value("int32"),}
-                     ),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
-             supervised_keys=None,
-             homepage="https://rajpurkar.github.io/SQuAD-explorer/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls_to_download = self._URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
from typing import List
import datasets
import logging
+ import csv
+ import numpy as np
+ from PIL import Image
+ import os
+ import io
+

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
+ @article{chen2023dataset,
+   title={A dataset of the quality of soybean harvested by mechanization for deep-learning-based monitoring and analysis},
+   author={Chen, M and Jin, C and Ni, Y and Yang, T and Xu, J},
+   journal={Data in Brief},
+   volume={52},
+   pages={109833},
+   year={2023},
+   publisher={Elsevier},
+   doi={10.1016/j.dib.2023.109833}
}
+
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
+ This dataset contains images captured during the mechanized harvesting of soybeans, aimed at facilitating the development of machine vision and deep learning models for quality analysis. It provides the original soybean images, a label indicating whether each image belongs to the training, validation, or testing split, and the corresponding segmentation images.
"""

# TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = "https://huggingface.co/datasets/lisawen/soybean_dataset"

# TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = "Under a Creative Commons license"

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URL = "/content/drive/MyDrive/sta_663/soybean/dataset.csv"
+


# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class SoybeanDataset(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

+     _URLS = _URL
    VERSION = datasets.Version("1.1.0")

    def _info(self):
            description=_DESCRIPTION,
            features=datasets.Features(
                {
+                     "unique_id": datasets.Value("string"),
+                     "sets": datasets.Value("string"),
+                     "original_image": datasets.Value("string"),
+                     "segmentation_image": datasets.Value("string"),
+
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
+             supervised_keys=("original_image","segmentation_image"),
+             #homepage="https://rajpurkar.github.io/SQuAD-explorer/",
            citation=_CITATION,
        )
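Worth flagging for a follow-up commit: the two image columns are declared as datasets.Value("string") while `_generate_examples` below yields numpy arrays, so the declared features will not match what is produced. A minimal alternative sketch, assuming the datasets.Image feature type is available in the installed datasets version (it accepts file paths or PIL images and handles decoding automatically):

    def _info(self):
        # Sketch only: declare the image columns as Image features so that file paths
        # or PIL images yielded by _generate_examples are encoded automatically.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "unique_id": datasets.Value("string"),
                    "sets": datasets.Value("string"),
                    "original_image": datasets.Image(),
                    "segmentation_image": datasets.Image(),
                }
            ),
            supervised_keys=("original_image", "segmentation_image"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )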

+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # Since the dataset is on Google Drive, you need to implement a way to download it using the Google Drive API.
+
+         # The path to the dataset file in Google Drive
+         dataset_path = "/content/drive/MyDrive/sta_663/soybean/dataset.csv"
+
+         # Check if the file exists (you may need to mount the drive and use the appropriate path)
+         if not os.path.exists(dataset_path):
+             raise FileNotFoundError(f"{dataset_path} does not exist. Have you mounted Google Drive?")
+
+         # Since we're using a local file, we don't need to download it, so we just return the path.
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,  # must be a concrete split name, not the datasets.Split class itself
+                 gen_kwargs={
+                     "filepath": dataset_path
+                 }
+             ),
+         ]
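Because the `sets` column in dataset.csv already marks each row as training, validation, or test, a natural refinement is one SplitGenerator per split. A sketch under the assumption that the column holds the literal values "train", "valid", and "test" (the actual labels are not visible in this commit); `_generate_examples` would then take the extra `split` keyword and skip rows whose `sets` value differs:

    def _split_generators(self, dl_manager):
        # Sketch: emit one generator per assumed value of the CSV's "sets" column.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={"filepath": _URL, "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION,
                                    gen_kwargs={"filepath": _URL, "split": "valid"}),
            datasets.SplitGenerator(name=datasets.Split.TEST,
                                    gen_kwargs={"filepath": _URL, "split": "test"}),
        ]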
+
+     def _generate_examples(self, filepath):
+         """Yields examples as (key, example) tuples."""
+
+         # Check if the file exists (you may need to mount the drive and use the appropriate path)
+         if not os.path.exists(filepath):
+             raise FileNotFoundError(f"{filepath} does not exist. Have you mounted Google Drive?")
+
+         # Read the dataset.csv
+         with open(filepath, encoding="utf-8") as f:
+             reader = csv.DictReader(f)
+
+             for row in reader:
+                 # Assuming the 'original_image' column has the full path to the image file
+                 original_image_path = row['original_image']
+                 segmentation_image_path = row['segmentation_image']
+                 sets = row['sets']
+
+                 # Open the original image and convert it to a numpy array
+                 with open(original_image_path, "rb") as image_file:
+                     original_image = Image.open(image_file)
+                     original_image_array = np.array(original_image)
+
+                 # Open the segmentation image and convert it to a numpy array
+                 with open(segmentation_image_path, "rb") as image_file:
+                     segmentation_image = Image.open(image_file)
+                     segmentation_image_array = np.array(segmentation_image)
+
+                 # Yield one example per CSV row, keyed by its unique_id
+                 yield row['unique_id'], {
+                     "sets": sets,
+                     "original_image": original_image_array,
+                     "segmentation_image": segmentation_image_array,
+                     # ... add other features if necessary
+                 }
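If the Image-feature route sketched earlier were adopted, the generator itself could shrink, since the library reads and decodes the files when examples are accessed. A sketch assuming the same CSV columns and the optional `split` argument from the previous sketch:

    def _generate_examples(self, filepath, split=None):
        # Sketch: with datasets.Image() features, yielding the image file path is enough.
        with open(filepath, encoding="utf-8") as f:
            for row in csv.DictReader(f):
                if split is not None and row["sets"] != split:
                    continue  # keep only rows belonging to the requested split
                yield row["unique_id"], {
                    "sets": row["sets"],
                    "original_image": row["original_image"],
                    "segmentation_image": row["segmentation_image"],
                }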
+
+
+     #### origin
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls_to_download = self._URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
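For completeness, a hedged usage sketch. The CSV path baked into _URL is specific to the author's Colab/Drive environment, so the script only runs where that path (and the image paths inside the CSV) exist; depending on the installed datasets version, loading a local script may also require trust_remote_code=True:

    import datasets

    # Sketch: build the dataset from the local loading script added in this commit.
    ds = datasets.load_dataset("soybean_dataset.py")
    print(ds)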