lisawen committed
Commit 7ca70ae
1 Parent(s): 87a9a87

Update soybean_dataset.py

Files changed (1)
  1. soybean_dataset.py +21 -32
soybean_dataset.py CHANGED
@@ -122,51 +122,38 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
         ]
 
 
-    def __init__(self, max_workers=5):
-        # Initialize a ThreadPoolExecutor with the desired number of workers
-        self.executor = ThreadPoolExecutor(max_workers=max_workers)
-
-    def process_image(self, image_url):
-        # This function is now a static method that doesn't need self
-        response = requests.get(image_url)
-        response.raise_for_status()  # This will raise an exception if there is a download error
-        img = Image.open(BytesIO(response.content))
-        return img
-
-    def download_images(self, image_urls):
-        # Use the executor to download images concurrently
-        # and return a future to image map
-        future_to_url = {self.executor.submit(self.process_image, url): url for url in image_urls}
-        return future_to_url
+    def process_image(self,image_url):
+        response = requests.get(image_url)
+        response.raise_for_status()  # This will raise an exception if there is a download error
+
+        # Open the image from the downloaded bytes and return the PIL Image
+        img = Image.open(BytesIO(response.content))
+        return img
+
+
 
     def _generate_examples(self, filepath):
+        #"""Yields examples as (key, example) tuples."""
         logging.info("generating examples from = %s", filepath)
 
         with open(filepath, encoding="utf-8") as f:
             data = csv.DictReader(f)
 
-            # Create a set to collect all unique image URLs to download
-            image_urls = {row['original_image'] for row in data}
-            image_urls.update(row['segmentation_image'] for row in data)
-
-            # Start the batch download
-            future_to_url = self.download_images(image_urls)
-
-            # Reset the file pointer to the start for the second pass
-            f.seek(0)
-            next(data)  # Skip header
 
             for row in data:
+                # Assuming the 'original_image' column has the full path to the image file
                 unique_id = row['unique_id']
-                original_image_url = row['original_image']
-                segmentation_image_url = row['segmentation_image']
+                original_image_path = row['original_image']
+                segmentation_image_path = row['segmentation_image']
                 sets = row['sets']
 
-                # Wait for the individual image futures to complete and get the result
-                original_image = future_to_url[self.executor.submit(self.process_image, original_image_url)].result()
-                segmentation_image = future_to_url[self.executor.submit(self.process_image, segmentation_image_url)].result()
+                original_image = self.process_image(original_image_path)
+                segmentation_image = self.process_image(segmentation_image_path)
 
-                yield unique_id, {
+
+                # Here you need to replace 'initial_radius', 'final_radius', 'initial_angle', 'final_angle', 'target'
+                # with actual columns from your CSV or additional processing you need to do
+                yield row['unique_id'], {
                     "unique_id": unique_id,
                     "sets": sets,
                     "original_image": original_image,
@@ -174,6 +161,8 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
                     # ... add other features if necessary
                 }
 
+
+
 
 
 
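
With this commit, the builder makes a single pass over the CSV and downloads each image synchronously via process_image, rather than pre-fetching URLs through a ThreadPoolExecutor. Below is a minimal standalone sketch of that flow outside the datasets builder class, under stated assumptions: "examples.csv" is a placeholder path, the CSV columns (unique_id, sets, original_image, segmentation_image) are taken from the diff above, and yielding segmentation_image is an assumption since the committed dict elides it behind "# ... add other features if necessary".

import csv
import logging
from io import BytesIO

import requests
from PIL import Image


def process_image(image_url):
    # Mirrors the committed helper: download the file and return a PIL Image.
    response = requests.get(image_url)
    response.raise_for_status()  # This will raise an exception if there is a download error
    return Image.open(BytesIO(response.content))


def generate_examples(filepath):
    # Standalone version of the committed _generate_examples loop:
    # a single pass over the CSV, fetching both images per row synchronously.
    logging.info("generating examples from = %s", filepath)
    with open(filepath, encoding="utf-8") as f:
        for row in csv.DictReader(f):
            yield row["unique_id"], {
                "unique_id": row["unique_id"],
                "sets": row["sets"],
                "original_image": process_image(row["original_image"]),
                "segmentation_image": process_image(row["segmentation_image"]),
            }


if __name__ == "__main__":
    # "examples.csv" is a placeholder; the CSV is assumed to have
    # unique_id, sets, original_image, and segmentation_image columns.
    for key, example in generate_examples("examples.csv"):
        print(key, example["sets"], example["original_image"].size)
        break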