|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
from typing import List |
|
import datasets |
|
import logging |
|
import csv |
|
import numpy as np |
|
from PIL import Image |
|
import os |
|
import io |
|
import pandas as pd |
|
import matplotlib.pyplot as plt |
|
from numpy import asarray |
|
import requests |
|
from io import BytesIO |
|
from numpy import asarray |
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
@article{chen2023dataset, |
|
title={A dataset of the quality of soybean harvested by mechanization for deep-learning-based monitoring and analysis}, |
|
author={Chen, M and Jin, C and Ni, Y and Yang, T and Xu, J}, |
|
journal={Data in Brief}, |
|
volume={52}, |
|
pages={109833}, |
|
year={2023}, |
|
publisher={Elsevier}, |
|
doi={10.1016/j.dib.2023.109833} |
|
} |
|
|
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
This dataset contains images captured during the mechanized harvesting of soybeans, aimed at facilitating the development of machine vision and deep learning models for quality analysis. It contains information of original soybean pictures in different forms, labels of whether the soybean belongs to training, validation, or testing datasets, segmentation class of soybean pictures in one dataset. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/lisawen/soybean_dataset" |
|
|
|
|
|
_LICENSE = "Under a Creative Commons license" |
|
|
|
|
|
|
|
|
|
_URL = "/content/drive/MyDrive/sta_663/soybean/dataset.csv" |
|
_URLs = { |
|
"train" : "https://raw.githubusercontent.com/lisawen0707/soybean/main/train_dataset.csv", |
|
"test": "https://raw.githubusercontent.com/lisawen0707/soybean/main/test_dataset.csv", |
|
"valid": "https://raw.githubusercontent.com/lisawen0707/soybean/main/valid_dataset.csv" |
|
} |
|
|
|
|
|
class SoybeanDataset(datasets.GeneratorBasedBuilder):

    """Builder for the mechanized soybean-harvest image dataset.

    NOTE(review): this file contains the whole script twice — this class is
    re-defined (completely, including `_generate_examples`) further down, and
    that later definition shadows this one at import time. This copy is
    effectively dead code; the duplication should be removed.
    """

    # Per-split CSV index URLs (module-level constant aliased onto the class).
    _URLs = _URLs

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Declare the dataset's features, supervised keys, homepage and citation."""

        return datasets.DatasetInfo(

            description=_DESCRIPTION,

            features=datasets.Features(

                {

                    "unique_id": datasets.Value("string"),

                    "sets": datasets.Value("string"),

                    "original_image": datasets.Image(),

                    "segmentation_image": datasets.Image(),

                }

            ),

            # (input, target) pairing used for supervised training.
            supervised_keys=("original_image","segmentation_image"),

            homepage="https://github.com/lisawen0707/soybean/tree/main",

            citation=_CITATION,

        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the per-split CSV index files and map each one to a split."""

        urls_to_download = self._URLs

        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [

            datasets.SplitGenerator(

                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),

            datasets.SplitGenerator(

                name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),

            datasets.SplitGenerator(

                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),

        ]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
from typing import List |
|
import datasets |
|
import logging |
|
import csv |
|
import numpy as np |
|
from PIL import Image |
|
import os |
|
import io |
|
import pandas as pd |
|
import matplotlib.pyplot as plt |
|
from numpy import asarray |
|
import requests |
|
from io import BytesIO |
|
from numpy import asarray |
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
@article{chen2023dataset, |
|
title={A dataset of the quality of soybean harvested by mechanization for deep-learning-based monitoring and analysis}, |
|
author={Chen, M and Jin, C and Ni, Y and Yang, T and Xu, J}, |
|
journal={Data in Brief}, |
|
volume={52}, |
|
pages={109833}, |
|
year={2023}, |
|
publisher={Elsevier}, |
|
doi={10.1016/j.dib.2023.109833} |
|
} |
|
|
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
This dataset contains images captured during the mechanized harvesting of soybeans, aimed at facilitating the development of machine vision and deep learning models for quality analysis. It contains information of original soybean pictures in different forms, labels of whether the soybean belongs to training, validation, or testing datasets, segmentation class of soybean pictures in one dataset. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/lisawen/soybean_dataset" |
|
|
|
|
|
_LICENSE = "Under a Creative Commons license" |
|
|
|
|
|
|
|
|
|
_URL = "/content/drive/MyDrive/sta_663/soybean/dataset.csv" |
|
_URLs = { |
|
"train" : "https://raw.githubusercontent.com/lisawen0707/soybean/main/train_dataset.csv", |
|
"test": "https://raw.githubusercontent.com/lisawen0707/soybean/main/test_dataset.csv", |
|
"valid": "https://raw.githubusercontent.com/lisawen0707/soybean/main/valid_dataset.csv" |
|
} |
|
|
|
|
|
class SoybeanDataset(datasets.GeneratorBasedBuilder):
    """Builder for the mechanized soybean-harvest image dataset.

    Each split is described by a remote CSV index file whose rows reference
    the URLs of an original photograph and its segmentation mask. Generation
    downloads all referenced images concurrently, then yields one example
    per CSV row.
    """

    # Per-split CSV index URLs (module-level constant aliased onto the class).
    _URLs = _URLs

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Declare the dataset's features, supervised keys, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "unique_id": datasets.Value("string"),
                    "sets": datasets.Value("string"),
                    "original_image": datasets.Image(),
                    "segmentation_image": datasets.Image(),
                }
            ),
            # (input, target) pairing used for supervised training.
            supervised_keys=("original_image", "segmentation_image"),
            homepage="https://github.com/lisawen0707/soybean/tree/main",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the per-split CSV index files and map each one to a split.

        Args:
            dl_manager: datasets download manager handling caching/retrieval.

        Returns:
            One SplitGenerator per split, each passing its local CSV path to
            ``_generate_examples`` via ``filepath``.
        """
        urls_to_download = self._URLs
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
        ]

    def download_image(self, image_url):
        """Fetch one image over HTTP and return it as a PIL Image.

        Returns None (after logging the error) on any request failure so a
        single bad URL does not abort generation of the whole split.
        """
        try:
            # Timeout so a hung server cannot stall dataset generation forever.
            response = requests.get(image_url, timeout=30)
            response.raise_for_status()
            return Image.open(BytesIO(response.content))
        except requests.RequestException as e:
            logging.error("Error downloading %s: %s", image_url, e)
            return None

    def download_images_concurrently(self, image_urls):
        """Download many images in parallel.

        Args:
            image_urls: iterable of image URLs (duplicates allowed).

        Returns:
            dict mapping each URL to a PIL Image, or None if it failed.
        """
        # BUGFIX: ThreadPoolExecutor / as_completed were used without ever
        # being imported in this file, raising NameError at generation time.
        from concurrent.futures import ThreadPoolExecutor, as_completed

        images = {}
        with ThreadPoolExecutor(max_workers=5) as executor:
            future_to_url = {executor.submit(self.download_image, url): url for url in image_urls}
            for future in as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    images[url] = future.result()
                except Exception as exc:
                    logging.error("%s generated an exception: %s", url, exc)
        return images

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            filepath: local path of the split's CSV index file; expected
                columns: unique_id, sets, original_image, segmentation_image.
        """
        logging.info("generating examples from = %s", filepath)

        # First pass over the CSV: collect every referenced image URL so all
        # downloads run concurrently up front (set() deduplicates).
        image_urls = []
        with open(filepath, encoding="utf-8") as f:
            for row in csv.DictReader(f):
                image_urls.append(row['original_image'])
                image_urls.append(row['segmentation_image'])

        downloaded_images = self.download_images_concurrently(set(image_urls))

        # Second pass: emit one example per row, skipping rows whose images
        # could not be downloaded (already logged above).
        with open(filepath, encoding="utf-8") as f:
            for row in csv.DictReader(f):
                unique_id = row['unique_id']
                original_image = downloaded_images.get(row['original_image'])
                segmentation_image = downloaded_images.get(row['segmentation_image'])

                if original_image is None or segmentation_image is None:
                    logging.error("Missing image for %s", unique_id)
                    continue

                yield unique_id, {
                    "unique_id": unique_id,
                    "sets": row['sets'],
                    "original_image": original_image,
                    "segmentation_image": segmentation_image,
                }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|