# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""
import csv
import numpy as np
import pandas as pd
import datasets
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2020}
}
"""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
"first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
"second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
}
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class NewDataset(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("1.1.0")
# This is an example of a dataset with multiple configurations.
# If you don't want/need to define several sub-sets in your dataset,
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
# If you need to make complex sub-parts in the datasets with configurable options
# You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
# BUILDER_CONFIG_CLASS = MyBuilderConfig
# You will be able to load one or the other configurations in the following list with
# data = datasets.load_dataset('my_dataset', 'first_domain')
# data = datasets.load_dataset('my_dataset', 'second_domain')
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
]
    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object, which contains the information and feature types for the dataset
if self.config.name == "first_domain": # This is the name of the configuration selected in BUILDER_CONFIGS above
features = datasets.Features(
{
"year": datasets.Value("int32"),
"locationabbr": datasets.Value("string"),
"locationdesc": datasets.Value("string"),
"geolocation": datasets.Features({"latitude": datasets.Value("float32"), "longitude": datasets.Value("float32")}),
"disease_type": datasets.Value("int32"),
"data_value_type": datasets.Value("int32"),
"data_value": datasets.Value("float32"),
"break_out_category": datasets.Value("string"),
"break_out_details": datasets.Value("string"),
"break_out_type": datasets.Value("int32"),
"life_expectancy": datasets.Value("float32")
# These are the features of your dataset like images, labels ...
}
            )
        else:
            # Only "first_domain" features are implemented; fail fast for any other config.
            raise ValueError(f"Features are not defined for configuration: {self.config.name}")
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
# TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced by paths to local files.
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
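        # Sketch of that mirrored-structure behaviour (hypothetical URLs):
        #   dl_manager.download_and_extract({"train": ["u1", "u2"], "test": "u3"})
        #   # -> {"train": ["/cache/x", "/cache/y"], "test": "/cache/z"}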
        # Google Drive "view" links serve an HTML page rather than the file itself;
        # the uc?export=download form below points at the same file id directly.
        dl_paths = dl_manager.download_and_extract({
            'train_csv': 'https://drive.google.com/uc?export=download&id=1eChYmZ3RMq1v-ek1u6DD2m_dGIrz3sbi'
        })
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"csvpath": dl_paths['train_csv'],
},
),
]
def _generate_examples(self, csvpath):
with open(csvpath, encoding="utf-8") as f:
reader = csv.DictReader(f)
for key, row in enumerate(reader):
yield key, {
"year": int(row["Year"]),
"location_abbr": row["LocationAbbr"],
"location_desc": row["LocationDesc"],
"geolocation": {
"latitude": float(row["latitude"]),
"longitude": float(row["longitude"])
},
"disease_type": int(row["Disease_Type"]),
"data_value_type": int(row["Data_Value_Type"]),
"data_value": float(row["Data_Value"]),
"break_out_category": row["Break_Out_Category"],
"break_out_details": row["Break_Out_Details"],
"break_out_type": int(row["Break_Out_Type"]),
"life_expectancy": float(row["Life_Expectancy"]) if row["Life_Expectancy"] else None
}
@staticmethod
def preprocess_data(filepath):
data = pd.read_csv("/content/drive/MyDrive/National_Vital_Statistics_System__NVSS__-_National_Cardiovascular_Disease_Surveillance_Data_20240129.csv")
data = data[['YearStart', 'LocationAbbr', 'LocationDesc', 'Geolocation', 'Topic', 'Question', 'Data_Value_Type', 'Data_Value', 'Data_Value_Alt',
'Low_Confidence_Limit', 'High_Confidence_Limit', 'Break_Out_Category', 'Break_Out']]
def convert_to_tuple(geo_str):
if isinstance(geo_str, str):
geo_str = geo_str.replace('POINT (', '').replace(')', '')
lon, lat = map(float, geo_str.split())
return (lon, lat)
else:
return geo_str
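        # Example (values illustrative): the WKT strings in this column look like
        # "POINT (-86.63 32.84)" with longitude first, so
        # convert_to_tuple("POINT (-86.63 32.84)") -> (-86.63, 32.84);
        # non-string values such as NaN pass through unchanged.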
        data['Geolocation'] = data['Geolocation'].apply(convert_to_tuple)
        # _generate_examples reads separate 'latitude'/'longitude' columns from the
        # exported CSV, so split the (lon, lat) tuples into those columns here.
        data['longitude'] = data['Geolocation'].apply(lambda g: g[0] if isinstance(g, tuple) else None)
        data['latitude'] = data['Geolocation'].apply(lambda g: g[1] if isinstance(g, tuple) else None)
disease_columns = [
'Major cardiovascular disease mortality rate among US adults (18+); NVSS',
'Diseases of the heart (heart disease) mortality rate among US adults (18+); NVSS',
'Acute myocardial infarction (heart attack) mortality rate among US adults (18+); NVSS',
'Coronary heart disease mortality rate among US adults (18+); NVSS',
'Heart failure mortality rate among US adults (18+); NVSS',
'Cerebrovascular disease (stroke) mortality rate among US adults (18+); NVSS',
'Ischemic stroke mortality rate among US adults (18+); NVSS',
'Hemorrhagic stroke mortality rate among US adults (18+); NVSS'
]
disease_column_mapping = {column_name: index for index, column_name in enumerate(disease_columns)}
data['Question'] = data['Question'].apply(lambda x: disease_column_mapping.get(x, -1))
sex_columns = ['Male', 'Female']
sex_column_mapping = {column_name: index + 1 for index, column_name in enumerate(sex_columns)}
age_columns = ['18-24', '25-44', '45-64', '65+']
age_column_mapping = {column_name: index + 1 for index, column_name in enumerate(age_columns)}
race_columns = ['Non-Hispanic White', 'Non-Hispanic Black', 'Hispanic', 'Other']
race_column_mapping = {column_name: index + 1 for index, column_name in enumerate(race_columns)}
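        # Resulting integer encodings (0 is reserved for 'Overall' further below):
        #   sex:  {'Male': 1, 'Female': 2}
        #   age:  {'18-24': 1, '25-44': 2, '45-64': 3, '65+': 4}
        #   race: {'Non-Hispanic White': 1, 'Non-Hispanic Black': 2, 'Hispanic': 3, 'Other': 4}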
def map_break_out_category(value):
if value in sex_column_mapping:
return sex_column_mapping[value]
elif value in age_column_mapping:
return age_column_mapping[value]
elif value in race_column_mapping:
return race_column_mapping[value]
else:
return value
data['Break_Out_Type'] = data['Break_Out'].apply(map_break_out_category)
        data.drop(columns=['Topic', 'Low_Confidence_Limit', 'High_Confidence_Limit', 'Data_Value_Alt'], inplace=True)
data['Data_Value_Type'] = data['Data_Value_Type'].apply(lambda x: 1 if x == 'Age-Standardized' else 0)
data.rename(columns={'Question':'Disease_Type', 'YearStart':'Year', 'Break_Out':'Break_Out_Details'}, inplace=True)
data['Break_Out_Type'] = data['Break_Out_Type'].replace('Overall', 0)
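        # After the two steps above, Break_Out_Type holds 0 for 'Overall' rows and
        # the 1-based codes from the sex/age/race mappings for every other breakout.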
        # Life-table extracts for 2000/2005/2010/2015. The Drive "view" links are
        # rewritten to direct-download form so pd.read_csv receives CSV bytes
        # instead of an HTML page.
        life_table_ids = [
            '1ktRNl7jg0Z83rkymD9gcsGLdVqVaFtd-',  # 2000
            '1xZqeOgj32-BkOhDTZVc4k_tp1ddnOEh7',  # 2005
            '1ItqHBuuUa38PVytfahaAV8NWwbhHMMg8',  # 2010
            '1rOgQY1RQiry2ionTKM_UWgT8cYD2E0vX',  # 2015
        ]
        life_tables = []
        for file_id in life_table_ids:
            lt = pd.read_csv(f'https://drive.google.com/uc?export=download&id={file_id}')
            # Keep the all-race, at-birth ('<1 year') life-expectancy rows only.
            lt = lt[(lt['race_name'] == 'Total') & (lt['age_name'] == '<1 year')]
            lt = lt[['location_name', 'val']].rename(columns={'val': 'Life_Expectancy'})
            life_tables.append(lt)
        lt_data = pd.concat(life_tables)
lt_data.drop_duplicates(subset=['location_name'], inplace=True)
        data2 = pd.merge(data, lt_data, how='inner', left_on='LocationDesc', right_on='location_name')
        data2.drop(columns=['location_name'], inplace=True)
        data2 = data2[(data2['Break_Out_Details'] != '75+') & (data2['Break_Out_Details'] != '35+')]
        # Life expectancy only applies to the overall rows (Break_Out_Type == 0);
        # broken-out rows are set to NaN.
        data2['Life_Expectancy'] = np.where(data2['Break_Out_Type'] == 0, data2['Life_Expectancy'], np.nan)
        processed_filepath = '/content/drive/MyDrive/my_processed_data.csv'
        # Write the processed table so the returned path points at real data.
        data2.to_csv(processed_filepath, index=False)
        return processed_filepath
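if __name__ == "__main__":
    # Usage sketch, not part of the dataset API: 'nvss_raw.csv' is a placeholder
    # for a local copy of the NVSS surveillance export named above.
    processed = NewDataset.preprocess_data("nvss_raw.csv")
    print(f"Processed CSV written to: {processed}")
    # Loading the script directly exercises _split_generators/_generate_examples;
    # recent versions of the datasets library require trust_remote_code=True here.
    ds = datasets.load_dataset(__file__, "first_domain", split="train", trust_remote_code=True)
    print(ds[0])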