Jiwonny29 committed on
Commit 043ecc1
1 Parent(s): bcbfb3b

Update test_dataset.py

Files changed (1):
  test_dataset.py  +36 -60
test_dataset.py CHANGED
@@ -15,17 +15,10 @@
 """TODO: Add a description here."""
 
 
-import csv
-import json
-import os
+import datasets
 import pandas as pd
 import numpy as np
 
-import datasets
-
-
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
 @InProceedings{huggingface:dataset,
 title = {A great new dataset},
@@ -35,29 +28,20 @@ year={2020}
 }
 """
 
-# TODO: Add description of the dataset here
-# You can copy an official description
 _DESCRIPTION = """\
 This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
 """
 
-# TODO: Add a link to an official homepage for the dataset here
 _HOMEPAGE = ""
 
-# TODO: Add the licence for the dataset here if you can find it
 _LICENSE = ""
 
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
 }
 
-
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class NewDataset(datasets.GeneratorBasedBuilder):
+class HealthStatisticsDataset(datasets.GeneratorBasedBuilder):
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
@@ -74,7 +58,6 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                     "Break_Out_Details": datasets.Value("string"),
                     "Break_Out_Type": datasets.Value("int32"),
                     "Life_Expectancy": datasets.Value("float32")
-                    # These are the features of your dataset like images, labels ...
                 }
             ),
             supervised_keys=None,
@@ -84,48 +67,42 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        # dl_paths = dl_manager.download_and_extract({
-        #     'train_csv': 'https://drive.google.com/file/d/1eChYmZ3RMq1v-ek1u6DD2m_dGIrz3sbi/view?usp=sharing'
-        # })
-        processed_filepath = self.preprocess_data("/content/drive/MyDrive/my_processed_data.csv")
+        data = pd.read_csv(dl_manager.download_and_extract(_URLS["first_domain"]))
+        processed_data = self.preprocess_data(data)
         return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "csvpath": processed_filepath,
-                },
-            ),
-        ]
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"data": processed_data},
+            ),
+        ]
 
-    def _generate_examples(self, csvpath):
-        with open(csvpath, encoding="utf-8") as f:
-            reader = csv.DictReader(f)
-            for key, row in enumerate(reader):
-                year = int(row['Year']) if 'Year' in row else None
-                if 'Geolocation' in row and isinstance(row['Geolocation'], str):
-                    geo_str = row['Geolocation'].replace('(', '').replace(')', '')
-                    latitude, longitude = map(float, geo_str.split(', '))
-                else:
-                    latitude, longitude = None, None
-                yield key, {
-                    "Year": year,
-                    "Location_Abbr": row.get('LocationAbbr', None),
-                    "Location_Desc": row.get('LocationDesc', None),
-                    "Geolocation": {
-                        "latitude": latitude,
-                        "longitude": longitude
-                    },
-                    "Disease_Type": int(row["Disease_Type"]) if "Disease_Type" in row else None,
-                    "Data_Value_Type": int(row["Data_Value_Type"]) if "Data_Value_Type" in row else None,
-                    "Data_Value": float(row["Data_Value"]) if "Data_Value" in row else None,
-                    "Break_Out_Category": row.get("Break_Out_Category", None),
-                    "Break_Out_Details": row.get("Break_Out_Details", None),
-                    "Break_Out_Type": int(row["Break_Out_Type"]) if 'Break_Out_Type' in row else None,
-                    "Life_Expectancy": float(row["Life_Expectancy"]) if row.get("Life_Expectancy") else None
-                }
-
+    def _generate_examples(self, data):
+        for key, row in data.iterrows():
+            year = int(row['Year']) if 'Year' in row else None
+            if 'Geolocation' in row and isinstance(row['Geolocation'], str):
+                geo_str = row['Geolocation'].replace('(', '').replace(')', '')
+                latitude, longitude = map(float, geo_str.split(', '))
+            else:
+                latitude, longitude = None, None
+            yield key, {
+                "Year": year,
+                "Location_Abbr": row.get('LocationAbbr', None),
+                "Location_Desc": row.get('LocationDesc', None),
+                "Geolocation": {
+                    "latitude": latitude,
+                    "longitude": longitude
+                },
+                "Disease_Type": int(row["Disease_Type"]) if "Disease_Type" in row else None,
+                "Data_Value_Type": int(row["Data_Value_Type"]) if "Data_Value_Type" in row else None,
+                "Data_Value": float(row["Data_Value"]) if "Data_Value" in row else None,
+                "Break_Out_Category": row.get("Break_Out_Category", None),
+                "Break_Out_Details": row.get("Break_Out_Details", None),
+                "Break_Out_Type": int(row["Break_Out_Type"]) if 'Break_Out_Type' in row else None,
+                "Life_Expectancy": float(row["Life_Expectancy"]) if row.get("Life_Expectancy") else None
+            }
+
     @staticmethod
-    def preprocess_data(filepath):
+    def preprocess_data(data):
         data = pd.read_csv("https://drive.google.com/file/d/1ktRNl7jg0Z83rkymD9gcsGLdVqVaFtd-/view?usp=drive_link")
         data = data[['YearStart', 'LocationAbbr', 'LocationDesc', 'Geolocation', 'Topic', 'Question', 'Data_Value_Type', 'Data_Value', 'Data_Value_Alt',
                      'Low_Confidence_Limit', 'High_Confidence_Limit', 'Break_Out_Category', 'Break_Out']]
@@ -206,5 +183,4 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         data2.rename(columns={'Question':'Disease_Type'}, inplace=True)
         data2['Life_Expectancy'] = np.where(data2['Break_Out_Type'] == 0, data2['Life_Expectancy'], np.nan)
         data2 = data2.reset_index(drop=True)
-        processed_filepath = '/content/drive/MyDrive/my_processed_data.csv'
-        return processed_filepath
+        return data2
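
A note on the revised data flow (an editorial observation, not part of the commit): `dl_manager.download_and_extract(...)` returns a local path to the downloaded resource, and for an archive such as the `.zip` placeholders in `_URLS` that path is the extracted directory, so feeding its result straight to `pd.read_csv` assumes the URL resolves to a single CSV file. Separately, as committed, `preprocess_data` immediately overwrites its `data` argument by re-reading from a Google Drive viewer link; URLs of the `/file/d/<id>/view` form normally return the HTML preview page rather than the raw file, which `pd.read_csv` cannot parse. A minimal sketch of the usual workaround, assuming the file is shared publicly (the helper name is hypothetical, not from this commit):

import pandas as pd

# Hypothetical helper: rewrite a Drive viewer link into the
# direct-download form, which serves the raw file bytes.
def drive_direct_url(view_url: str) -> str:
    # e.g. https://drive.google.com/file/d/<FILE_ID>/view?usp=drive_link
    file_id = view_url.split("/file/d/")[1].split("/")[0]
    return "https://drive.google.com/uc?export=download&id=" + file_id

url = "https://drive.google.com/file/d/1ktRNl7jg0Z83rkymD9gcsGLdVqVaFtd-/view?usp=drive_link"
data = pd.read_csv(drive_direct_url(url))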
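
For context on how the renamed `HealthStatisticsDataset` builder would be consumed: a script-based dataset is loaded by pointing `datasets.load_dataset` at the script file. A minimal sketch with an illustrative local path (recent versions of the library also ask for `trust_remote_code=True` when running dataset scripts):

from datasets import load_dataset

# The path is illustrative; point it at wherever test_dataset.py lives.
ds = load_dataset("path/to/test_dataset.py", split="train", trust_remote_code=True)
print(ds[0])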