ro-h committed
Commit c0459e9
1 Parent(s): 3bbab5f

Delete regulatory_comments.py

Files changed (1)
  1. regulatory_comments.py +0 -113
regulatory_comments.py DELETED
@@ -1,113 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import json
- import datasets
-
- # Description of the dataset
- _DESCRIPTION = """\
- United States governmental agencies often make proposed regulations open to the public for comment.
- Proposed regulations are organized into "dockets". This project will use Regulation.gov public API
- to aggregate and clean public comments for dockets that mention opioid use.
-
- Each example will consist of one docket, and include metadata such as docket id, docket title, etc.
- Each docket entry will also include information about the top 10 comments, including comment metadata
- and comment text.
- """
-
- # Homepage URL of the dataset
- _HOMEPAGE = "https://www.regulations.gov/"
-
- # URL to download the dataset
- _URLS = {"url": "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/docket_comments_v6.json"}
-
- # Class definition for handling the dataset
- class RegComments(datasets.GeneratorBasedBuilder):
-
-     # Version of the dataset
-     VERSION = datasets.Version("1.1.4")
-
-     # Method to define the structure of the dataset
-     def _info(self):
-         # Defining the structure of the dataset
-         features = datasets.Features({
-             "id": datasets.Value("string"),
-             "agency": datasets.Value("string"),  # Added in
-             "title": datasets.Value("string"),
-             "update_date": datasets.Value("string"),  # Added in
-             "update_time": datasets.Value("string"),  # Added in
-             "purpose": datasets.Value("string"),
-             "keywords": datasets.Sequence(datasets.Value("string")),
-             "comments": datasets.Sequence({
-                 "text": datasets.Value("string"),
-                 "comment_id": datasets.Value("string"),
-                 "comment_url": datasets.Value("string"),
-                 "comment_date": datasets.Value("string"),
-                 "comment_time": datasets.Value("string"),
-                 "commenter_fname": datasets.Value("string"),
-                 "commenter_lname": datasets.Value("string"),
-                 "comment_length": datasets.Value("int32")
-             })
-         })
-
-         # Returning the dataset structure
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE
-         )
-
-     # Method to handle dataset splitting (e.g., train/test)
-     def _split_generators(self, dl_manager):
-         urls = _URLS["url"]
-         data_dir = dl_manager.download_and_extract(urls)
-         # Defining the split (here, only train split is defined)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": data_dir,
-                 },
-             ),
-         ]
-
-     # Method to generate examples from the dataset
-     def _generate_examples(self, filepath):
-         """This function returns the examples in the raw (text) form."""
-         key = 0
-         with open(filepath, 'r', encoding='utf-8') as f:
-             data = json.load(f)
-             for docket in data:
-                 # Extracting data fields from each docket
-                 docket_id = docket["id"]
-                 docket_agency = docket["agency"]
-                 docket_title = docket["title"]
-                 docket_update_date = docket["update_date"]
-                 docket_update_time = docket["update_time"]
-                 docket_purpose = docket.get("purpose", "unspecified")
-                 docket_keywords = docket.get("keywords", [])
-                 comments = docket["comments"]
-
-                 # Yielding each docket with its information
-                 yield key, {
-                     "id": docket_id,
-                     "agency": docket_agency,
-                     "title": docket_title,
-                     "update_date": docket_update_date,
-                     "update_time": docket_update_time,
-                     "purpose": docket_purpose,
-                     "keywords": docket_keywords,
-                     "comments": comments
-                 }
-                 key += 1
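
For context, a Hub loading script like the one deleted above is normally consumed through the `datasets` library rather than run directly. A minimal usage sketch, assuming the `ro-h/regulatory_comments` dataset repo still resolves on the Hub (the `trust_remote_code` flag is only needed on `datasets` versions that gate script-based datasets):

    from datasets import load_dataset

    # Download the JSON file referenced in _URLS and build the TRAIN split
    # defined by the loading script.
    ds = load_dataset("ro-h/regulatory_comments", split="train", trust_remote_code=True)

    example = ds[0]
    print(example["title"])
    # A Sequence of dicts is exposed as a dict of lists, so comment texts live here:
    print(len(example["comments"]["text"]))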