# regulatory_comments/api_call.py
import requests
import json


def get_docket_ids(search_term):
    """Return docket IDs that match a search term on Regulations.gov."""
    url = "https://api.regulations.gov/v4/dockets"
    params = {
        'filter[searchTerm]': search_term,
        'api_key': "your_api_key"
    }
    response = requests.get(url, params=params)
    if response.status_code == 200:
        data = response.json()
        dockets = data['data']
        docket_ids = [docket['id'] for docket in dockets]
        return docket_ids
    else:
        # Return an empty list on failure so callers can still update a set safely.
        print(f"Error fetching docket IDs: {response.status_code}")
        return []


class RegulationsDataFetcher:
    # Replace with a valid Regulations.gov API key before running.
    API_KEY = "your_api_key"
    BASE_COMMENT_URL = 'https://api.regulations.gov/v4/comments'
    BASE_DOCKET_URL = 'https://api.regulations.gov/v4/dockets/'
    HEADERS = {
        'X-Api-Key': API_KEY,
        'Content-Type': 'application/json'
    }

    def __init__(self, docket_id):
        self.docket_id = docket_id
        self.docket_url = self.BASE_DOCKET_URL + docket_id
        self.dataset = []
    def fetch_comments(self):
        """Fetch a single page of 25 comments for this docket."""
        url = f'{self.BASE_COMMENT_URL}?filter[docketId]={self.docket_id}&page[number]=1&page[size]=25'
        response = requests.get(url, headers=self.HEADERS)
        if response.status_code == 200:
            return response.json()
        else:
            print(f'Failed to retrieve comments: {response.status_code}')
            return None
    def get_docket_info(self):
        """Return docket metadata as (agencyId, title, modifyDate, docketType, keywords)."""
        response = requests.get(self.docket_url, headers=self.HEADERS)
        if response.status_code == 200:
            attributes = response.json()['data']['attributes']
            return (attributes['agencyId'],
                    attributes['title'],
                    attributes['modifyDate'],
                    attributes['docketType'],
                    attributes['keywords'])
        else:
            print(f'Failed to retrieve docket info: {response.status_code}')
            return None
    def fetch_comment_details(self, comment_url):
        """Fetch detailed information for a single comment."""
        response = requests.get(comment_url, headers=self.HEADERS)
        if response.status_code == 200:
            return response.json()
        else:
            print(f'Failed to retrieve comment details: {response.status_code}')
            return None
    def collect_data(self):
        """Collect comments and docket metadata, reshaped into a nested dictionary."""
        data = self.fetch_comments()
        docket_info = self.get_docket_info()

        # Initialize the nested dictionary structure
        nested_data = {
            "id": self.docket_id,
            "title": docket_info[1] if docket_info else "Unknown Title",
            "context": docket_info[2] if docket_info else "Unknown Context",
            "purpose": docket_info[3] if docket_info else "Unknown Purpose",
            "keywords": docket_info[4] if docket_info else [],
            "comments": []
        }

        if data and 'data' in data:
            for comment in data['data']:
                comment_details = self.fetch_comment_details(comment['links']['self'])
                if comment_details and 'data' in comment_details and 'attributes' in comment_details['data']:
                    comment_data = comment_details['data']['attributes']
                    nested_comment = {
                        "text": comment_data.get('comment', ''),
                        "comment_id": comment['id'],
                        "comment_url": comment['links']['self'],
                        "comment_date": comment['attributes']['postedDate'],
                        "comment_title": comment['attributes']['title'],
                        "commenter_fname": comment_data.get('firstName', ''),
                        "commenter_lname": comment_data.get('lastName', ''),
                        "comment_length": len(comment_data.get('comment', '')) if comment_data.get('comment') is not None else 0
                    }
                    nested_data["comments"].append(nested_comment)
                    # Keep at most 10 comments per docket.
                    if len(nested_data["comments"]) >= 10:
                        break

        return nested_data
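
# Illustrative shape of the value returned by collect_data(); the field values
# shown here are hypothetical, not taken from a real docket:
# {
#     "id": "EPA-HQ-OAR-0001",
#     "title": "...docket title...",
#     "context": "2024-01-01T00:00:00Z",      # modifyDate
#     "purpose": "Rulemaking",                 # docketType
#     "keywords": ["..."],
#     "comments": [{"text": "...", "comment_id": "...", ...}]   # at most 10 entries
# }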

# CREATING DATASET
opioid_related_terms = [
    # Types of Opioids
    "opioids",
    "heroin",
    "morphine",
    "fentanyl",
    "methadone",
    "oxycodone",
    "hydrocodone",
    "codeine",
    "tramadol",
    "prescription opioids",
    # Withdrawal Support
    "lofexidine",
    "buprenorphine",
    "naloxone",
    # Related Phrases
    "opioid epidemic",
    "opioid abuse",
    "opioid crisis",
    "opioid overdose",
    "opioid tolerance",
    "opioid treatment program",
    "medication assisted treatment",
]
docket_ids = set()
all_data = []

# Collect the union of docket IDs across all search terms.
for term in opioid_related_terms:
    docket_ids.update(get_docket_ids(term))

# Fetch comments for each docket and keep only dockets that have at least one comment.
for docket_id in docket_ids:
    fetcher = RegulationsDataFetcher(docket_id)
    docket_data = fetcher.collect_data()
    if len(docket_data["comments"]) != 0:
        print(f'{docket_id} has comments')
        all_data.append(docket_data)

json_file_path = 'docket_comments.json'
with open(json_file_path, 'w') as f:
    json.dump(all_data, f)
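
# A quick way to sanity-check the output file afterwards (a minimal sketch; the
# counts depend on whichever dockets the searches above returned):
# with open(json_file_path) as f:
#     saved = json.load(f)
# print(f"Saved {len(saved)} dockets, "
#       f"{sum(len(d['comments']) for d in saved)} comments in total")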