import gradio as gr
import requests
import os
import json
from transformers import pipeline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Twitter API v2 credentials and endpoint. The bearer token is read from the
# environment rather than hard-coded; avoid printing the raw secret to the logs.
bearer_token = os.environ.get("BEARER_TOKEN")
if bearer_token is None:
    print("BEARER_TOKEN is not set; requests to the Twitter API will fail.")

search_url = "https://api.twitter.com/2/tweets/search/recent"
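# Note: the recent-search endpoint only covers roughly the last seven days of tweets,
# so the counts produced below reflect recent sentiment only.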

def bearer_oauth(r):
    """
    Method required by bearer token authentication.
    """
    r.headers["Authorization"] = f"Bearer {bearer_token}"
    r.headers["User-Agent"] = "v2RecentSearchPython"
    return r

def connect_to_endpoint(url, params):
    response = requests.get(url, auth=bearer_oauth, params=params)
    print(response.status_code)
    if response.status_code != 200:
        raise Exception(response.status_code, response.text)
    return response.json()
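
# For reference, a successful recent-search response is expected to look roughly like
# the sketch below (shape based on the Twitter API v2 docs; only "data[*].text" is used here):
# {
#   "data": [{"id": "...", "author_id": "...", "text": "..."}, ...],
#   "meta": {"result_count": 100, ...}
# }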

def fetch_tweets(tag):
    # Quote the tag so it is matched as an exact phrase.
    q = "\"" + tag + "\""
    query_params = {'query': q, 'tweet.fields': 'author_id', 'max_results': 100}
    json_response = connect_to_endpoint(search_url, query_params)
    # print(json.dumps(json_response, indent=4, sort_keys=True))
    phrases = []
    # "data" may be absent when the query matches no tweets, so fall back to an empty list.
    for entry in json_response.get("data", []):
        phrases.append(entry["text"])
    return phrases

pipe = pipeline("text-classification", model="mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis")
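# For a batch of texts the pipeline returns one dict per input, roughly of the form
# (scores below are illustrative, not real model output):
#   [{'label': 'positive', 'score': 0.98}, {'label': 'neutral', 'score': 0.91}, ...]
# This financial-news sentiment model is expected to emit the labels
# 'positive', 'neutral' and 'negative', which analyze_phrases relies on.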

def analyze_phrases(phrases):
    # Count tweets per sentiment class and keep up to three example tweets
    # (with the model's confidence score) for each class.
    positive = 0
    positive_examples = {}
    negative = 0
    negative_examples = {}
    neutral = 0
    neutral_examples = {}
    outputs = pipe(phrases)
    for index, x in enumerate(outputs):
        if x['label'] == 'positive':
            positive += 1
            if positive <= 3:
                positive_examples[phrases[index]] = x['score']
        elif x['label'] == 'neutral':
            neutral += 1
            if neutral <= 3:
                neutral_examples[phrases[index]] = x['score']
        elif x['label'] == 'negative':
            negative += 1
            if negative <= 3:
                negative_examples[phrases[index]] = x['score']
    counts = [positive, neutral, negative]
    return counts, positive_examples, neutral_examples, negative_examples
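
# Illustrative example (made-up inputs and scores, not real output):
#   analyze_phrases(["Shares rally after earnings", "Trading was flat today"])
#   -> ([1, 1, 0], {"Shares rally after earnings": 0.97}, {"Trading was flat today": 0.88}, {})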

def calculate_sentiment(tag):
    phrases = fetch_tweets(tag)
    counts, positive_examples, neutral_examples, negative_examples = analyze_phrases(phrases)
    # Plain-text summary of the counts (currently unused; the pie chart shows the same data).
    output = "positive: " + str(counts[0]) + "\n" + "neutral: " + str(counts[1]) + "\n" + "negative: " + str(counts[2])
    plt.style.use('_mpl-gallery-nogrid')
    # make data
    colors = ['green', 'yellow', 'red']
    labels = ["Positive", "Neutral", "Negative"]
    # plot
    fig, ax = plt.subplots(figsize=(10, 6))
    wedges, texts = ax.pie(counts, colors=colors, radius=3, center=(4, 4),
                           wedgeprops={"linewidth": 1, "edgecolor": "white"}, labeldistance=1.05)
    # Create a legend
    ax.legend(wedges, labels, title="Categories", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1))
    ax.set(xlim=(0, 8), ylim=(0, 8))
    # Build HTML tables of example tweets; only the positive and negative tables are displayed.
    html_content = ""
    positive_tweets = list(positive_examples.items())
    p_df = pd.DataFrame(positive_tweets, columns=["Tweet", "Confidence"])
    positive_table = p_df.to_html(index=False)
    neutral_tweets = list(neutral_examples.items())
    n_df = pd.DataFrame(neutral_tweets, columns=["Tweet", "Confidence"])
    neutral_table = n_df.to_html(index=False)
    negative_tweets = list(negative_examples.items())
    neg_df = pd.DataFrame(negative_tweets, columns=["Tweet", "Confidence"])
    negative_table = neg_df.to_html(index=False)
    html_content += "<h2>Recent Positive Tweets</h2>" + positive_table
    html_content += "<h2>Recent Negative Tweets</h2>" + negative_table
    return fig, html_content
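
# calculate_sentiment returns a matplotlib Figure and an HTML string, matching the
# "plot" and "html" output components declared below.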
iface = gr.Interface(fn=calculate_sentiment, inputs="text", outputs=["plot", "html"])
iface.launch(debug=True)
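
# To try this locally (assuming the dependencies above are installed and a valid
# Twitter API v2 bearer token is available):
#   BEARER_TOKEN="..." python app.py
# Gradio prints a local URL; entering a search term such as "NVDA" renders the
# pie chart and the example-tweet tables.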