import streamlit as st
from meta_ai_api import MetaAI
from urllib.parse import urlparse
import pandas as pd
import plotly.express as px
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
import json

# Initialize Meta AI API
ai = MetaAI()
# Page config
st.set_page_config(
    page_title="Meta AI Query Analysis - a Free SEO Tool by WordLift",
    page_icon="img/fav-ico.png",
    layout="centered",
    initial_sidebar_state="collapsed",
    menu_items={
        'Get Help': 'https://wordlift.io/book-a-demo/',
        'About': "# This is a demo app for Meta AI SEO Optimization"
    }
)
# Sidebar
st.sidebar.image("img/logo-wordlift.png")

def local_css(file_name):
    with open(file_name) as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)

local_css("style.css")
def fetch_response(query):
    response = ai.prompt(message=query)
    return response
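# Note: based on how the response is consumed below, ai.prompt() is assumed to return a
# dict shaped roughly like this (illustrative sketch, not the library's documented contract):
#   {"message": "answer text...", "sources": [{"link": "https://...", "title": "..."}]}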
def display_sources(sources):
    if sources:
        for source in sources:
            # Parse the domain from the URL
            domain = urlparse(source['link']).netloc
            # Format and display the domain and title
            st.markdown(f"- **{domain}**: [{source['title']}]({source['link']})", unsafe_allow_html=True)
    else:
        st.write("No sources available.")
# ---------------------------------------------------------------------------- #
# Sentiment Analysis Function
# ---------------------------------------------------------------------------- #

# Download the VADER lexicon for sentiment analysis
nltk.download('vader_lexicon')

# Initialize the Sentiment Intensity Analyzer
sid = SentimentIntensityAnalyzer()
def sentiment_analysis(text):
    # Split the text into sentences, dropping empty or whitespace-only fragments
    sentences = [sentence.strip() for sentence in text.split('.') if sentence.strip()]
    # Create a DataFrame to hold the content and sentiment scores
    df = pd.DataFrame(sentences, columns=['content'])
    # Calculate sentiment scores for each sentence
    df['sentiment_scores'] = df['content'].apply(lambda x: sid.polarity_scores(x))
    # Split sentiment_scores into separate columns
    df = pd.concat([df.drop(['sentiment_scores'], axis=1), df['sentiment_scores'].apply(pd.Series)], axis=1)
    # Determine the dominant sentiment and its confidence
    df['dominant_sentiment'] = df[['neg', 'neu', 'pos']].idxmax(axis=1)
    df['confidence'] = df[['neg', 'neu', 'pos']].max(axis=1)
    return df
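# Sketch of the resulting DataFrame (values are illustrative): VADER's polarity_scores()
# contributes 'neg', 'neu', 'pos' and 'compound' columns, so each row looks roughly like:
#   content                      neg   neu   pos   compound  dominant_sentiment  confidence
#   "Polar bears are at risk"    0.32  0.68  0.00  -0.34     neu                 0.68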
# ---------------------------------------------------------------------------- #
# Advanced Analysis
# ---------------------------------------------------------------------------- #

def fetch_advanced_analysis(query, msg):
    analysis_prompt = f"""
    Analyze the user's request: '{query}', and the response: '{msg}'.
    Based on this analysis, generate a detailed JSON response including:
    1. The user's intent,
    2. Up to four follow-up questions,
    3. The main entities mentioned in the response.

    Example of expected JSON format:
    {{
        "user_intent": "Identify the effects of climate change on polar bears",
        "follow_up_questions": [
            "What are the primary threats to polar bears today?",
            "How does the melting ice affect their habitat?",
            "What conservation efforts are in place for polar bears?",
            "How can individuals contribute to these efforts?"
        ],
        "entities": {{
            "animal": ["polar bears"],
            "issue": ["climate change"],
            "actions": ["conservation efforts"]
        }}
    }}
    """
    # Assume ai is an initialized MetaAI instance that can send prompts to the AI service
    advanced_response = ai.prompt(message=analysis_prompt)
    return advanced_response
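# Note: the model is expected (but not guaranteed) to embed a JSON object inside the returned
# 'message' text; parse_analysis() below extracts and decodes that block.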
def parse_analysis(analysis_message):
    try:
        start = analysis_message.find('{')
        end = analysis_message.rfind('}') + 1  # Find the last '}' and include it
        if start != -1 and end > start:  # rfind() returns -1 when no '}' exists, so check against start
            json_str = analysis_message[start:end]
            print("Debug JSON String:", json_str)  # Continue to use this for debugging
            analysis_data = json.loads(json_str)
            return analysis_data
        else:
            return {"error": "Valid JSON data not found in the response"}
    except json.JSONDecodeError as e:
        return {"error": "Failed to decode JSON", "details": str(e)}
# ---------------------------------------------------------------------------- #
# Main Function
# ---------------------------------------------------------------------------- #

def main():
    # Path to the image
    image_path = 'img/meta-ai-logo.png'  # Replace with your image's filename and extension

    # Create two columns
    col1, col2 = st.columns([1, 2])  # Adjust the ratio as needed for your layout

    # Use the first column to display the image
    with col1:
        st.image(image_path, width=60)

    # Use the second column to display the title and other content
    with col2:
        st.title("Meta AI SEO Tool")

    # Collapsible box with a link to the site
    with st.expander("ℹ️ Important Information", expanded=False):
        st.markdown("""
        - 🚨 **This is an experimental tool**: Functionality might vary, and it may not always work as expected.
        - 📖 **Learn more about our research**: Understand what Meta AI is and why SEO matters by reading our in-depth article. [Read about Meta AI and SEO](https://wordlift.io/blog/en/meta-ai-seo/)""")

    # User input
    user_query = st.text_area("Enter your query:", height=150, key="query_overview")
    submit_button = st.button("Analyze Query", key="submit_overview")

    # Create tabs
    tab1, tab2, tab3 = st.tabs(["Overview", "Analysis", "Sentiment"])

    # Tab 1: Overview - Showing the initial response and sources
    with tab1:
        if submit_button and user_query:
            response = fetch_response(user_query)
            msg = response.get('message', 'No response message.')
            st.write(msg)
            with st.expander("Show Sources"):
                display_sources(response.get('sources', []))

    # Tab 2: Analysis - Showing the result of the advanced analysis
    with tab2:
        # Guard on the same condition as Tab 1 so `msg` is always defined when used here
        if submit_button and user_query:
            advanced_response = fetch_advanced_analysis(user_query, msg)
            advanced_msg = advanced_response.get('message', 'No advanced analysis available.')
            analysis_data = parse_analysis(advanced_msg)
            if "error" not in analysis_data:
                st.write("#### User Intent:", analysis_data['user_intent'])
                st.divider()  # 👈 A horizontal rule
                st.write("### Follow-up Questions:")
                for question in analysis_data['follow_up_questions']:
                    st.write("- " + question)
                st.divider()
                st.write("#### Identified Concepts:")
                for entity_type, entities in analysis_data['entities'].items():
                    st.write(f"**{entity_type.capitalize()}**: {', '.join(entities)}")
                st.divider()
            else:
                st.warning(analysis_data.get("error", "The advanced analysis could not be parsed."))

    # Tab 3: Sentiment - Displaying sentiment analysis of the response
    with tab3:
        if submit_button and user_query:
            df_sentiment = sentiment_analysis(msg)
            fig = px.scatter(df_sentiment, y='dominant_sentiment', color='dominant_sentiment', size='confidence',
                             hover_data=['content'],
                             color_discrete_map={"neg": "firebrick", "neu": "navajowhite", "pos": "darkgreen"},
                             labels={'dominant_sentiment': 'Sentiment'},
                             title='Sentiment Analysis of the Response')
            fig.update_layout(width=800, height=300)
            st.plotly_chart(fig)

if __name__ == "__main__":
    main()