jeremierostan committed on
Commit cd6140c
1 Parent(s): f87eabd

Update app.py

Files changed (1): app.py (+63 -57)
app.py CHANGED
@@ -1,5 +1,7 @@
-import gradio as gr
 import os
+import openai
+import gradio as gr
+import time
 from langchain_openai import ChatOpenAI
 from langchain.agents import AgentExecutor, create_tool_calling_agent
 from langchain_community.tools.tavily_search import TavilySearchResults
@@ -7,16 +9,21 @@ from langchain_core.prompts import ChatPromptTemplate
 from datetime import datetime, timedelta

 # Set up API keys
-OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
-os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
-os.environ["TAVILY_API_KEY"] = TAVILY_API_KEY
+openai.api_key = os.getenv('OPENAI_API_KEY')
+TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
+assistant_id = os.getenv('ASSISTANT_ID')
+username = os.getenv('USERNAME')
+password = os.getenv('PASSWORD')

-# Initialize the LLM
-llm = ChatOpenAI(model="gpt-4o-mini")
+client = openai.Client(api_key=openai.api_key)
+assistant = client.beta.assistants.retrieve(assistant_id)
+thread = client.beta.threads.create()
+
+# Initialize the LLM for article generation
+llm_article = ChatOpenAI(model="gpt-4o-mini")

 # Set Up the Tavily Search Tool
-tools = [TavilySearchResults(max_results=3)]
+tools = [TavilySearchResults(max_results=4)]

 # CEFR levels and their descriptions
 CEFR_LEVELS = {
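Note: the Space secrets above (OPENAI_API_KEY, TAVILY_API_KEY, ASSISTANT_ID, USERNAME, PASSWORD) are read at import time and passed straight into openai.Client and assistants.retrieve. A minimal sketch, assuming those same variable names, of failing fast when one of them is missing instead of letting the client raise later:

import os

# The variable names match the secrets read above; REQUIRED_SECRETS itself is part of this sketch only.
REQUIRED_SECRETS = ("OPENAI_API_KEY", "TAVILY_API_KEY", "ASSISTANT_ID", "USERNAME", "PASSWORD")
missing = [name for name in REQUIRED_SECRETS if not os.getenv(name)]
if missing:
    raise RuntimeError(f"Missing required environment variables: {', '.join(missing)}")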
@@ -30,24 +37,24 @@ CEFR_LEVELS = {
 }

 # Create a Chat Prompt Template
-prompt = ChatPromptTemplate.from_messages([
+prompt_article = ChatPromptTemplate.from_messages([
     ("system", "You are a helpful assistant for ESL learners. Use the tavily_search_results_json tool to find current news information about the students' interests."),
     ("human", "{input}"),
     ("placeholder", "{agent_scratchpad}"),
 ])

 # Construct the Tools agent
-agent = create_tool_calling_agent(llm, tools, prompt)
+agent_article = create_tool_calling_agent(llm_article, tools, prompt_article)

 # Create an agent executor
-agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
+agent_executor_article = AgentExecutor(agent=agent_article, tools=tools, verbose=True)

 def get_news_content(topic):
     current_date = datetime.now().strftime("%Y-%m-%d")
     week_ago = (datetime.now() - timedelta(days=7)).strftime("%Y-%m-%d")
     query = f"Summarize the latest news about {topic} from {week_ago} to {current_date}"

-    result = agent_executor.invoke({"input": query})
+    result = agent_executor_article.invoke({"input": query})
     return result['output'], result.get('intermediate_steps', [])

 def generate_article(topic, content, urls, level, learning_objective=None):
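For orientation, a small usage sketch of the executor defined above; the helper functions in this file wrap exactly this call pattern (the topic string is only an illustrative input, not taken from the commit):

# Invoke the tool-calling agent directly; "output" holds the model's final answer.
result = agent_executor_article.invoke({"input": "Summarize this week's news about space exploration"})
print(result["output"])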
@@ -59,7 +66,7 @@ def generate_article(topic, content, urls, level, learning_objective=None):
     The article should be engaging, informative, and appropriate for the specified language level.
     Include a 'Sources' section at the end of the article with the following URLs: {urls}
     """
-    result = agent_executor.invoke({"input": prompt})
+    result = agent_executor_article.invoke({"input": prompt})
     return result['output']

 def extract_language_elements(article, level, learning_objective=None):
@@ -77,7 +84,7 @@ def extract_language_elements(article, level, learning_objective=None):
     Return the results in a structured format.
     Article: {article}
     """
-    result = agent_executor.invoke({"input": prompt})
+    result = agent_executor_article.invoke({"input": prompt})
     return result['output']

 def generate_feedback_and_takeaway(article, level, language_elements, learning_objective=None):
@@ -95,40 +102,7 @@ def generate_feedback_and_takeaway(article, level, language_elements, learning_o
     Article: {article}
     Language Elements: {language_elements}
     """
-    result = agent_executor.invoke({"input": prompt})
-    return result['output']
-
-def generate_response(user_input, context, level, learning_objective=None):
-    if level is None:
-        return "Please specify your English proficiency level."
-    if level not in CEFR_LEVELS:
-        return "Invalid English proficiency level specified."
-
-    objective_text = f"Additionally, focus on the following learning objective: {learning_objective}. " if learning_objective else ""
-    prompt = f"""
-    #Role
-    You are an ESL teacher
-    You answer students' questions about an article, helping them understand it, with a focus on language elements.
-    You also ask the student questions aligned with relevant language elements to check their understanding.
-    #Instructions
-    1. If the student asks a question: Based on the following article for a {level} ({CEFR_LEVELS[level]}) English language learner, respond to the user's input in a helpful manner.
-    2. When the user is ready, ask relevant questions to check for their understanding of the article, but also to practice the relevant language elements.
-    Ensure questions are challenging but appropriate for the {level} level.
-    Avoid questions with obvious answers.
-    Types of questions should include:
-    - Multiple-choice questions
-    - True/false questions
-    - Fill-in-the-blank questions
-    - Questions to apply the elements of language learned in new contexts
-    - Open-ended questions
-    Questions should become increasingly difficult as the student succeeds, and easier if they struggle.
-    When the student fails, provide a hint. If they fail again, provide the answer and an explanation.
-    {objective_text}
-    Article: {context['article']}
-    Language Elements: {context['language_elements']}
-    User Input: {user_input}
-    """
-    result = agent_executor.invoke({"input": prompt})
+    result = agent_executor_article.invoke({"input": prompt})
     return result['output']

 def main(topic, level, learning_objective=None):
@@ -142,7 +116,9 @@ def main(topic, level, learning_objective=None):
             context = {
                 'article': article,
                 'language_elements': language_elements,
-                'feedback_and_takeaway': feedback_and_takeaway
+                'feedback_and_takeaway': feedback_and_takeaway,
+                'level': level,
+                'learning_objective': learning_objective
             }
             return article, feedback_and_takeaway, context
         else:
@@ -150,9 +126,40 @@
     except Exception as e:
         return f"An error occurred: {str(e)}", "", None

-def chat(user_input, context, level, learning_objective):
-    response = generate_response(user_input, context, level, learning_objective)
-    return [(user_input, response)]
+# Function to handle chat interaction with the existing OpenAI assistant
+def chat_with_assistant(message, context):
+    if context is None:
+        return [("System", "Please generate an article first.")]
+
+    level = context['level']
+    article = context['article']
+    language_elements = context['language_elements']
+    learning_objective = context.get('learning_objective', '')
+
+    # Add the user's message to the thread
+    client.beta.threads.messages.create(
+        thread_id=thread.id,
+        role="user",
+        content=message
+    )
+
+    # Run the assistant
+    run = client.beta.threads.runs.create(
+        thread_id=thread.id,
+        assistant_id=assistant_id
+    )
+
+    # Wait for the assistant's response
+    while True:
+        run_status = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
+        if run_status.status == 'completed':
+            # Retrieve the assistant's response
+            messages = client.beta.threads.messages.list(thread_id=thread.id)
+            assistant_response = messages.data[-1].content[0].text.value
+            break
+        time.sleep(1)
+
+    return [(message, assistant_response)]

 # Gradio Interface
 with gr.Blocks() as demo:
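The loop added above polls until the run reports 'completed' and then reads messages.data[-1]; note that messages.list returns the newest message first by default, so the latest assistant reply is data[0]. A hedged sketch of a more defensive variant, using a hypothetical helper wait_for_reply and the same client object, that bounds the wait, surfaces failed runs, and requests only the newest message explicitly:

import time

def wait_for_reply(client, thread_id, run_id, timeout=60.0):
    # Poll the run until it finishes, rather than looping indefinitely.
    deadline = time.time() + timeout
    while time.time() < deadline:
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        if run.status == "completed":
            # Newest-first listing: data[0] is the assistant's latest reply.
            messages = client.beta.threads.messages.list(thread_id=thread_id, order="desc", limit=1)
            return messages.data[0].content[0].text.value
        if run.status in ("failed", "cancelled", "expired"):
            raise RuntimeError(f"Assistant run ended with status: {run.status}")
        time.sleep(1)
    raise TimeoutError("Timed out waiting for the assistant's reply.")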
@@ -172,16 +179,15 @@ with gr.Blocks() as demo:
         context.value = ctx
         return article, feedback

-    generate_btn.click(on_generate, inputs=[topic_input, level_input, objective_input], outputs=[article_output, feedback_output])
-
     def on_chat(user_input):
         if context.value is not None:
-            response = chat(user_input, context.value, level_input.value, objective_input.value)
+            response = chat_with_assistant(user_input, context.value)
             return response
         else:
-            return "Please generate an article first."
+            return [("System", "Please generate an article first.")]

+    generate_btn.click(on_generate, inputs=[topic_input, level_input, objective_input], outputs=[article_output, feedback_output])
     chat_input.submit(on_chat, inputs=[chat_input], outputs=[chat_output])

-demo.launch()
+demo.launch(auth=(username, password))
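demo.launch(auth=(username, password)) gates the Space behind the single USERNAME/PASSWORD pair read at the top of the file. As a side note, Gradio's launch() also accepts a callable for auth, which is a small sketch away from supporting several accounts; ALLOWED_USERS below is a hypothetical secret of the form "user:password,user2:password2", not something defined in this commit:

import os

def check_login(user, pw):
    # Hypothetical ALLOWED_USERS secret, e.g. "alice:pw1,bob:pw2"
    pairs = dict(item.split(":", 1) for item in os.getenv("ALLOWED_USERS", "").split(",") if ":" in item)
    return pairs.get(user) == pw

# demo.launch(auth=check_login)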