Update backupapp.py
backupapp.py CHANGED (+186, -109)
@@ -1,7 +1,5 @@
 import streamlit as st
 import streamlit.components.v1 as components
-
-import openai
 import os
 import base64
 import glob
@@ -17,9 +15,11 @@ import re
 import textract
 import zipfile
 import random
-
+import httpx # add 11/13/23
+import asyncio
+from openai import OpenAI
+#from openai import AsyncOpenAI
 from datetime import datetime
-from openai import ChatCompletion
 from xml.etree import ElementTree as ET
 from bs4 import BeautifulSoup
 from collections import deque
@@ -34,11 +34,84 @@ from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationalRetrievalChain
 from templates import css, bot_template, user_template
 from io import BytesIO
+from contextlib import redirect_stdout
+
 
-#
-st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
+# set page config once
+st.set_page_config(page_title="Python AI Pair Programmer", layout="wide")
 
+# UI for sidebar controls
 should_save = st.sidebar.checkbox("💾 Save", value=True)
+col1, col2, col3, col4 = st.columns(4)
+with col1:
+    with st.expander("Settings 🧠💾", expanded=True):
+        # File type for output, model choice
+        menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
+        choice = st.sidebar.selectbox("Output File Type:", menu)
+        model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
+
+# Define a context dictionary to maintain the state between exec calls
+context = {}
+
+def create_file(filename, prompt, response, should_save=True):
+    if not should_save:
+        return
+
+    # Extract base filename without extension
+    base_filename, ext = os.path.splitext(filename)
+
+    # Initialize the combined content
+    combined_content = ""
+
+    # Add Prompt with markdown title and emoji
+    combined_content += "# Prompt 📝\n" + prompt + "\n\n"
+
+    # Add Response with markdown title and emoji
+    combined_content += "# Response 💬\n" + response + "\n\n"
+
+    # Check for code blocks in the response
+    resources = re.findall(r"```([\s\S]*?)```", response)
+    for resource in resources:
+        # Check if the resource contains Python code
+        if "python" in resource.lower():
+            # Remove the 'python' keyword from the code block
+            cleaned_code = re.sub(r'^\s*python', '', resource, flags=re.IGNORECASE | re.MULTILINE)
+
+            # Add Code Results title with markdown and emoji
+            combined_content += "# Code Results 📊\n"
+
+            # Redirect standard output to capture it
+            original_stdout = sys.stdout
+            sys.stdout = io.StringIO()
+
+            # Execute the cleaned Python code within the context
+            try:
+                exec(cleaned_code, context)
+                code_output = sys.stdout.getvalue()
+                combined_content += f"```\n{code_output}\n```\n\n"
+                realtimeEvalResponse = "# Code Results 📊\n" + "```" + code_output + "```\n\n"
+                st.code(realtimeEvalResponse)
+
+            except Exception as e:
+                combined_content += f"```python\nError executing Python code: {e}\n```\n\n"
+
+            # Restore the original standard output
+            sys.stdout = original_stdout
+        else:
+            # Add non-Python resources with markdown and emoji
+            combined_content += "# Resource 🛠️\n" + "```" + resource + "```\n\n"
+
+    # Save the combined content to a Markdown file
+    if should_save:
+        with open(f"{base_filename}.md", 'w') as file:
+            file.write(combined_content)
+        st.code(combined_content)
+
+        # Create a Base64 encoded link for the file
+        with open(f"{base_filename}.md", 'rb') as file:
+            encoded_file = base64.b64encode(file.read()).decode()
+        href = f'<a href="data:file/markdown;base64,{encoded_file}" download="{filename}">Download File 📄</a>'
+        st.markdown(href, unsafe_allow_html=True)
 
 
 # Read it aloud
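The hunk above moves create_file() to the top of the script; it now runs any fenced Python block found in the model's response and captures whatever that block prints by swapping sys.stdout. The commit also imports contextlib.redirect_stdout but still performs the swap by hand, so the following is only a sketch of how the same capture could look with that helper (run_and_capture and shared_context are illustrative names, not part of the app):

import io
from contextlib import redirect_stdout

def run_and_capture(code: str, namespace: dict) -> str:
    # Execute the code in a shared namespace and return whatever it printed.
    buffer = io.StringIO()
    try:
        with redirect_stdout(buffer):
            exec(code, namespace)
    except Exception as e:
        return f"Error executing Python code: {e}"
    return buffer.getvalue()

shared_context = {}
print(run_and_capture("x = 2 + 2\nprint(x)", shared_context))  # -> 4
print(run_and_capture("print(x * 10)", shared_context))        # -> 40; state persists between calls

Reusing one namespace dictionary across calls is what lets successive code blocks build on each other, exactly as the module-level context = {} above does.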
@@ -72,47 +145,53 @@ def readitaloud(result):
     components.html(documentHTML5, width=800, height=300)
     #return result
 
+def generate_filename(prompt, file_type):
+    central = pytz.timezone('US/Central')
+    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
+    replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
+    safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
+    return f"{safe_date_time}_{safe_prompt}.{file_type}"
+
 # Chat and Chat with files
 def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
     model = model_choice
-    conversation = [{'role': 'system', 'content': 'You are a
+    conversation = [{'role': 'system', 'content': 'You are a python script writer.'}]
     conversation.append({'role': 'user', 'content': prompt})
     if len(document_section)>0:
         conversation.append({'role': 'assistant', 'content': document_section})
-
     start_time = time.time()
     report = []
     res_box = st.empty()
     collected_chunks = []
     collected_messages = []
-
     key = os.getenv('OPENAI_API_KEY')
-
-
+
+    client = OpenAI(
+        api_key= os.getenv('OPENAI_API_KEY')
+    )
+    stream = client.chat.completions.create(
         model='gpt-3.5-turbo',
         messages=conversation,
-
-
-
-
-
-        chunk_message = chunk['choices'][0]['delta'] # extract the message
+        stream=True,
+    )
+    all_content = "" # Initialize an empty string to hold all content
+    for part in stream:
+        chunk_message = (part.choices[0].delta.content or "")
         collected_messages.append(chunk_message) # save the message
-
-        content=chunk["choices"][0].get("delta",{}).get("content")
-
+        content=part.choices[0].delta.content
         try:
-            report.append(content)
             if len(content) > 0:
+                report.append(content)
+                all_content += content
                 result = "".join(report).strip()
-                #result = result.replace("\n", "")
                 res_box.markdown(f'*{result}*')
         except:
             st.write(' ')
-
-    full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
+    full_reply_content = all_content
    st.write("Elapsed time:")
    st.write(time.time() - start_time)
+    filename = generate_filename(full_reply_content, choice)
+    create_file(filename, prompt, full_reply_content, should_save)
     readitaloud(full_reply_content)
     return full_reply_content
 
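This hunk is the heart of the migration: the pre-1.0 module-level ChatCompletion streaming call is replaced by an explicit OpenAI client, client.chat.completions.create(..., stream=True), with each delta read from part.choices[0].delta.content. A minimal sketch of that pattern on its own, assuming openai>=1.0 is installed and OPENAI_API_KEY is set in the environment:

import os
from openai import OpenAI

client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

stream = client.chat.completions.create(
    model='gpt-3.5-turbo',
    messages=[{'role': 'user', 'content': 'Say hello in one word.'}],
    stream=True,
)

full_reply = ""
for part in stream:
    # delta.content can be None on the final chunk, hence the "or".
    full_reply += part.choices[0].delta.content or ""
print(full_reply)

One detail worth noting: with the 1.0 client, non-streaming responses are objects rather than dicts, so the response['choices'][0]['message']['content'] lookup kept in chat_with_file_contents below would normally be written response.choices[0].message.content.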
@@ -121,17 +200,96 @@ def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
     conversation.append({'role': 'user', 'content': prompt})
     if len(file_content)>0:
         conversation.append({'role': 'assistant', 'content': file_content})
-
+    client = OpenAI(
+        api_key= os.getenv('OPENAI_API_KEY')
+    )
+    response = client.chat.completions.create(model=model_choice, messages=conversation)
     return response['choices'][0]['message']['content']
 
-
 def link_button_with_emoji(url, title, emoji_summary):
     emojis = ["📚", "🏥", "🛡️", "🩺", "🔬", "💊", "🧪", "👨‍⚕️", "👩‍⚕️"]
     random_emoji = random.choice(emojis)
     st.markdown(f"[{random_emoji} {emoji_summary} - {title}]({url})")
 
+
+# Python parts and their corresponding emojis, with expanded details
+python_parts = {
+    "Syntax": "✍️",
+    "Data Types": "📊",
+    "Control Structures": "🔄",
+    "Functions": "🔧",
+    "Classes": "🏗️",
+    "API Interaction": "🌐",
+    "Data Visualization": "📈",
+    "Error Handling": "⚠️",
+    "Libraries": "📚"
+}
+
+python_parts_extended = {
+    "Syntax": "✍️ (Variables, Comments, Printing)",
+    "Data Types": "📊 (Numbers, Strings, Lists, Tuples, Sets, Dictionaries)",
+    "Control Structures": "🔄 (If, Elif, Else, Loops, Break, Continue)",
+    "Functions": "🔧 (Defining, Calling, Parameters, Return Values)",
+    "Classes": "🏗️ (Creating, Inheritance, Methods, Properties)",
+    "API Interaction": "🌐 (Requests, JSON Parsing, HTTP Methods)",
+    "Data Visualization": "📈 (Matplotlib, Seaborn, Plotly)",
+    "Error Handling": "⚠️ (Try, Except, Finally, Raising)",
+    "Libraries": "📚 (Numpy, Pandas, Scikit-Learn, TensorFlow)"
+}
+
+# Placeholder for chat responses and interactive examples
+response_placeholders = {}
+example_placeholders = {}
+
+# Function to display Python concepts with expanders, examples, and quizzes
+def display_python_parts():
+    st.title("Python Interactive Learning Platform")
+
+    for part, emoji in python_parts.items():
+        with st.expander(f"{emoji} {part} - {python_parts_extended[part]}", expanded=False):
+            # Interactive examples
+            if st.button(f"Show Example for {part}", key=f"example_{part}"):
+                example = generate_example(part)
+                example_placeholders[part] = example
+                st.code(example_placeholders[part], language="python")
+                response = chat_with_model('Create a STEM related 3 to 5 line python code example with output for:' + example_placeholders[part], part)
+
+            # Quizzes
+            if st.button(f"Take Quiz on {part}", key=f"quiz_{part}"):
+                quiz = generate_quiz(part)
+                response = chat_with_model(quiz, part)
+
+            # Chat responses
+            prompt = f"Learn about {python_parts_extended[part]}"
+            if st.button(f"Explore {part}", key=part):
+                response = chat_with_model(prompt, part)
+                response_placeholders[part] = response
+
+            # Display the chat response if available
+            if part in response_placeholders:
+                st.markdown(f"**Response:** {response_placeholders[part]}")
+
+def generate_example(part):
+    # This function will return a relevant Python example based on the selected part
+    # Examples could be pre-defined or dynamically generated
+    return "Python example for " + part
+
+def generate_quiz(part):
+    # This function will create a simple quiz related to the Python part
+    # Quizzes could be multiple-choice questions, true/false, etc.
+    return "Python script quiz example for " + part
+
 # Define function to add paper buttons and links
 def add_paper_buttons_and_links():
+    # Python Pair Programmer
+    page = st.sidebar.radio("Choose a page:", ["Python Pair Programmer"])
+    if page == "Python Pair Programmer":
+        # Display Python concepts and interactive sections
+        display_python_parts()
+
+
+
+
     col1, col2, col3, col4 = st.columns(4)
 
     with col1:
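The display_python_parts() additions keep responses in module-level dicts (response_placeholders, example_placeholders). Because Streamlit reruns the whole script on every widget interaction, plain dicts like these are rebuilt on each run, so a stored reply only survives until the next click; st.session_state is the usual home for values that should persist across reruns. A small sketch of that variant (hypothetical, not part of the commit):

import streamlit as st

if "responses" not in st.session_state:
    st.session_state.responses = {}  # survives reruns within the session

if st.button("Explore Syntax"):
    st.session_state.responses["Syntax"] = "model reply goes here"

if "Syntax" in st.session_state.responses:
    st.markdown(f"**Response:** {st.session_state.responses['Syntax']}")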
@@ -206,13 +364,6 @@ def extract_feature_and_detail(paragraph):
         return header, detail
     return None, None
 
-def generate_filename(prompt, file_type):
-    central = pytz.timezone('US/Central')
-    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-    replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
-    safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
-    return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
 def transcribe_audio(file_path, model):
     key = os.getenv('OPENAI_API_KEY')
     headers = {
@@ -251,62 +402,6 @@ def save_and_play_audio(audio_recorder):
     return None
 
 
-# Define a context dictionary to maintain the state between exec calls
-context = {}
-
-def create_file(filename, prompt, response, should_save=True):
-    if not should_save:
-        return
-
-    # Extract base filename without extension
-    base_filename, ext = os.path.splitext(filename)
-
-    # Initialize the combined content
-    combined_content = ""
-
-    # Add Prompt with markdown title and emoji
-    combined_content += "# Prompt 📝\n" + prompt + "\n\n"
-
-    # Add Response with markdown title and emoji
-    combined_content += "# Response 💬\n" + response + "\n\n"
-
-    # Check for code blocks in the response
-    resources = re.findall(r"```([\s\S]*?)```", response)
-    for resource in resources:
-        # Check if the resource contains Python code
-        if "python" in resource.lower():
-            # Remove the 'python' keyword from the code block
-            cleaned_code = re.sub(r'^\s*python', '', resource, flags=re.IGNORECASE | re.MULTILINE)
-
-            # Add Code Results title with markdown and emoji
-            combined_content += "# Code Results 📊\n"
-
-            # Redirect standard output to capture it
-            original_stdout = sys.stdout
-            sys.stdout = io.StringIO()
-
-            # Execute the cleaned Python code within the context
-            try:
-                exec(cleaned_code, context)
-                code_output = sys.stdout.getvalue()
-                combined_content += f"```\n{code_output}\n```\n\n"
-                realtimeEvalResponse = "# Code Results 📊\n" + "```" + code_output + "```\n\n"
-                st.write(realtimeEvalResponse)
-
-            except Exception as e:
-                combined_content += f"```python\nError executing Python code: {e}\n```\n\n"
-
-            # Restore the original standard output
-            sys.stdout = original_stdout
-        else:
-            # Add non-Python resources with markdown and emoji
-            combined_content += "# Resource 🛠️\n" + "```" + resource + "```\n\n"
-
-    # Save the combined content to a Markdown file
-    if should_save:
-        with open(f"{base_filename}.md", 'w') as file:
-            file.write(combined_content)
-
 
 def truncate_document(document, length):
     return document[:length]
@@ -470,15 +565,6 @@ def get_zip_download_link(zip_file):
 
 def main():
 
-    col1, col2, col3, col4 = st.columns(4)
-
-    with col1:
-        with st.expander("Settings 🧠💾", expanded=False):
-            # File type for output, model choice
-            menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
-            choice = st.sidebar.selectbox("Output File Type:", menu)
-            model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
-
     # Audio, transcribe, GPT:
     filename = save_and_play_audio(audio_recorder)
 
@@ -520,9 +606,7 @@ def main():
             else:
                 if st.button(f"Chat about Section {i+1}"):
                     st.write('Reasoning with your inputs...')
-                    response = chat_with_model(user_prompt, section, model_choice)
-                    st.write('Response:')
-                    st.write(response)
+                    response = chat_with_model(user_prompt, section, model_choice)
                     document_responses[i] = response
                     filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
                     create_file(filename, user_prompt, response, should_save)
@@ -538,14 +622,7 @@ def main():
                 # Process each section with the model
                 response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
                 full_response += response + '\n' # Combine the responses
-
-            #st.write('Response:')
-            #st.write(full_response)
-
             response = full_response
-            st.write('Response:')
-            st.write(response)
-
             filename = generate_filename(user_prompt, choice)
             create_file(filename, user_prompt, response, should_save)
             st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
@@ -631,4 +708,4 @@ with st.sidebar:
         st.session_state.conversation = get_chain(vectorstore)
         st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing
         filename = generate_filename(raw, 'txt')
-        create_file(filename, raw, '', should_save)
+        create_file(filename, raw, '', should_save)
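Several helpers in this file (create_file() above, plus the get_table_download_link() and get_zip_download_link() helpers that appear in the hunks above) share one Streamlit trick for offering downloads: base64-encode the file and embed it in an anchor tag rendered with unsafe_allow_html=True. A small sketch of that pattern, with an illustrative helper name and MIME type:

import base64
import streamlit as st

def file_download_link(path: str, label: str = "Download File") -> str:
    # Read the file, base64-encode it, and wrap it in a data-URI anchor tag.
    with open(path, 'rb') as f:
        encoded = base64.b64encode(f.read()).decode()
    return f'<a href="data:file/markdown;base64,{encoded}" download="{path}">{label}</a>'

# Inside a running Streamlit app:
# st.markdown(file_download_link("notes.md"), unsafe_allow_html=True)

Newer Streamlit releases also provide st.download_button, which avoids the raw-HTML route entirely.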