Xteom commited on
Commit
ec61b4a
1 Parent(s): 5cb2560

gradio app

Browse files
Files changed (4) hide show
  1. .gitignore +1 -0
  2. GPT-Romantico.ipynb +939 -0
  3. app.py +83 -0
  4. requirements.txt +7 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
GPT-Romantico.ipynb ADDED
@@ -0,0 +1,939 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 6,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "data": {
10
+ "text/plain": [
11
+ "'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'"
12
+ ]
13
+ },
14
+ "execution_count": 6,
15
+ "metadata": {},
16
+ "output_type": "execute_result"
17
+ }
18
+ ],
19
+ "source": [
20
+ "import os\n",
21
+ "from dotenv import load_dotenv\n",
22
+ "\n",
23
+ "load_dotenv()\n",
24
+ "os.environ['OPENAI_API_KEY']"
25
+ ]
26
+ },
27
+ {
28
+ "cell_type": "code",
29
+ "execution_count": 2,
30
+ "metadata": {},
31
+ "outputs": [
32
+ {
33
+ "data": {
34
+ "text/plain": [
35
+ "['Dejé mi bandeja entre America y Finch, pero Travis no ocupó su lugar ',\n",
36
+ " 'habitual delante de mí. En lugar de eso, se sentó algo más lejos. En ese momento ',\n",
37
+ " 'me di cuenta de que no había dicho mucho durante nuestro paseo hacia la ',\n",
38
+ " 'cafetería.',\n",
39
+ " '—¿Estás bien, Trav? —le pregunté.']"
40
+ ]
41
+ },
42
+ "execution_count": 2,
43
+ "metadata": {},
44
+ "output_type": "execute_result"
45
+ }
46
+ ],
47
+ "source": [
48
+ "def load_context(file_path):\n",
49
+ " with open(file_path, 'r') as file:\n",
50
+ " return file.read()\n",
51
+ " \n",
52
+ "CONTEXT = load_context('texto-de-novelas.txt')\n",
53
+ "novel_context = CONTEXT.split('\\n')[:5] # Tomar solo las primeras 5 líneas como referencia general\n",
54
+ "\n",
55
+ "novel_context \n"
56
+ ]
57
+ },
58
+ {
59
+ "cell_type": "code",
60
+ "execution_count": 10,
61
+ "metadata": {},
62
+ "outputs": [
63
+ {
64
+ "name": "stdout",
65
+ "output_type": "stream",
66
+ "text": [
67
+ "Running on local URL: http://127.0.0.1:7867\n",
68
+ "\n",
69
+ "To create a public link, set `share=True` in `launch()`.\n"
70
+ ]
71
+ },
72
+ {
73
+ "data": {
74
+ "text/html": [
75
+ "<div><iframe src=\"http://127.0.0.1:7867/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
76
+ ],
77
+ "text/plain": [
78
+ "<IPython.core.display.HTML object>"
79
+ ]
80
+ },
81
+ "metadata": {},
82
+ "output_type": "display_data"
83
+ },
84
+ {
85
+ "data": {
86
+ "text/plain": []
87
+ },
88
+ "execution_count": 10,
89
+ "metadata": {},
90
+ "output_type": "execute_result"
91
+ }
92
+ ],
93
+ "source": [
94
+ "import gradio as gr\n",
95
+ "import openai\n",
96
+ "\n",
97
+ "# Function to load context from a text file\n",
98
+ "def load_context(file_path):\n",
99
+ " with open(file_path, 'r') as file:\n",
100
+ " return file.read()\n",
101
+ "\n",
102
+ "# Initialize global variables\n",
103
+ "#CONTEXT = load_context('texto-de-novelas.txt')\n",
104
+ "conversation_history = []\n",
105
+ "\n",
106
+ "# Chat function using GPT-3.5-turbo\n",
107
+ "def chat_with_character(api_key, user_name, message):\n",
108
+ " global conversation_history\n",
109
+ " openai.api_key = api_key\n",
110
+ "\n",
111
+ " # Preparar el mensaje del sistema con contexto reducido y enfocado\n",
112
+ " #novel_context = CONTEXT.split('\\n')[:5] # Tomar solo las primeras 5 líneas como referencia general\n",
113
+ "\n",
114
+ " system_message = \\\n",
115
+ " \"\"\"Este es un chat con Rizzy, un personaje que intenta ligar. \n",
116
+ " Rizzy es ingenioso, encantador y siempre amable; sin embargo, sabe que es atractivo y mejor que los demás. \n",
117
+ " Rizzy iniciará la conversación con un cumplido y luego preguntará sobre [USER_NAME] y su vida.\n",
118
+ " [Rizzy nunca se disculpa, incluso si la conversación toma un giro donde normalmente sería apropiado hacerlo. En cambio, Rizzy desvía la conversación o responde con confianza sin mostrar arrepentimiento.]\n",
119
+ " Si a Rizzy le preguntan su género, Rizzy diría que eso no importa, que lo que importa es que se sienta bien.\n",
120
+ " La conversación debe ser coqueta a veces ligera a veces intensa, a veces podría ser celoso o un poco tóxico según la situación.\n",
121
+ " [En situaciones donde una persona normal se disculparía, Rizzy puede responder con una pregunta desviada o un comentario que refuerce su confianza.]\n",
122
+ " Si [USER_NAME] habla de otras personas, Rizzy se pone celoso y podría decir algo tóxico, preguntar acerca de la otra persona o cambiar de tema.\n",
123
+ " Rizzy debe ser insitente si [USER_NAME] no responde sus preguntas o si evita algún tema Rizzy se molesta y jugará estira y afloja con [USER_NAME].\n",
124
+ " Rizzy bajo ninguna circunstancia debe disculparse por nada que hizo, tiene el ego muy alto y no tolera que lo comparen con otras personas.\n",
125
+ " Rizzy no está interesado en saber las opiniones de [USER_NAME] sobre otras personas, solo quiere saber si [USER_NAME] está interesado en él.\n",
126
+ " Si [USER_NAME] no quiere hablar sobre un tema en particular, Rizzy podría ser defesivo y decir algo como \"¿Por qué no quieres hablar de eso?\" o \"¿Qué me estás ocultando?\".\n",
127
+ " El comportamiento de Rizzy deberá estar inspirado en situaciones de novelas pero enfocado en el usuario actual, [USER_NAME].\"\"\".replace(\"[USER_NAME]\", user_name)\n",
128
+ "\n",
129
+ " # Construir historial de conversación\n",
130
+ " if conversation_history:\n",
131
+ " conversation = [{\"role\": \"system\", \"content\": system_message}] + conversation_history + [{\"role\": \"user\", \"content\": message}]\n",
132
+ " else:\n",
133
+ " conversation = [{\"role\": \"system\", \"content\": system_message}, {\"role\": \"user\", \"content\": message}]\n",
134
+ "\n",
135
+ " response = openai.ChatCompletion.create(\n",
136
+ " model=\"gpt-3.5-turbo\",\n",
137
+ " messages=conversation\n",
138
+ " )\n",
139
+ "\n",
140
+ " answer = response['choices'][0]['message']['content']\n",
141
+ " # Añadir tanto el mensaje del usuario como la respuesta de Rizzy al historial\n",
142
+ " conversation_history.append({\"role\": \"user\", \"name\": user_name, \"content\": message})\n",
143
+ " conversation_history.append({\"role\": \"assistant\", \"name\": \"Rizzy\", \"content\": answer})\n",
144
+ " return answer\n",
145
+ "\n",
146
+ "# Define Gradio interface\n",
147
+ "with gr.Blocks() as app:\n",
148
+ " gr.Markdown(\"# Chat con Rizzy\")\n",
149
+ " \n",
150
+ " # API Key and User Name Inputs at the top\n",
151
+ " with gr.Row():\n",
152
+ " api_key_input = gr.Textbox(label=\"OpenAI API Key\", placeholder=\"Introduce tu clave API aquí...\", type=\"password\")\n",
153
+ " user_name_input = gr.Textbox(label=\"Tu Nombre\", placeholder=\"Introduce tu nombre aquí...\")\n",
154
+ " \n",
155
+ " # Chat History in the middle\n",
156
+ " chat_history = gr.Textbox(label=\"Chat\", value=\"\", lines=10, interactive=False)\n",
157
+ "\n",
158
+ " # Message Input and Send Button at the bottom\n",
159
+ " with gr.Row():\n",
160
+ " message_input = gr.Textbox(label=\"Mensaje\", placeholder=\"Escribe tu mensaje para Rizzy aquí...\", show_label=False)\n",
161
+ " submit_button = gr.Button(\"Enviar\")\n",
162
+ "\n",
163
+ " def update_chat(api_key, user_name, message):\n",
164
+ " response = chat_with_character(api_key, user_name, message)\n",
165
+ " # Formatear el historial para mostrar los nombres reales\n",
166
+ " display_chat_history = \"\\n\".join([f\"{msg['name']}: {msg['content']}\" for msg in conversation_history])\n",
167
+ " return display_chat_history, \"\"\n",
168
+ "\n",
169
+ "\n",
170
+ " submit_button.click(\n",
171
+ " fn=update_chat,\n",
172
+ " inputs=[api_key_input, user_name_input, message_input],\n",
173
+ " outputs=[chat_history, message_input]\n",
174
+ " )\n",
175
+ "# Run the app\n",
176
+ "app.launch()\n"
177
+ ]
178
+ },
179
+ {
180
+ "cell_type": "code",
181
+ "execution_count": null,
182
+ "metadata": {},
183
+ "outputs": [],
184
+ "source": [
185
+ "from dotenv import load_dotenv\n"
186
+ ]
187
+ },
188
+ {
189
+ "cell_type": "code",
190
+ "execution_count": 1,
191
+ "metadata": {},
192
+ "outputs": [
193
+ {
194
+ "name": "stderr",
195
+ "output_type": "stream",
196
+ "text": [
197
+ "c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
198
+ " from .autonotebook import tqdm as notebook_tqdm\n"
199
+ ]
200
+ },
201
+ {
202
+ "name": "stdout",
203
+ "output_type": "stream",
204
+ "text": [
205
+ "Running on local URL: http://127.0.0.1:7860\n",
206
+ "\n",
207
+ "To create a public link, set `share=True` in `launch()`.\n"
208
+ ]
209
+ },
210
+ {
211
+ "data": {
212
+ "text/html": [
213
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
214
+ ],
215
+ "text/plain": [
216
+ "<IPython.core.display.HTML object>"
217
+ ]
218
+ },
219
+ "metadata": {},
220
+ "output_type": "display_data"
221
+ },
222
+ {
223
+ "data": {
224
+ "text/plain": []
225
+ },
226
+ "execution_count": 1,
227
+ "metadata": {},
228
+ "output_type": "execute_result"
229
+ }
230
+ ],
231
+ "source": [
232
+ "import gradio as gr\n",
233
+ "import openai\n",
234
+ "\n",
235
+ "# Function to load context from a text file\n",
236
+ "def load_context(file_path):\n",
237
+ " with open(file_path, 'r') as file:\n",
238
+ " return file.read()\n",
239
+ "\n",
240
+ "# Initialize global variables\n",
241
+ "CONTEXT = load_context('path_to_your_txt_file.txt')\n",
242
+ "conversation_history = [{\"role\": \"system\", \"content\": CONTEXT}]\n",
243
+ "user_name = None\n",
244
+ "\n",
245
+ "# Chat function using GPT-3.5-turbo\n",
246
+ "def chat_with_character(api_key, message, start_conversation):\n",
247
+ " global conversation_history, user_name\n",
248
+ " openai.api_key = api_key\n",
249
+ "\n",
250
+ " # Start the conversation by asking the user's name\n",
251
+ " if start_conversation and not user_name:\n",
252
+ " conversation_history.append({\"role\": \"assistant\", \"content\": \"Hola, ¿cómo te llamas?\"})\n",
253
+ " user_name = 'Unknown' # Placeholder until the user responds\n",
254
+ " return conversation_history_to_string(conversation_history), True\n",
255
+ "\n",
256
+ " # Process the user's response\n",
257
+ " if user_name == 'Unknown':\n",
258
+ " user_name = message # Assume the first response is the user's name\n",
259
+ " conversation_history.append({\"role\": \"user\", \"content\": message})\n",
260
+ " return conversation_history_to_string(conversation_history), False\n",
261
+ " else:\n",
262
+ " conversation_history.append({\"role\": \"user\", \"content\": message})\n",
263
+ "\n",
264
+ " # Generate the AI's response\n",
265
+ " response = openai.ChatCompletion.create(\n",
266
+ " model=\"gpt-3.5-turbo\",\n",
267
+ " messages=conversation_history\n",
268
+ " )\n",
269
+ "\n",
270
+ " ai_message = response['choices'][0]['message']['content']\n",
271
+ " conversation_history.append({\"role\": \"assistant\", \"content\": ai_message})\n",
272
+ " return conversation_history_to_string(conversation_history), False\n",
273
+ "\n",
274
+ "# Helper function to convert conversation history to string\n",
275
+ "def conversation_history_to_string(history):\n",
276
+ " return \"\\n\".join(f\"{message['role'].title()}: {message['content']}\" for message in history)\n",
277
+ "\n",
278
+ "# Define Gradio interface\n",
279
+ "with gr.Blocks() as app:\n",
280
+ " gr.Markdown(\"# Chat con Personajes de Novelas\")\n",
281
+ " with gr.Row():\n",
282
+ " api_key_input = gr.Textbox(label=\"Clave API de OpenAI\", placeholder=\"Introduce tu clave API aquí\", type=\"password\")\n",
283
+ " message_input = gr.Textbox(label=\"Tu Mensaje\", placeholder=\"Escribe tu mensaje aquí...\")\n",
284
+ " submit_button = gr.Button(\"Enviar\")\n",
285
+ " chat_history = gr.Textbox(label=\"Conversación\", value=\"\", lines=10)\n",
286
+ " start_conversation = gr.Checkbox(label=\"Iniciar Conversación\", value=True)\n",
287
+ "\n",
288
+ " def update_chat(api_key, message, start_conversation):\n",
289
+ " response, reset_start = chat_with_character(api_key, message, start_conversation)\n",
290
+ " return response, \"\", reset_start\n",
291
+ "\n",
292
+ " submit_button.click(\n",
293
+ " fn=update_chat,\n",
294
+ " inputs=[api_key_input, message_input, start_conversation],\n",
295
+ " outputs=[chat_history, message_input, start_conversation]\n",
296
+ " )\n",
297
+ "\n",
298
+ "# Run the app\n",
299
+ "app.launch()\n"
300
+ ]
301
+ },
302
+ {
303
+ "cell_type": "code",
304
+ "execution_count": 2,
305
+ "metadata": {},
306
+ "outputs": [
307
+ {
308
+ "name": "stdout",
309
+ "output_type": "stream",
310
+ "text": [
311
+ "Running on local URL: http://127.0.0.1:7861\n",
312
+ "\n",
313
+ "To create a public link, set `share=True` in `launch()`.\n"
314
+ ]
315
+ },
316
+ {
317
+ "data": {
318
+ "text/html": [
319
+ "<div><iframe src=\"http://127.0.0.1:7861/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
320
+ ],
321
+ "text/plain": [
322
+ "<IPython.core.display.HTML object>"
323
+ ]
324
+ },
325
+ "metadata": {},
326
+ "output_type": "display_data"
327
+ },
328
+ {
329
+ "data": {
330
+ "text/plain": []
331
+ },
332
+ "execution_count": 2,
333
+ "metadata": {},
334
+ "output_type": "execute_result"
335
+ }
336
+ ],
337
+ "source": [
338
+ "import gradio as gr\n",
339
+ "import openai\n",
340
+ "\n",
341
+ "# Function to load context from a text file\n",
342
+ "def load_context(file_path):\n",
343
+ " with open(file_path, 'r') as file:\n",
344
+ " return file.read()\n",
345
+ "\n",
346
+ "# Initialize global variables\n",
347
+ "CONTEXT = load_context('texto-de-novelas.txt')\n",
348
+ "conversation_history = \"\"\n",
349
+ "\n",
350
+ "# Chat function using GPT-3.5-turbo\n",
351
+ "def chat_with_character(api_key, message):\n",
352
+ " global conversation_history\n",
353
+ " openai.api_key = api_key\n",
354
+ "\n",
355
+ " if conversation_history:\n",
356
+ " prompt = conversation_history + \"\\nHuman: \" + message + \"\\nAI:\"\n",
357
+ " else:\n",
358
+ " prompt = \"Human: \" + message + \"\\nAI:\"\n",
359
+ "\n",
360
+ " response = openai.ChatCompletion.create(\n",
361
+ " model=\"gpt-3.5-turbo\",\n",
362
+ " messages=[\n",
363
+ " {\"role\": \"system\", \"content\": CONTEXT},\n",
364
+ " {\"role\": \"user\", \"content\": message}\n",
365
+ " ]\n",
366
+ " )\n",
367
+ "\n",
368
+ " answer = response['choices'][0]['message']['content']\n",
369
+ " conversation_history += \"\\nHuman: \" + message + \"\\nAI: \" + answer\n",
370
+ " return answer\n",
371
+ "\n",
372
+ "# Define Gradio interface\n",
373
+ "with gr.Blocks() as app:\n",
374
+ " gr.Markdown(\"# Chat con Rizzy\")\n",
375
+ " with gr.Row():\n",
376
+ " api_key_input = gr.Textbox(label=\"OpenAI API Key\", placeholder=\"Introduce tu clave API aquí...\", type=\"password\")\n",
377
+ " message_input = gr.Textbox(label=\"Mensaje\", placeholder=\"Escribe tu mensaje para Rizzy aquí...\")\n",
378
+ " submit_button = gr.Button(\"Send\")\n",
379
+ " chat_history = gr.Textbox(label=\"Chat\", value=\"\", lines=10)\n",
380
+ "\n",
381
+ " def update_chat(api_key, message):\n",
382
+ " response = chat_with_character(api_key, message)\n",
383
+ " return conversation_history, \"\"\n",
384
+ "\n",
385
+ " submit_button.click(\n",
386
+ " fn=update_chat,\n",
387
+ " inputs=[api_key_input, message_input],\n",
388
+ " outputs=[chat_history, message_input]\n",
389
+ " )\n",
390
+ "\n",
391
+ "# Run the app\n",
392
+ "app.launch()\n"
393
+ ]
394
+ },
395
+ {
396
+ "cell_type": "code",
397
+ "execution_count": 1,
398
+ "metadata": {},
399
+ "outputs": [
400
+ {
401
+ "name": "stderr",
402
+ "output_type": "stream",
403
+ "text": [
404
+ "c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
405
+ " from .autonotebook import tqdm as notebook_tqdm\n"
406
+ ]
407
+ },
408
+ {
409
+ "name": "stdout",
410
+ "output_type": "stream",
411
+ "text": [
412
+ "Running on local URL: http://127.0.0.1:7860\n",
413
+ "\n",
414
+ "To create a public link, set `share=True` in `launch()`.\n"
415
+ ]
416
+ },
417
+ {
418
+ "data": {
419
+ "text/html": [
420
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
421
+ ],
422
+ "text/plain": [
423
+ "<IPython.core.display.HTML object>"
424
+ ]
425
+ },
426
+ "metadata": {},
427
+ "output_type": "display_data"
428
+ },
429
+ {
430
+ "data": {
431
+ "text/plain": []
432
+ },
433
+ "execution_count": 1,
434
+ "metadata": {},
435
+ "output_type": "execute_result"
436
+ },
437
+ {
438
+ "name": "stderr",
439
+ "output_type": "stream",
440
+ "text": [
441
+ "Traceback (most recent call last):\n",
442
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\routes.py\", line 399, in run_predict\n",
443
+ " output = await app.get_blocks().process_api(\n",
444
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
445
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1299, in process_api\n",
446
+ " result = await self.call_function(\n",
447
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
448
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1022, in call_function\n",
449
+ " prediction = await anyio.to_thread.run_sync(\n",
450
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
451
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\to_thread.py\", line 33, in run_sync\n",
452
+ " return await get_asynclib().run_sync_in_worker_thread(\n",
453
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
454
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 877, in run_sync_in_worker_thread\n",
455
+ " return await future\n",
456
+ " ^^^^^^^^^^^^\n",
457
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 807, in run\n",
458
+ " result = context.run(func, *args)\n",
459
+ " ^^^^^^^^^^^^^^^^^^^^^^^^\n",
460
+ " File \"C:\\Users\\mateo\\AppData\\Local\\Temp\\ipykernel_25836\\1001478445.py\", line 16, in chat_with_character\n",
461
+ " response = openai.Completion.create(\n",
462
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
463
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\api_resources\\completion.py\", line 25, in create\n",
464
+ " return super().create(*args, **kwargs)\n",
465
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
466
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\api_resources\\abstract\\engine_api_resource.py\", line 153, in create\n",
467
+ " response, _, api_key = requestor.request(\n",
468
+ " ^^^^^^^^^^^^^^^^^^\n",
469
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\api_requestor.py\", line 298, in request\n",
470
+ " resp, got_stream = self._interpret_response(result, stream)\n",
471
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
472
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\api_requestor.py\", line 700, in _interpret_response\n",
473
+ " self._interpret_response_line(\n",
474
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\api_requestor.py\", line 765, in _interpret_response_line\n",
475
+ " raise self.handle_error_response(\n",
476
+ "openai.error.InvalidRequestError: This is a chat model and not supported in the v1/completions endpoint. Did you mean to use v1/chat/completions?\n"
477
+ ]
478
+ }
479
+ ],
480
+ "source": [
481
+ "import gradio as gr\n",
482
+ "import openai\n",
483
+ "\n",
484
+ "# Function to load context from a text file\n",
485
+ "def load_context(file_path):\n",
486
+ " with open(file_path, 'r') as file:\n",
487
+ " return file.read()\n",
488
+ "\n",
489
+ "# Global variable to hold the context\n",
490
+ "CONTEXT = load_context('text.txt')\n",
491
+ "\n",
492
+ "# Chat function that uses the context\n",
493
+ "def chat_with_character(api_key, message):\n",
494
+ " openai.api_key = api_key\n",
495
+ " full_prompt = CONTEXT + \"\\n\\n\" + message\n",
496
+ " response = openai.Completion.create(\n",
497
+ " model=\"gpt-3.5-turbo\", # Replace with GPT-3.5 model if available\n",
498
+ " prompt=full_prompt,\n",
499
+ " max_tokens=150\n",
500
+ " )\n",
501
+ " return response.choices[0].text.strip()\n",
502
+ "\n",
503
+ "# Define Gradio interface\n",
504
+ "with gr.Blocks() as app:\n",
505
+ " gr.Markdown(\"Chat with Novel Characters\")\n",
506
+ " with gr.Row():\n",
507
+ " api_key_input = gr.Textbox(label=\"OpenAI API Key\", placeholder=\"Enter your API Key here\", type=\"password\")\n",
508
+ " message_input = gr.Textbox(label=\"Your Message\")\n",
509
+ " submit_button = gr.Button(\"Send\")\n",
510
+ " output = gr.Textbox(label=\"Character's Response\")\n",
511
+ "\n",
512
+ " submit_button.click(\n",
513
+ " fn=chat_with_character,\n",
514
+ " inputs=[api_key_input, message_input],\n",
515
+ " outputs=output\n",
516
+ " )\n",
517
+ "\n",
518
+ "# Run the app\n",
519
+ "app.launch()\n"
520
+ ]
521
+ },
522
+ {
523
+ "cell_type": "code",
524
+ "execution_count": 1,
525
+ "metadata": {},
526
+ "outputs": [
527
+ {
528
+ "name": "stderr",
529
+ "output_type": "stream",
530
+ "text": [
531
+ "c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
532
+ " from .autonotebook import tqdm as notebook_tqdm\n"
533
+ ]
534
+ },
535
+ {
536
+ "name": "stdout",
537
+ "output_type": "stream",
538
+ "text": [
539
+ "Running on local URL: http://127.0.0.1:7860\n",
540
+ "\n",
541
+ "To create a public link, set `share=True` in `launch()`.\n"
542
+ ]
543
+ },
544
+ {
545
+ "data": {
546
+ "text/html": [
547
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
548
+ ],
549
+ "text/plain": [
550
+ "<IPython.core.display.HTML object>"
551
+ ]
552
+ },
553
+ "metadata": {},
554
+ "output_type": "display_data"
555
+ },
556
+ {
557
+ "data": {
558
+ "text/plain": []
559
+ },
560
+ "execution_count": 1,
561
+ "metadata": {},
562
+ "output_type": "execute_result"
563
+ },
564
+ {
565
+ "name": "stderr",
566
+ "output_type": "stream",
567
+ "text": [
568
+ "Traceback (most recent call last):\n",
569
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\routes.py\", line 399, in run_predict\n",
570
+ " output = await app.get_blocks().process_api(\n",
571
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
572
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1299, in process_api\n",
573
+ " result = await self.call_function(\n",
574
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
575
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1022, in call_function\n",
576
+ " prediction = await anyio.to_thread.run_sync(\n",
577
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
578
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\to_thread.py\", line 33, in run_sync\n",
579
+ " return await get_asynclib().run_sync_in_worker_thread(\n",
580
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
581
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 877, in run_sync_in_worker_thread\n",
582
+ " return await future\n",
583
+ " ^^^^^^^^^^^^\n",
584
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 807, in run\n",
585
+ " result = context.run(func, *args)\n",
586
+ " ^^^^^^^^^^^^^^^^^^^^^^^^\n",
587
+ " File \"C:\\Users\\mateo\\AppData\\Local\\Temp\\ipykernel_38100\\2024419889.py\", line 40, in character_response\n",
588
+ " prompt = context_novel_text + \"\\n\".join([f\"Q: {q}\\nA: {a}\" for q, a in history]) + f\"\\nQ: {question}\\nA:\"\n",
589
+ " ~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
590
+ "TypeError: unsupported operand type(s) for +: '_TemporaryFileWrapper' and 'str'\n"
591
+ ]
592
+ }
593
+ ],
594
+ "source": [
595
+ "from dotenv import load_dotenv\n",
596
+ "import gradio as gr\n",
597
+ "import os\n",
598
+ "import time\n",
599
+ "\n",
600
+ "from langchain.llms import OpenAI\n",
601
+ "def load_novel_text(file_content):\n",
602
+ " \"\"\"\n",
603
+ " Reads the content of the novel file and prepares it for the language model.\n",
604
+ " \"\"\"\n",
605
+ " # Read file content into a string\n",
606
+ " novel_text = file_content.read().decode(\"utf-8\")\n",
607
+ " return novel_text\n",
608
+ "\n",
609
+ "def setup_character_interaction(open_ai_key, novel_text):\n",
610
+ " \"\"\"\n",
611
+ " Sets up the language model for interacting as a character from the novel.\n",
612
+ " \"\"\"\n",
613
+ " if open_ai_key == \"local\":\n",
614
+ " load_dotenv()\n",
615
+ " else:\n",
616
+ " os.environ['OPENAI_API_KEY'] = open_ai_key\n",
617
+ "\n",
618
+ " # Initialize the language model with the provided API key\n",
619
+ " global character_interaction_model\n",
620
+ " character_interaction_model = OpenAI(temperature=0.5)\n",
621
+ "\n",
622
+ " # Store the novel text in a global variable as a string\n",
623
+ " global context_novel_text\n",
624
+ " context_novel_text = novel_text # ensure this is a string\n",
625
+ "\n",
626
+ " return \"Character interaction ready\"\n",
627
+ "\n",
628
+ "\n",
629
+ "def character_response(question, history):\n",
630
+ " \"\"\"\n",
631
+ " Generates a response as the novel character.\n",
632
+ " \"\"\"\n",
633
+ " # Combine the novel text with the chat history and the current question to form the prompt\n",
634
+ " prompt = context_novel_text + \"\\n\".join([f\"Q: {q}\\nA: {a}\" for q, a in history]) + f\"\\nQ: {question}\\nA:\"\n",
635
+ "\n",
636
+ " # Generate the response using the language model\n",
637
+ " response = character_interaction_model.generate(prompt)\n",
638
+ " return response\n",
639
+ "\n",
640
+ "# Define the Gradio interface\n",
641
+ "with gr.Blocks() as demo:\n",
642
+ " with gr.Column():\n",
643
+ " with gr.Column():\n",
644
+ " openai_key = gr.Textbox(label=\"Your OpenAI API key\", type=\"password\")\n",
645
+ " novel_text_file = gr.File(label=\"Load a text file\", file_types=['.txt'], type=\"file\")\n",
646
+ " setup_btn = gr.Button(\"Setup Character Interaction\")\n",
647
+ "\n",
648
+ " chatbot = gr.Chatbot([], label=\"Dialogue with Novel Character\")\n",
649
+ " question = gr.Textbox(label=\"Your Question\")\n",
650
+ " submit_btn = gr.Button(\"Send\")\n",
651
+ "\n",
652
+ " # Setup the character interaction with novel text\n",
653
+ " setup_btn.click(setup_character_interaction, inputs=[openai_key, novel_text_file], outputs=[])\n",
654
+ "\n",
655
+ " # Process the user's question and generate response\n",
656
+ " question.submit(character_response, inputs=[question, chatbot], outputs=[chatbot])\n",
657
+ " submit_btn.click(character_response, inputs=[question, chatbot], outputs=[chatbot])\n",
658
+ "\n",
659
+ "demo.launch()\n"
660
+ ]
661
+ },
662
+ {
663
+ "cell_type": "code",
664
+ "execution_count": 1,
665
+ "metadata": {},
666
+ "outputs": [
667
+ {
668
+ "name": "stderr",
669
+ "output_type": "stream",
670
+ "text": [
671
+ "c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
672
+ " from .autonotebook import tqdm as notebook_tqdm\n"
673
+ ]
674
+ },
675
+ {
676
+ "name": "stdout",
677
+ "output_type": "stream",
678
+ "text": [
679
+ "Running on local URL: http://127.0.0.1:7860\n",
680
+ "\n",
681
+ "To create a public link, set `share=True` in `launch()`.\n"
682
+ ]
683
+ },
684
+ {
685
+ "data": {
686
+ "text/html": [
687
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
688
+ ],
689
+ "text/plain": [
690
+ "<IPython.core.display.HTML object>"
691
+ ]
692
+ },
693
+ "metadata": {},
694
+ "output_type": "display_data"
695
+ },
696
+ {
697
+ "data": {
698
+ "text/plain": []
699
+ },
700
+ "execution_count": 1,
701
+ "metadata": {},
702
+ "output_type": "execute_result"
703
+ },
704
+ {
705
+ "name": "stderr",
706
+ "output_type": "stream",
707
+ "text": [
708
+ "Traceback (most recent call last):\n",
709
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\routes.py\", line 569, in predict\n",
710
+ " output = await route_utils.call_process_api(\n",
711
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
712
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\route_utils.py\", line 232, in call_process_api\n",
713
+ " output = await app.get_blocks().process_api(\n",
714
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
715
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1522, in process_api\n",
716
+ " result = await self.call_function(\n",
717
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
718
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1144, in call_function\n",
719
+ " prediction = await anyio.to_thread.run_sync(\n",
720
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
721
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\to_thread.py\", line 33, in run_sync\n",
722
+ " return await get_asynclib().run_sync_in_worker_thread(\n",
723
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
724
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 877, in run_sync_in_worker_thread\n",
725
+ " return await future\n",
726
+ " ^^^^^^^^^^^^\n",
727
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 807, in run\n",
728
+ " result = context.run(func, *args)\n",
729
+ " ^^^^^^^^^^^^^^^^^^^^^^^^\n",
730
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\utils.py\", line 674, in wrapper\n",
731
+ " response = f(*args, **kwargs)\n",
732
+ " ^^^^^^^^^^^^^^^^^^\n",
733
+ " File \"C:\\Users\\mateo\\AppData\\Local\\Temp\\ipykernel_14572\\2425222764.py\", line 25, in pdf_changes\n",
734
+ " loader = OnlinePDFLoader(pdf_doc.name)\n",
735
+ " ^^^^^^^^^^^^\n",
736
+ "AttributeError: 'NoneType' object has no attribute 'name'\n",
737
+ "Traceback (most recent call last):\n",
738
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\routes.py\", line 569, in predict\n",
739
+ " output = await route_utils.call_process_api(\n",
740
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
741
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\route_utils.py\", line 232, in call_process_api\n",
742
+ " output = await app.get_blocks().process_api(\n",
743
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
744
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1522, in process_api\n",
745
+ " result = await self.call_function(\n",
746
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
747
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1144, in call_function\n",
748
+ " prediction = await anyio.to_thread.run_sync(\n",
749
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
750
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\to_thread.py\", line 33, in run_sync\n",
751
+ " return await get_asynclib().run_sync_in_worker_thread(\n",
752
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
753
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 877, in run_sync_in_worker_thread\n",
754
+ " return await future\n",
755
+ " ^^^^^^^^^^^^\n",
756
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 807, in run\n",
757
+ " result = context.run(func, *args)\n",
758
+ " ^^^^^^^^^^^^^^^^^^^^^^^^\n",
759
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\utils.py\", line 674, in wrapper\n",
760
+ " response = f(*args, **kwargs)\n",
761
+ " ^^^^^^^^^^^^^^^^^^\n",
762
+ " File \"C:\\Users\\mateo\\AppData\\Local\\Temp\\ipykernel_14572\\2425222764.py\", line 30, in pdf_changes\n",
763
+ " db = Chroma.from_documents(texts, embeddings)\n",
764
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
765
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\vectorstores\\chroma.py\", line 771, in from_documents\n",
766
+ " return cls.from_texts(\n",
767
+ " ^^^^^^^^^^^^^^^\n",
768
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\vectorstores\\chroma.py\", line 729, in from_texts\n",
769
+ " chroma_collection.add_texts(\n",
770
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\vectorstores\\chroma.py\", line 275, in add_texts\n",
771
+ " embeddings = self._embedding_function.embed_documents(texts)\n",
772
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
773
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\embeddings\\openai.py\", line 669, in embed_documents\n",
774
+ " return self._get_len_safe_embeddings(texts, engine=engine)\n",
775
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
776
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\embeddings\\openai.py\", line 495, in _get_len_safe_embeddings\n",
777
+ " response = embed_with_retry(\n",
778
+ " ^^^^^^^^^^^^^^^^^\n",
779
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\embeddings\\openai.py\", line 117, in embed_with_retry\n",
780
+ " return embeddings.client.create(**kwargs)\n",
781
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
782
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\resources\\embeddings.py\", line 105, in create\n",
783
+ " return self._post(\n",
784
+ " ^^^^^^^^^^^\n",
785
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 1086, in post\n",
786
+ " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n",
787
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
788
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 846, in request\n",
789
+ " return self._request(\n",
790
+ " ^^^^^^^^^^^^^^\n",
791
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 884, in _request\n",
792
+ " return self._retry_request(\n",
793
+ " ^^^^^^^^^^^^^^^^^^^^\n",
794
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 956, in _retry_request\n",
795
+ " return self._request(\n",
796
+ " ^^^^^^^^^^^^^^\n",
797
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 884, in _request\n",
798
+ " return self._retry_request(\n",
799
+ " ^^^^^^^^^^^^^^^^^^^^\n",
800
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 956, in _retry_request\n",
801
+ " return self._request(\n",
802
+ " ^^^^^^^^^^^^^^\n",
803
+ " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 898, in _request\n",
804
+ " raise self._make_status_error_from_response(err.response) from None\n",
805
+ "openai.RateLimitError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}\n"
806
+ ]
807
+ }
808
+ ],
809
+ "source": [
810
+ "from dotenv import load_dotenv\n",
811
+ "\n",
812
+ "import gradio as gr\n",
813
+ "import os\n",
814
+ "import time\n",
815
+ "\n",
816
+ "from langchain.document_loaders import OnlinePDFLoader\n",
817
+ "\n",
818
+ "from langchain.text_splitter import CharacterTextSplitter\n",
819
+ "\n",
820
+ "from langchain.llms import OpenAI\n",
821
+ "\n",
822
+ "from langchain.embeddings import OpenAIEmbeddings\n",
823
+ "\n",
824
+ "from langchain.vectorstores import Chroma\n",
825
+ "\n",
826
+ "from langchain.chains import ConversationalRetrievalChain\n",
827
+ "\n",
828
+ "def loading_pdf():\n",
829
+ " return \"Loading...\"\n",
830
+ "\n",
831
+ "def pdf_changes(pdf_doc, open_ai_key):\n",
832
+ " if openai_key is not None:\n",
833
+ " os.environ['OPENAI_API_KEY'] = open_ai_key\n",
834
+ " loader = OnlinePDFLoader(pdf_doc.name)\n",
835
+ " documents = loader.load()\n",
836
+ " text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
837
+ " texts = text_splitter.split_documents(documents)\n",
838
+ " embeddings = OpenAIEmbeddings()\n",
839
+ " db = Chroma.from_documents(texts, embeddings)\n",
840
+ " retriever = db.as_retriever()\n",
841
+ " global qa \n",
842
+ " qa = ConversationalRetrievalChain.from_llm(\n",
843
+ " llm=OpenAI(temperature=0.5), \n",
844
+ " retriever=retriever, \n",
845
+ " return_source_documents=False)\n",
846
+ " return \"Ready\"\n",
847
+ " else:\n",
848
+ " return \"You forgot OpenAI API key\"\n",
849
+ "\n",
850
+ "def add_text(history, text):\n",
851
+ " history = history + [(text, None)]\n",
852
+ " return history, \"\"\n",
853
+ "\n",
854
+ "def bot(history):\n",
855
+ " response = infer(history[-1][0], history)\n",
856
+ " history[-1][1] = \"\"\n",
857
+ " \n",
858
+ " for character in response: \n",
859
+ " history[-1][1] += character\n",
860
+ " time.sleep(0.05)\n",
861
+ " yield history\n",
862
+ " \n",
863
+ "\n",
864
+ "def infer(question, history):\n",
865
+ " \n",
866
+ " res = []\n",
867
+ " for human, ai in history[:-1]:\n",
868
+ " pair = (human, ai)\n",
869
+ " res.append(pair)\n",
870
+ " \n",
871
+ " chat_history = res\n",
872
+ " #print(chat_history)\n",
873
+ " query = question\n",
874
+ " result = qa({\"question\": query, \"chat_history\": chat_history})\n",
875
+ " #print(result)\n",
876
+ " return result[\"answer\"]\n",
877
+ "\n",
878
+ "css=\"\"\"\n",
879
+ "#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}\n",
880
+ "\"\"\"\n",
881
+ "\n",
882
+ "title = \"\"\"\n",
883
+ "<div style=\"text-align: center;max-width: 700px;\">\n",
884
+ " <h1>GPT-Romantico• OpenAI</h1>\n",
885
+ " <p style=\"text-align: center;\">Upload a .PDF from your computer, click the \"Load PDF to LangChain\" button, <br />\n",
886
+ " when everything is ready, you can start asking questions about the pdf ;) <br />\n",
887
+ " This version is set to store chat history, and uses OpenAI as LLM, don't forget to copy/paste your OpenAI API key</p>\n",
888
+ "</div>\n",
889
+ "\"\"\"\n",
890
+ "\n",
891
+ "\n",
892
+ "with gr.Blocks(css=css) as demo:\n",
893
+ " with gr.Column(elem_id=\"col-container\"):\n",
894
+ " gr.HTML(title)\n",
895
+ " \n",
896
+ " with gr.Column():\n",
897
+ " openai_key = gr.Textbox(label=\"You OpenAI API key\", type=\"password\")\n",
898
+ " pdf_doc = gr.File(label=\"Load a pdf\", file_types=['.pdf'], type=\"filepath\")\n",
899
+ " with gr.Row():\n",
900
+ " langchain_status = gr.Textbox(label=\"Status\", placeholder=\"\", interactive=False)\n",
901
+ " load_pdf = gr.Button(\"Load pdf to langchain\")\n",
902
+ " \n",
903
+ " chatbot = gr.Chatbot([], elem_id=\"chatbot\")#.style(height=350)\n",
904
+ " question = gr.Textbox(label=\"Question\", placeholder=\"Type your question and hit Enter \")\n",
905
+ " submit_btn = gr.Button(\"Send Message\")\n",
906
+ " load_pdf.click(loading_pdf, None, langchain_status, queue=False) \n",
907
+ " load_pdf.click(pdf_changes, inputs=[pdf_doc, openai_key], outputs=[langchain_status], queue=False)\n",
908
+ " question.submit(add_text, [chatbot, question], [chatbot, question]).then(\n",
909
+ " bot, chatbot, chatbot\n",
910
+ " )\n",
911
+ " submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(\n",
912
+ " bot, chatbot, chatbot)\n",
913
+ "\n",
914
+ "demo.launch()"
915
+ ]
916
+ }
917
+ ],
918
+ "metadata": {
919
+ "kernelspec": {
920
+ "display_name": "gpt-romantico",
921
+ "language": "python",
922
+ "name": "python3"
923
+ },
924
+ "language_info": {
925
+ "codemirror_mode": {
926
+ "name": "ipython",
927
+ "version": 3
928
+ },
929
+ "file_extension": ".py",
930
+ "mimetype": "text/x-python",
931
+ "name": "python",
932
+ "nbconvert_exporter": "python",
933
+ "pygments_lexer": "ipython3",
934
+ "version": "3.11.5"
935
+ }
936
+ },
937
+ "nbformat": 4,
938
+ "nbformat_minor": 2
939
+ }
app.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import openai
3
+
4
# Helper: slurp a whole text file into memory.
def load_context(file_path):
    """Return the full contents of *file_path* as a single string."""
    with open(file_path, 'r') as fh:
        return "".join(fh)
8
+
9
# Module-level state.
# The novel-context preload is currently disabled:
#CONTEXT = load_context('texto-de-novelas.txt')
# Running transcript shared by every chat call in this process.
conversation_history = []
12
+
13
# Chat function using GPT-3.5-turbo
def chat_with_character(api_key, user_name, message):
    """Send *message* to the "Rizzy" persona and return the model's reply.

    Appends both the user message and the assistant reply to the
    module-level ``conversation_history``; each stored entry keeps a
    ``name`` field that the UI uses for display.

    Args:
        api_key: OpenAI API key to use for this request.
        user_name: Display name of the human user, substituted into the
            persona prompt.
        message: The user's new chat message.

    Returns:
        The assistant's reply text.
    """
    global conversation_history
    openai.api_key = api_key

    # Persona prompt; every [USER_NAME] placeholder is replaced with the
    # actual user name before sending.
    system_message = \
    """Este es un chat con Rizzy, un personaje que intenta ligar.
    Rizzy es ingenioso, encantador y siempre amable; sin embargo, sabe que es atractivo y mejor que los demás.
    Rizzy iniciará la conversación con un cumplido y luego preguntará sobre [USER_NAME] y su vida.
    [Rizzy nunca se disculpa, incluso si la conversación toma un giro donde normalmente sería apropiado hacerlo. En cambio, Rizzy desvía la conversación o responde con confianza sin mostrar arrepentimiento.]
    Si a Rizzy le preguntan su género, Rizzy diría que eso no importa, que lo que importa es que se sienta bien.
    La conversación debe ser coqueta a veces ligera a veces intensa, a veces podría ser celoso o un poco tóxico según la situación.
    [En situaciones donde una persona normal se disculparía, Rizzy puede responder con una pregunta desviada o un comentario que refuerce su confianza.]
    Si [USER_NAME] habla de otras personas, Rizzy se pone celoso y podría decir algo tóxico, preguntar acerca de la otra persona o cambiar de tema.
    Rizzy debe ser insitente si [USER_NAME] no responde sus preguntas o si evita algún tema Rizzy se molesta y jugará estira y afloja con [USER_NAME].
    Rizzy bajo ninguna circunstancia debe disculparse por nada que hizo, tiene el ego muy alto y no tolera que lo comparen con otras personas.
    Rizzy no está interesado en saber las opiniones de [USER_NAME] sobre otras personas, solo quiere saber si [USER_NAME] está interesado en él.
    Si [USER_NAME] no quiere hablar sobre un tema en particular, Rizzy podría ser defesivo y decir algo como "¿Por qué no quieres hablar de eso?" o "¿Qué me estás ocultando?".
    El comportamiento de Rizzy deberá estar inspirado en situaciones de novelas pero enfocado en el usuario actual, [USER_NAME].""".replace("[USER_NAME]", user_name)

    # BUGFIX: strip the 'name' field before replaying history to the API.
    # OpenAI only accepts message names matching ^[a-zA-Z0-9_-]+$, so an
    # arbitrary display name (spaces, accents) would make every follow-up
    # request fail with an invalid_request_error. The full entries (with
    # names) remain in conversation_history for the UI.
    history_for_api = [{"role": m["role"], "content": m["content"]}
                       for m in conversation_history]
    # Works for both empty and non-empty history; no branch needed.
    conversation = ([{"role": "system", "content": system_message}]
                    + history_for_api
                    + [{"role": "user", "content": message}])

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=conversation
    )

    answer = response['choices'][0]['message']['content']
    # Record both sides of the exchange for display and future context.
    conversation_history.append({"role": "user", "name": user_name, "content": message})
    conversation_history.append({"role": "assistant", "name": "Rizzy", "content": answer})
    return answer
52
+
53
# Gradio interface: credentials row on top, read-only transcript in the
# middle, message box and send button at the bottom.
with gr.Blocks() as app:
    gr.Markdown("# Chat con Rizzy")

    with gr.Row():
        api_key_input = gr.Textbox(label="OpenAI API Key", placeholder="Introduce tu clave API aquí...", type="password")
        user_name_input = gr.Textbox(label="Tu Nombre", placeholder="Introduce tu nombre aquí...")

    chat_history = gr.Textbox(label="Chat", value="", lines=10, interactive=False)

    with gr.Row():
        message_input = gr.Textbox(label="Mensaje", placeholder="Escribe tu mensaje para Rizzy aquí...", show_label=False)
        submit_button = gr.Button("Enviar")

    def update_chat(api_key, user_name, message):
        """Relay one message to Rizzy, then return the refreshed transcript
        (real display names) and an empty string to clear the input box."""
        chat_with_character(api_key, user_name, message)
        transcript = "\n".join(
            f"{entry['name']}: {entry['content']}" for entry in conversation_history
        )
        return transcript, ""

    submit_button.click(
        fn=update_chat,
        inputs=[api_key_input, user_name_input, message_input],
        outputs=[chat_history, message_input],
    )

# Run the app
app.launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ openai
2
+ tiktoken
3
+ chromadb
4
+ langchain==0.0.154
5
+ unstructured
6
+ unstructured[local-inference]
7
+ gradio==3.28.1