Update app.py
app.py CHANGED
@@ -6,6 +6,7 @@ import uvicorn
 from dotenv import load_dotenv
 from difflib import SequenceMatcher
 from tqdm import tqdm
+import threading
 
 load_dotenv()
 
@@ -112,23 +113,27 @@ async def generate_chat(request: ChatRequest):
 
     print(f"Procesando solicitud: {request.message}")
 
-    … (removed lines not captured in the extracted diff)
+    responses = []
+    threads = []
+
+    # Create a thread for each model
+    for llm in llms.values():
+        thread = threading.Thread(target=lambda: responses.append(worker_function(llm, request)))
+        threads.append(thread)
+        thread.start()
 
-    … (removed lines not captured in the extracted diff)
-    print(f"Modelo procesado: {response['literal'][:30]}...")
+    # Wait for all threads to finish
+    for thread in threads:
+        thread.join()
 
     # Select the best response
-    best_response = select_best_response(responses)
+    best_response = select_best_response([response['response'] for response in responses])
 
     print(f"Mejor respuesta seleccionada: {best_response}")
 
     return {
         "best_response": best_response,
-        "all_responses": responses
+        "all_responses": [response['response'] for response in responses]
    }
 
 if __name__ == "__main__":
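A note on the threaded version above: the lambda passed to threading.Thread closes over the loop variable llm, so a thread that starts late can end up calling a different model than intended, and any exception raised inside worker_function is swallowed by the thread, which can leave responses shorter than select_best_response expects. Below is a minimal sketch of an alternative using concurrent.futures; worker_function and llms are taken from the diff, but their exact signatures are assumptions, and run_models_concurrently is a hypothetical helper, not part of the app.

from concurrent.futures import ThreadPoolExecutor

def run_models_concurrently(llms, request, worker):
    """Fan one request out to every model and collect the results in submission order."""
    with ThreadPoolExecutor(max_workers=max(len(llms), 1)) as executor:
        # submit() receives llm as an argument, so each task is bound to its own
        # model instead of closing over the loop variable.
        futures = [executor.submit(worker, llm, request) for llm in llms.values()]
        # result() blocks until each task finishes and re-raises any worker
        # exception here, instead of silently dropping that model's response.
        return [future.result() for future in futures]

Called as responses = run_models_concurrently(llms, request, worker_function), it would replace both the thread-creation loop and the join loop while keeping the rest of the endpoint unchanged.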