moriire committed on
Commit ed1e95d
1 Parent(s): 8cd1792

Update app/llm.py

Files changed (1)
  1. app/llm.py +3 -3
app/llm.py CHANGED
@@ -1,6 +1,6 @@
 import fastapi
 from fastapi.responses import JSONResponse
-from fastapi_users import models
+from fastapi_users import schemas
 from time import time
 #from fastapi.middleware.cors import CORSMiddleware
 #MODEL_PATH = "./qwen1_5-0_5b-chat-q4_0.gguf" #"./qwen1_5-0_5b-chat-q4_0.gguf"
@@ -73,7 +73,7 @@ def health():
 
 # Chat Completion API
 @llm_router.post("/chat/", tags=["llm"])
-async def chat(chatm:ChatModel, user: models.BaseUser = fastapi.Depends(current_active_user)):
+async def chat(chatm:ChatModel, user: schemas.BaseUser = fastapi.Depends(current_active_user)):
     try:
         st = time()
         output = llm_chat.create_chat_completion(
@@ -96,7 +96,7 @@ async def chat(chatm:ChatModel, user: models.BaseUser = fastapi.Depends(current_
 
 # Chat Completion API
 @llm_router.post("/generate", tags=["llm"])
-async def generate(gen:GenModel, user: models.BaseUser = fastapi.Depends(current_active_user)):
+async def generate(gen:GenModel, user: schemas.BaseUser = fastapi.Depends(current_active_user)):
     gen.system = "You are an helpful medical AI assistant."
     gen.temperature = 0.5
     gen.seed = 42
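
Context for the change: in fastapi-users 10.x and later, the Pydantic user schemas were moved out of fastapi_users.models (which now holds protocols and type variables rather than Pydantic models) into fastapi_users.schemas, which is why the dependency annotation becomes schemas.BaseUser. Below is a minimal sketch of the documented subclassing pattern, assuming fastapi-users >= 10 is installed; UserRead is an illustrative name only and is not defined in this repository.

import uuid

from fastapi_users import schemas


class UserRead(schemas.BaseUser[uuid.UUID]):
    # Illustrative subclass only: inherits id, email, is_active,
    # is_superuser and is_verified from the relocated schema base.
    pass

Annotating the injected user with schemas.BaseUser (rather than a database model) keeps the LLM endpoints decoupled from the persistence layer while still giving the handlers a typed user object.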