from llama_cpp.server.app import create_app, Settings
from fastapi.responses import HTMLResponse
import os

app = create_app(
    Settings(
        n_threads=2,  # set to the number of physical CPU cores
        model="model/gguf-model.bin",  # path to the local GGUF model file
        embedding=True,  # enable embedding support for /v1/embeddings
        chat_format="intel",  # prompt template registered for Intel's neural-chat models
    )
)
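
# create_app() builds a FastAPI application that already exposes the
# OpenAI-compatible endpoints (/v1/completions, /v1/chat/completions,
# /v1/embeddings), so additional routes such as the one below can be
# attached to it directly.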

# Read the content of index.html once and store it in memory
with open("index.html", "r") as f:
    content = f.read()

# Serve the cached page at the root URL. Without this decorator the
# handler is never registered and the HTMLResponse import goes unused.
@app.get("/", response_class=HTMLResponse)
async def read_items():
    return content

if __name__ == "__main__":
    import uvicorn

    # HOST and PORT must be set in the environment before launching,
    # e.g. HOST=0.0.0.0 PORT=8000
    uvicorn.run(
        app,
        host=os.environ["HOST"],
        port=int(os.environ["PORT"]),
    )
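
# A minimal client sketch (an assumption, not part of the server above):
# with the server running, the OpenAI-compatible chat endpoint can be
# exercised with the `requests` package. It is left commented out so this
# module remains a pure server script; paste it into a separate file to run.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:8000/v1/chat/completions",  # assumes PORT=8000
#       json={"messages": [{"role": "user", "content": "Hello!"}]},
#   )
#   print(resp.json()["choices"][0]["message"]["content"])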