llamafiles
A collection of Mozilla llamafiles for GGUF models.
Go to Files and versions and click the download button to download the file.
Alternatively, you can download it directly from this link: https://huggingface.co/NeuralLobes/phi-2.llamafile/resolve/main/phi-2.llamafile?download=true
After downloading, add .exe to the end of the file name (on Windows), then just double-click it and wait; it will run automatically.
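The download and rename steps can also be scripted. Below is a minimal sketch using only Python's standard library; the output file name phi-2.llamafile.exe is just an illustration for Windows (on Linux or macOS you would keep the original name and mark the file executable instead).

import urllib.request

# Direct download link for the phi-2 llamafile (same URL as above)
URL = (
    "https://huggingface.co/NeuralLobes/phi-2.llamafile/"
    "resolve/main/phi-2.llamafile?download=true"
)

# Save with a .exe suffix so Windows treats the file as an executable.
urllib.request.urlretrieve(URL, "phi-2.llamafile.exe")
print("Downloaded phi-2.llamafile.exe")

Once the file is running, it serves an OpenAI-compatible API on http://localhost:8080, which the example below uses.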
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8080/v1",  # "http://<Your api-server IP>:port"
    api_key="sk-no-key-required",
)

completion = client.chat.completions.create(
    model="LLaMA_CPP",
    messages=[
        {"role": "system", "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests."},
        {"role": "user", "content": "Write a limerick about python exceptions"},
    ],
)

print(completion.choices[0].message)
Example output:
ChatCompletionMessage(content='There once was a programmer named Mike\nWho wrote code that would often strike\nAn error would occur\nAnd he\'d shout "Oh no!"\nBut Python\'s exceptions made it all right.', role='assistant', function_call=None, tool_calls=None)
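If you prefer not to install the openai package, the same server can be called with plain HTTP. The sketch below is an assumption-based example that targets the /v1/chat/completions route on the default address http://localhost:8080 using only the standard library.

import json
import urllib.request

# Chat completion request against the llamafile's OpenAI-compatible endpoint.
payload = {
    "model": "LLaMA_CPP",
    "messages": [
        {"role": "user", "content": "Write a limerick about python exceptions"},
    ],
}

req = urllib.request.Request(
    "http://localhost:8080/v1/chat/completions",  # assumes default port 8080
    data=json.dumps(payload).encode("utf-8"),
    headers={
        "Content-Type": "application/json",
        "Authorization": "Bearer sk-no-key-required",  # no real key is needed
    },
)

with urllib.request.urlopen(req) as resp:
    reply = json.loads(resp.read())

print(reply["choices"][0]["message"]["content"])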