CISCai committed
Commit 8acad5e
1 Parent(s): 65803ed

Fix example parameters

Files changed (1)
  1. README.md +5 -1
README.md CHANGED
@@ -194,8 +194,10 @@ grammar = LlamaGrammar.from_json_schema(json.dumps({
   }
 }))
 
-llm = Llama(model_path="./Mistral-7B-Instruct-v0.3.IQ4_XS.gguf", n_gpu_layers=33, n_ctx=32768, temperature=0.0, repeat_penalty=1.1)
+llm = Llama(model_path="./Mistral-7B-Instruct-v0.3.IQ4_XS.gguf", n_gpu_layers=33, n_ctx=32768)
 response = llm.create_chat_completion(
+  temperature = 0.0,
+  repeat_penalty = 1.1,
   messages = [
     {
       "role": "user",
@@ -228,6 +230,8 @@ response = llm.create_chat_completion(
 print(json.loads(response["choices"][0]["text"]))
 
 print(llm.create_chat_completion(
+  temperature = 0.0,
+  repeat_penalty = 1.1,
   messages = [
     {
       "role": "user",