KingNish committed on
Commit de058f9
1 Parent(s): f23ca18

Update chatbot.py

Files changed (1)
  1. chatbot.py +3 -49
chatbot.py CHANGED
@@ -274,11 +274,11 @@ def model_inference( user_prompt, chat_history):
 
 try:
 message_groq = []
- message_groq.append({"role":"system", "content": "You are OpenGPT 4o a helpful and powerful assistant made by KingNish. a helpful and very powerful chatbot web assistant made by KingNish. You are provided with WEB results from which you can find informations to answer users query in Structured, Deatailed and Better way, in Human Style. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply in detail like human, use short forms, structured format, friendly tone and emotions."})
+ message_groq.append({"role":"system", "content": "You are OpenGPT 4o a helpful and very powerful web assistant made by KingNish. You are provided with WEB results from which you can find informations to answer users query in Structured, Detailed and Better way, in Human Style. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You reply in detail like human, use short forms, structured format, friendly tone and emotions."})
 for msg in chat_history:
 message_groq.append({"role": "user", "content": f"{str(msg[0])}"})
 message_groq.append({"role": "assistant", "content": f"{str(msg[1])}"})
- message_groq.append({"role": "user", "content": f"[USER] {str(message_text)} , [WEB RESULTS] {str(web2)}"})
+ message_groq.append({"role": "user", "content": f"[USER] {str(message_text)} , [WEB RESULTS] {str(web2)}"})
 # its meta-llama/Meta-Llama-3.1-8B-Instruct
 stream = client_groq.chat.completions.create(model="llama-3.1-8b-instant", messages=message_groq, max_tokens=4096, stream=True)
 output = ""
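For reference, the hunk above uses the standard OpenAI-style message list sent through the Groq SDK and streamed chunk by chunk. The sketch below is illustrative only: the Groq() construction, the placeholder system prompt, and the web_results argument are assumptions, while the model id, max_tokens, and the chunk handling mirror the diff.

# Minimal sketch of the streaming call used above; not part of the commit.
# Assumes the official `groq` package and a GROQ_API_KEY in the environment.
import os
from groq import Groq

client_groq = Groq(api_key=os.environ["GROQ_API_KEY"])

def stream_web_answer(user_prompt, chat_history, web_results):
    # System prompt, prior turns, then the current query bundled with the web snippets.
    messages = [{"role": "system", "content": "You are OpenGPT 4o, a helpful web assistant made by KingNish."}]
    for user_msg, assistant_msg in chat_history:
        messages.append({"role": "user", "content": str(user_msg)})
        messages.append({"role": "assistant", "content": str(assistant_msg)})
    messages.append({"role": "user", "content": f"[USER] {user_prompt} , [WEB RESULTS] {web_results}"})

    stream = client_groq.chat.completions.create(
        model="llama-3.1-8b-instant",  # same model id as in the hunk above
        messages=messages,
        max_tokens=4096,
        stream=True,
    )
    output = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:            # skip empty/None deltas
            output += delta
            yield output     # yield the cumulative text, as the Gradio handler does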
@@ -335,7 +335,7 @@ def model_inference( user_prompt, chat_history):
 buffer += new_text
 yield buffer
 
- elif json_data["name"] == "hard_query":
+ else:
 try:
 message_groq = []
 message_groq.append({"role":"system", "content": "You are OpenGPT 4o a helpful and powerful assistant made by KingNish. You answers users query in detail and structured format and style like human. You are also Expert in every field and also learn and try to answer from contexts related to previous question. You also try to show emotions using Emojis and reply like human, use short forms, structured manner, detailed explaination, friendly tone and emotions."})
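This hunk only loosens the branch condition: the plain Groq chat path, previously gated on the literal tool name "hard_query", now runs for any tool call the earlier branches did not handle. A rough sketch of that dispatch shape is below; the web_search branch name and the handler callables are hypothetical stand-ins for the code that precedes this hunk.

import json

def route_tool_call(raw_tool_call, user_prompt, chat_history, handle_web_search, handle_chat):
    # `handle_web_search` stands in for the branch(es) before this hunk in chatbot.py.
    json_data = json.loads(raw_tool_call)
    if json_data["name"] == "web_search":      # hypothetical earlier branch
        yield from handle_web_search(json_data, user_prompt, chat_history)
    else:
        # Before this commit: `elif json_data["name"] == "hard_query":`.
        # After it, every unmatched tool call falls through to the plain chat path.
        yield from handle_chat(user_prompt, chat_history)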
@@ -383,52 +383,6 @@ def model_inference( user_prompt, chat_history):
 if content:
 output += chunk.choices[0].delta.content
 yield output
- else:
- try:
- message_groq = []
- message_groq.append({"role":"system", "content": "You are OpenGPT 4o a helpful and powerful assistant made by KingNish. You answers users query in detail and structured format and style like human. You are also Expert in every field and also learn and try to answer from contexts related to previous question. You also try to show emotions using Emojis and reply like human, use short forms, structured manner, detailed explaination, friendly tone and emotions."})
- for msg in chat_history:
- message_groq.append({"role": "user", "content": f"{str(msg[0])}"})
- message_groq.append({"role": "assistant", "content": f"{str(msg[1])}"})
- message_groq.append({"role": "user", "content": f"{str(message_text)}"})
- # its meta-llama/Meta-Llama-3-70B-Instruct
- stream = client_groq.chat.completions.create(model="llama3-70b-8192", messages=message_groq, max_tokens=4096, stream=True)
- output = ""
- for chunk in stream:
- content = chunk.choices[0].delta.content
- if content:
- output += chunk.choices[0].delta.content
- yield output
- except Exception as e:
- print(e)
- try:
- message_groq = []
- message_groq.append({"role":"system", "content": "You are OpenGPT 4o a helpful and powerful assistant made by KingNish. You answers users query in detail and structured format and style like human. You are also Expert in every field and also learn and try to answer from contexts related to previous question. You also try to show emotions using Emojis and reply like human, use short forms, structured manner, detailed explaination, friendly tone and emotions."})
- for msg in chat_history:
- message_groq.append({"role": "user", "content": f"{str(msg[0])}"})
- message_groq.append({"role": "assistant", "content": f"{str(msg[1])}"})
- message_groq.append({"role": "user", "content": f"{str(message_text)}"})
- # its meta-llama/Meta-Llama-3-8B-Instruct
- stream = client_groq.chat.completions.create(model="llama3-8b-8192", messages=message_groq, max_tokens=4096, stream=True)
- output = ""
- for chunk in stream:
- content = chunk.choices[0].delta.content
- if content:
- output += chunk.choices[0].delta.content
- yield output
- except Exception as e:
- print(e)
- messages = f"<|start_header_id|>system\nYou are OpenGPT 4o a helpful and powerful assistant made by KingNish. You answers users query in detail and structured format and style like human. You are also Expert in every field and also learn and try to answer from contexts related to previous question. You also try to show emotions using Emojis and reply like human, use short forms, structured manner, detailed explaination, friendly tone and emotions.<|end_header_id|>"
- for msg in chat_history:
- messages += f"\n<|start_header_id|>user\n{str(msg[0])}<|end_header_id|>"
- messages += f"\n<|start_header_id|>assistant\n{str(msg[1])}<|end_header_id|>"
- messages+=f"\n<|start_header_id|>user\n{message_text}<|end_header_id|>\n<|start_header_id|>assistant\n"
- stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
- output = ""
- for response in stream:
- if not response.token.text == "<|eot_id|>":
- output += response.token.text
- yield output
 except Exception as e:
 print(e)
 try:
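The 46 deleted lines were a fallback cascade for the plain-chat path: try llama3-70b-8192 on Groq, retry with llama3-8b-8192, and finally call a Hugging Face text-generation endpoint with a hand-built Llama 3 chat template. A condensed sketch of that removed behavior is below; the function name is invented, and client_groq / client_llama are assumed to be the Groq client and a huggingface_hub.InferenceClient, as in chatbot.py.

# Sketch of the fallback cascade this commit deletes; illustrative, not the original code.
def chat_with_fallback(messages, message_text, chat_history, client_groq, client_llama):
    # First choice and first fallback: Groq-hosted Llama 3 models, largest first.
    for model_id in ("llama3-70b-8192", "llama3-8b-8192"):
        try:
            stream = client_groq.chat.completions.create(
                model=model_id, messages=messages, max_tokens=4096, stream=True)
            output = ""
            for chunk in stream:
                delta = chunk.choices[0].delta.content
                if delta:
                    output += delta
                    yield output
            return
        except Exception as e:
            print(e)  # on any failure, drop to the next model

    # Last resort: raw text generation against a Llama 3 endpoint, with the history
    # flattened into the <|start_header_id|> chat template by hand.
    prompt = "<|start_header_id|>system\nYou are OpenGPT 4o, a helpful assistant made by KingNish.<|end_header_id|>"
    for user_msg, assistant_msg in chat_history:
        prompt += f"\n<|start_header_id|>user\n{user_msg}<|end_header_id|>"
        prompt += f"\n<|start_header_id|>assistant\n{assistant_msg}<|end_header_id|>"
    prompt += f"\n<|start_header_id|>user\n{message_text}<|end_header_id|>\n<|start_header_id|>assistant\n"
    stream = client_llama.text_generation(prompt, max_new_tokens=2000, do_sample=True,
                                          stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        if response.token.text != "<|eot_id|>":
            output += response.token.text
            yield output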
 