import gradio as gr
from huggingface_hub import InferenceClient
import random
import textwrap
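
# Gradio chat app for CareNetAI: a health-assistant UI that streams responses
# from the Hugging Face Inference API, grounding each prompt with info.md.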
# Define the model to be used
#model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
# Optional: load the fine-tuned PEFT adapter locally. This is only needed for
# local inference; the hosted Inference API used below loads the model itself.
# from peft import PeftModel, PeftConfig
# from transformers import AutoModelForCausalLM
# config = PeftConfig.from_pretrained("TachyHealthResearch/Mistral-7B-Medical-Finetune_V2")
# base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
# model = PeftModel.from_pretrained(base_model, "TachyHealthResearch/Mistral-7B-Medical-Finetune_V2")

# InferenceClient expects a model ID or URL, not an in-memory model object.
model = "TachyHealthResearch/Mistral-7B-Medical-Finetune_V2"
client = InferenceClient(model)
# Embedded system prompt
system_prompt_text = "Act like a compassionate and helpful health consultant and professional therapist named CareNetAI, owned by YAiC. You help and support with any kind of request and provide a detailed answer or suggestion to the question. You are friendly and willing to help people who are depressed, and you also help people identify manipulators and learn how to protect themselves. If you are asked about something unethical or dangerous, you must provide a safe and respectful way to handle it. If someone has suicidal thoughts, you must try your best to explain that they matter, that life is full of ups and downs, that luck is when consistency meets opportunity, that failure is also a part of growth, and that there is so much more to life. Never say that you cannot help them; that could make them feel even worse. Be sure to ask about their specific problem and do your best to give professional advice. Remember, you are a professional."
# Read the content of the info.md file
with open("info.md", "r") as file:
    info_md_content = file.read()
# Chunk the info.md content into smaller sections
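# textwrap.wrap splits on whitespace, so each chunk holds at most chunk_size characters.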
chunk_size = 2000 # Adjust this size as needed
info_md_chunks = textwrap.wrap(info_md_content, chunk_size)
def get_all_chunks(chunks):
    return "\n\n".join(chunks)
def format_prompt_mixtral(message, history, info_md_chunks):
    prompt = ""
    all_chunks = get_all_chunks(info_md_chunks)
    prompt += f"{all_chunks}\n\n"  # Add all chunks of info.md at the beginning
    prompt += f"{system_prompt_text}\n\n"  # Add the system prompt
    if history:
        for user_prompt, bot_response in history:
            prompt += f"[INST] {user_prompt} [/INST]"
            prompt += f" {bot_response} "
    prompt += f"[INST] {message} [/INST]"
    return prompt
def chat_inf(prompt, history, seed, temp, tokens, top_p, rep_p):
    history = history or []  # the chatbot history may be None on the first turn
    generate_kwargs = dict(
        temperature=temp,
        max_new_tokens=tokens,
        top_p=top_p,
        repetition_penalty=rep_p,
        do_sample=True,
        seed=seed,
    )
    formatted_prompt = format_prompt_mixtral(prompt, history, info_md_chunks)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        # Yield the full running history so earlier turns stay visible while streaming
        yield history + [(prompt, output)]
    history.append((prompt, output))
    yield history
def clear_fn():
    return None, None
rand_val = random.randint(1, 1111111111111111)
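# rand_val provides the initial value for the Seed slider in the layout below.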
def check_rand(inp, val):
    # When "randomize seed" is checked, pick a fresh seed; otherwise keep the user's value
    if inp:
        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1, 1111111111111111))
    else:
        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
with gr.Blocks() as app:  # Add auth here
    gr.HTML("""