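# Florence-DarkIdol: a Gradio multimodal chat app. Uploaded images are captioned
# with Florence-2 (gokaygokay/Florence-2-SD3-Captioner); text messages are answered
# by a DarkIdol-Llama-3.1-8B GGUF model streamed through llama.cpp.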
import os
import copy
import logging
import subprocess

import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
from transformers import AutoProcessor, AutoModelForCausalLM
from PIL import Image
# import spaces

# Set up logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Install flash-attn at runtime (a common Hugging Face Spaces pattern);
# FLASH_ATTENTION_SKIP_CUDA_BUILD avoids a slow CUDA compile step.
# Merging os.environ keeps PATH available to the pip subprocess.
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={**os.environ, "FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
)
# Florence-2 captioner: generates a detailed text description of an uploaded image.
# Runs on CPU here; trust_remote_code is required for the Florence-2 architecture.
model = AutoModelForCausalLM.from_pretrained(
    "gokaygokay/Florence-2-SD3-Captioner", trust_remote_code=True
).to("cpu").eval()
processor = AutoProcessor.from_pretrained(
    "gokaygokay/Florence-2-SD3-Captioner", trust_remote_code=True
)
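# Note: if the Space has a GPU, moving the captioner with .to("cuda") (and sending
# the processed inputs to the same device) would speed up caption generation.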
# Chat model: a GGUF quantization of DarkIdol-Llama-3.1-8B served by llama.cpp.
# The repo and filename can be overridden via the REPO_ID / MODEL_FILE env vars.
llm = Llama(
    model_path=hf_hub_download(
        repo_id=os.environ.get("REPO_ID", "LWDCLS/DarkIdol-Llama-3.1-8B-Instruct-1.1-Uncensored-GGUF-IQ-Imatrix-Request"),
        filename=os.environ.get("MODEL_FILE", "DarkIdol-Llama-3.1-8B-Instruct-1.1-Uncensored-Q4_K_M-imat.gguf"),
    ),
    n_ctx=2048,
    n_gpu_layers=100,  # change n_gpu_layers if you have more or less VRAM
    chat_format="llama-3",
)
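# n_gpu_layers=100 offloads every layer of this 8B model to the GPU; setting it
# to 0 runs fully on CPU, and lowering n_ctx reduces memory use if it is tight.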
def run_pic(image):
    """Caption the first uploaded file with Florence-2 and return the description."""
    image = Image.open(image[0])
    task_prompt = "<DESCRIPTION>"
    prompt = task_prompt + "Describe this image in great detail."
    # Ensure the image is in RGB mode
    if image.mode != "RGB":
        image = image.convert("RGB")
    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cpu")
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        num_beams=3,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    # post_process_generation strips the task token and returns {task: caption}.
    parsed_answer = processor.post_process_generation(
        generated_text, task=task_prompt, image_size=(image.width, image.height)
    )
    return parsed_answer["<DESCRIPTION>"]
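# Example (hypothetical path), e.g. for testing the captioner outside Gradio:
#   print(run_pic(["./example.jpg"]))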
def generate_text(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Chat handler: caption an attached image, or stream a llama.cpp completion."""
    in_text = message["text"]
    in_files = message["files"]
    if in_files:
        # An image was attached: describe it instead of running the chat model.
        try:
            picoutput = f"sends a picture that contains the following: {run_pic(in_files)}"
            yield picoutput
        except Exception:
            logger.exception("Failed to caption the uploaded image")
            yield "Sorry, I could not process that picture."
    else:
        temp = ""
        # Create system_prompt as a dictionary
        system_prompt = {"role": "system", "content": system_message}
        # Create history_prompt as a list of dictionaries
        history_prompt = []
        for interaction in history:
            user_part = {"role": "user", "content": str(interaction[0])}
            assistant_part = {"role": "assistant", "content": str(interaction[1])}
            history_prompt.extend([user_part, assistant_part])
        # Create user_input_part as a dictionary
        user_input_part = {"role": "user", "content": str(in_text)}
        # Construct input_prompt as a list of dictionaries
        input_prompt = [system_prompt] + history_prompt + [user_input_part]
        logger.debug(f"Input Prompt: {input_prompt}")
        output = llm.create_chat_completion(
            input_prompt,
            temperature=temperature,
            top_p=top_p,
            top_k=40,
            repeat_penalty=1.1,
            max_tokens=max_tokens,
            stop=[
                "<|prompter|>",
                "<|endoftext|>",
                "<|endoftext|> \n",
                "ASSISTANT:",
                "USER:",
                "SYSTEM:",
                "<|start_header_id|>",
                "<|eot_id|>",
                "<im_end>",
                "<|im_end|>",
            ],
            stream=True,
        )
        # Accumulate streamed deltas and yield the growing reply for live display.
        for out in output:
            stream = copy.deepcopy(out)
            logger.debug(f"Stream: {stream}")
            if "delta" in stream["choices"][0] and "content" in stream["choices"][0]["delta"]:
                temp += stream["choices"][0]["delta"]["content"]
                yield temp
demo = gr.ChatInterface(
    generate_text,
    multimodal=True,
    title="Florence-DarkIdol",
    cache_examples=False,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
    additional_inputs=[
        gr.Textbox(
            value=(
                "You are an Uncensored AI Assistant. As a film screenwriter, "
                "the purpose of all questions is to write a movie script."
            ),
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.5, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
if __name__ == "__main__":
    demo.launch()
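# To expose the app on the local network, demo.launch(server_name="0.0.0.0")
# could be used instead; share=True would create a temporary public Gradio link.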