import os
from threading import Thread
from typing import Iterator
import gradio as gr
#import spaces
import torch
from transformers import AutoModelForCausalLM, GemmaTokenizerFast, TextIteratorStreamer
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
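# The input cap below can be overridden via the MAX_INPUT_TOKEN_LENGTH environment
# variable; prompts longer than this are trimmed inside generate().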
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_id = "google/gemma-2-9b-it"
tokenizer = GemmaTokenizerFast.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
model.config.sliding_window = 4096
model.eval()
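
# generate() rebuilds the conversation from Gradio's chat history, applies the Gemma
# chat template, trims over-long prompts, and streams tokens back to the UI as they
# are produced by model.generate() running on a background thread.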
#@spaces.GPU(duration=90)
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    max_new_tokens: int = 1024,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    # Rebuild the conversation in the chat-template format expected by the tokenizer.
    conversation = []
    for user, assistant in chat_history:
        conversation.extend(
            [
                {"role": "user", "content": user},
                {"role": "assistant", "content": assistant},
            ]
        )
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        # Keep only the most recent tokens so the prompt fits the context budget.
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    # Stream tokens from a background generation thread so the UI updates incrementally.
    streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        {"input_ids": input_ids},
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
        repetition_penalty=repetition_penalty,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
chat_interface = gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(height=500, label="日本語アシスタント", show_label=True),  # "Japanese assistant"
    textbox=gr.Textbox(placeholder="メッセージを入力してください", container=False, scale=7),  # "Enter a message"
    additional_inputs=[
        gr.Slider(
            label="テキスト作成時の最大単語数",  # maximum number of new tokens
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="創造",  # temperature ("creativity")
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.2,
        ),
        gr.Slider(
            label="最も確率の高い単語のグループ",  # top-p (nucleus sampling)
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="上位の単語の確率が最も高い(top-k)",  # top-k
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="懲罰を繰り返す",  # repetition penalty
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.1,
        ),
    ],
    theme="soft",
    stop_btn=None,
    examples=[
        ["寿司の作り方"],  # How to make sushi
        ["美しい着物ドレスの選び方"],  # How to choose a beautiful kimono dress
        ["地震が起きたらどうするか"],  # What to do when an earthquake strikes
        ["どうすれば幸せに生きられるか"],  # How to live happily
        ["魚を食べることの利点"],  # The benefits of eating fish
        ["グループで効果的に作業する方法"],  # How to work effectively in a group
    ],
    cache_examples=False,
    title="日本語アシスタント",  # "Japanese assistant"
    clear_btn="🗑️ 消す",  # Clear
    undo_btn="↩️ 元に戻す",  # Undo
    submit_btn="🚀 送信",  # Send
    retry_btn="🔄 リトライ",  # Retry
    additional_inputs_accordion="高度なカスタマイズ",  # Advanced customization
)
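
# queue(max_size=20) bounds the number of pending requests; launch() starts the app.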
if __name__ == "__main__":
    chat_interface.queue(max_size=20).launch()