Spaces:
Runtime error
Runtime error
Matt
committed on
Commit
•
814e23a
1
Parent(s):
f75ac1d
Input fixes
Browse files
app.py
CHANGED
@@ -1,17 +1,18 @@
|
|
1 |
import gradio as gr
|
2 |
from transformers import AutoTokenizer
|
|
|
3 |
|
4 |
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
|
5 |
|
6 |
-
demo_conversation1 = [
|
7 |
{"role": "user", "content": "Hi there!"},
|
8 |
{"role": "assistant", "content": "Hello, human!"}
|
9 |
-
]
|
10 |
|
11 |
-
demo_conversation2 = [
|
12 |
{"role": "system", "content": "You are a helpful chatbot."},
|
13 |
{"role": "user", "content": "Hi there!"}
|
14 |
-
]
|
15 |
|
16 |
default_template = """{% for message in messages %}
|
17 |
{{ "<|im_start|>" + message["role"] + "\\n" + message["content"] + "<|im_end|>\\n" }}
|
@@ -22,18 +23,23 @@ default_template = """{% for message in messages %}
|
|
22 |
|
23 |
conversations = [demo_conversation1, demo_conversation2]
|
24 |
|
25 |
-
def apply_chat_template(template):
|
26 |
tokenizer.chat_template = template
|
27 |
outputs = []
|
28 |
-
for i,
|
|
|
29 |
without_gen = tokenizer.apply_chat_template(conversation, tokenize=False)
|
30 |
with_gen = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
|
31 |
-
out = f"Conversation {i} without generation prompt:\n\n{without_gen}\n\
|
32 |
outputs.append(out)
|
33 |
return tuple(outputs)
|
34 |
|
35 |
iface = gr.Interface(
|
36 |
fn=apply_chat_template,
|
37 |
-
inputs=
|
38 |
-
|
|
|
|
|
|
|
|
|
39 |
iface.launch()
|
|
|
1 |
import gradio as gr
|
2 |
from transformers import AutoTokenizer
|
3 |
+
import json
|
4 |
|
5 |
# Load the Zephyr-7B tokenizer from the Hugging Face Hub at startup; the app
# only uses its chat-template machinery (chat_template / apply_chat_template).
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
|
6 |
|
7 |
+
# The demo conversations are kept as JSON *strings* (not lists) so they can be
# displayed and edited directly in the Gradio text boxes, then parsed with
# json.loads() inside apply_chat_template().
demo_conversation1 = """[
    {"role": "user", "content": "Hi there!"},
    {"role": "assistant", "content": "Hello, human!"}
]"""

demo_conversation2 = """[
    {"role": "system", "content": "You are a helpful chatbot."},
    {"role": "user", "content": "Hi there!"}
]"""
|
16 |
|
17 |
default_template = """{% for message in messages %}
|
18 |
{{ "<|im_start|>" + message["role"] + "\\n" + message["content"] + "<|im_end|>\\n" }}
|
|
|
23 |
|
24 |
# NOTE(review): this list appears unused in this version — apply_chat_template
# now receives the conversations as its own arguments. Confirm before removing.
conversations = [demo_conversation1, demo_conversation2]
|
25 |
|
26 |
+
def apply_chat_template(template, test_conversation1, test_conversation2):
    """Render both test conversations with the given Jinja chat template.

    Args:
        template: Jinja chat-template string; assigned to the module-level
            tokenizer before rendering.
        test_conversation1: JSON string encoding a list of message dicts
            (each with "role" and "content" keys).
        test_conversation2: JSON string encoding a second conversation.

    Returns:
        A 2-tuple of formatted report strings, one per conversation, each
        showing the input plus the template output with and without the
        generation prompt.

    Raises:
        json.JSONDecodeError: if either conversation string is not valid JSON.
    """
    tokenizer.chat_template = template
    outputs = []
    # Bug fix: enumerate() was previously called as
    # enumerate(test_conversation1, test_conversation2), which passes the
    # second conversation as enumerate's integer `start` argument and raises
    # TypeError. Wrap both conversations in a tuple instead.
    for i, conversation_str in enumerate((test_conversation1, test_conversation2)):
        # Bug fix: was json.loads(conversation) — a name not yet bound at
        # this point; parse the JSON string actually being iterated.
        conversation = json.loads(conversation_str)
        without_gen = tokenizer.apply_chat_template(conversation, tokenize=False)
        with_gen = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
        out = f"Conversation {i}:\n\n{conversation_str}\n\nOutput without generation prompt:\n\n{without_gen}\n\nOutput with generation prompt:\n\n{with_gen}\n\n"
        outputs.append(out)
    return tuple(outputs)
|
36 |
|
37 |
# Build the UI: an editable template box plus one box per demo conversation;
# the two rendered results are shown as plain-text outputs.
template_box = gr.TextArea(value=default_template, lines=10, max_lines=30, label="Chat Template")
conversation1_box = gr.TextArea(value=str(demo_conversation1), lines=5, label="Conversation 1")
conversation2_box = gr.TextArea(value=str(demo_conversation2), lines=5, label="Conversation 2")

iface = gr.Interface(
    fn=apply_chat_template,
    inputs=[template_box, conversation1_box, conversation2_box],
    outputs=["text", "text"],
)
iface.launch()
|