vtrv.vls committed
Commit d4e0b1a · 1 Parent(s): 00b9214
Tabs themes
app.py
CHANGED
@@ -20,6 +20,12 @@ MODEL_LIB = {'TINYLLAMA': get_tinyllama, "QWEN2INS1B": get_qwen2ins1b, "RUBASE":
 GEN_LIB = {'TINYLLAMA': response_tinyllama, "QWEN2INS1B": response_qwen2ins1b, "RUBASE": response_gigachat}
 MODEL_LIST = ["TINYLLAMA", "QWEN2INS1B", "RUBASE"]
 
+css = """
+#small span{
+    font-size: 0.7em;
+}
+"""
+
 async def model_gen(
     content,
     chat_history,
@@ -107,10 +113,10 @@ def tab_online_arena():
     msg = gradio.Textbox(label='Prompt', placeholder='Put your prompt here')
 
     with gradio.Row():
-        gradio.Button('Both Good')
-        gradio.Button('Left Better')
-        gradio.Button('Right Better')
-        gradio.Button('Both Bad')
+        both_good = gradio.Button('Both Good')
+        left_better = gradio.Button('Left Better')
+        right_better = gradio.Button('Right Better')
+        both_bad = gradio.Button('Both Bad')
 
     with gradio.Row():
         with gradio.Accordion("Parameters", open=False):
@@ -148,27 +154,6 @@ def tab_online_arena():
         [msg, chatbot_right]
     )
 
-    # with gradio.Column():
-    #     gradio.ChatInterface(
-    #         fn=giga_gen,
-    #         examples=[{"text": "hello"}, {"text": "hola"}, {"text": "merhaba"}],
-    #         title="Giga",
-    #         multimodal=True,
-    #     )
-    # with gradio.Column():
-    #     gradio.ChatInterface(
-    #         fn=tiny_gen,
-    #         examples=[{"text": "hello"}, {"text": "hola"}, {"text": "merhaba"}],
-    #         title="Tiny",
-    #         multimodal=True,
-    #     )
-    # with gradio.Column():
-    #     gradio.Interface(fn=giga_gen, inputs="text", outputs="text", allow_flagging=False, title='Giga') # arena =
-    # with gradio.Column():
-    #     gradio.Interface(fn=tiny_gen, inputs="text", outputs="text", allow_flagging=False, title='TinyLlama') # arena =
-
-    # arena.launch()
-
 def tab_leaderboard():
     df = pd.DataFrame({
         "Model" : ['A', 'B', 'C',],
@@ -236,22 +221,43 @@ def tab_offline_arena():
             select_criteria_2: {"value": [], "__type__": "update"},
            select_criteria_3: {"value": [], "__type__": "update"}},
         inputs=[], outputs=[select_criteria_1, select_criteria_2, select_criteria_3])
-
-
+
+    chatbot_data = [['hey', 'Hey!'], ["are we testing something?", None]]
+    text_data = ["Are we?", "Indeed we are."]
+    eval_text = '1. Twist it\n2. Bop it\n3. Crank it'
+
+    with gradio.Accordion("History", open=False) as acc_history:
         with gradio.Row():
             with gradio.Column():
                 pass
             with gradio.Column(scale=0.8):
-                chatbot_history = gradio.Chatbot(
+                chatbot_history = gradio.Chatbot(container=True, elem_id="small")
             with gradio.Column():
                 pass
         with gradio.Row():
-            model_a = gradio.Text(
-            model_b = gradio.Text(
+            model_a = gradio.Text(label='Model A')
+            model_b = gradio.Text(label='Model B')
+        with gradio.Row():
+            eval_guide = gradio.Text('Get samples and outputs in order to generate guide', label='Evaluation guide')
+        with gradio.Row():
+            both_good = gradio.Button('Both Good')
+            left_better = gradio.Button('Left Better')
+            right_better = gradio.Button('Right Better')
+            both_bad = gradio.Button('Both Bad')
+
+        with gradio.Row():
+            with gradio.Column():
+                reason = gradio.Textbox(label='Reasoning', placeholder='Put your reasoning here...', lines=5)
+            with gradio.Column():
+                gradio.Textbox(value='You have chosen: None', interactive=False, show_label=False)
+                submit_next = gradio.Button("Submit your evaluation and get next")
+                skip_next = gradio.Button("Skip this example and get next one")
+
+    btn_show_history.click(lambda: (gradio.Accordion("History", open=True), chatbot_data, text_data[0], text_data[1], eval_text), inputs=[], outputs=[acc_history, chatbot_history, model_a, model_b, eval_guide])
 
 
 def build_demo():
-    with gradio.Blocks(theme=gradio.themes.Soft(text_size=gradio.themes.sizes.text_lg)) as demo: # , css=css, js=js_light
+    with gradio.Blocks(theme=gradio.themes.Soft(text_size=gradio.themes.sizes.text_lg), css=css) as demo: # , css=css, js=js_light
 
         with gradio.Tabs() as tabs:
             with gradio.TabItem("🐼 MERA leaderboard", id=0):
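The two UI patterns this commit introduces can be exercised in isolation: a css string passed to gradio.Blocks that targets components tagged elem_id="small", and a button click that opens the History accordion while filling its components in a single update. The following is a minimal stand-alone sketch, assuming Gradio 4.x (where returning a component from a callback acts as an update); btn_show_history is defined outside the hunks shown above, so its Button definition here is an assumption, and the placeholder data is copied from the diff.

import gradio

# CSS from the commit: shrinks text in any component tagged elem_id="small".
css = """
#small span{
    font-size: 0.7em;
}
"""

# Placeholder data mirroring the values hard-coded in the diff.
chatbot_data = [['hey', 'Hey!'], ["are we testing something?", None]]
text_data = ["Are we?", "Indeed we are."]
eval_text = '1. Twist it\n2. Bop it\n3. Crank it'

with gradio.Blocks(theme=gradio.themes.Soft(text_size=gradio.themes.sizes.text_lg), css=css) as demo:
    # Assumed definition: the diff references btn_show_history but creates it outside the shown hunks.
    btn_show_history = gradio.Button("Show history")

    with gradio.Accordion("History", open=False) as acc_history:
        chatbot_history = gradio.Chatbot(container=True, elem_id="small")
        model_a = gradio.Text(label='Model A')
        model_b = gradio.Text(label='Model B')
        eval_guide = gradio.Text(label='Evaluation guide')

    # One click opens the accordion and populates every output component,
    # mirroring the btn_show_history.click(...) call added by the commit.
    btn_show_history.click(
        lambda: (gradio.Accordion("History", open=True),
                 chatbot_data, text_data[0], text_data[1], eval_text),
        inputs=[],
        outputs=[acc_history, chatbot_history, model_a, model_b, eval_guide],
    )

if __name__ == "__main__":
    demo.launch()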