"""Gradio app to showcase the LLM tokenization.""" | |
import os | |
import gradio as gr | |
from huggingface_hub import login | |
from playground_app import demo as playground_tab | |
from compression_app import demo as compression_tab | |
from character_app import demo as character_tab | |
auth_token = os.environ.get('HF_TOKEN', None) | |
if auth_token: | |
login(token=auth_token) | |
title = '<div align="center">Tokenizer Arena ⚔️</div>' | |
interface_list = [playground_tab, compression_tab, character_tab] | |
tab_names = [" ⚔️ Playground", "🏆 Compression Leaderboard", "📊 Character Statistics"] | |
with gr.Blocks(css="css/style.css", js="js/onload.js") as demo: | |
    gr.HTML(
        f"<h1 style='text-align: center; margin-bottom: 1rem'>{title}</h1>"
    )
    with gr.Tabs():
        for interface, tab_name in zip(interface_list, tab_names):
            with gr.Tab(label=tab_name):
                interface.render()
    model_name = gr.Textbox(
        placeholder="🔍 Add tokenizer from Hugging Face (e.g. Xenova/gpt-4o) and press ENTER...",
        show_label=False,
    )
    # The submit event is registered here without a handler, so typing a name
    # and pressing ENTER currently has no effect on its own.
    model_name.submit()
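    # A minimal sketch (assumption, not part of the original file) of how a
    # handler could be attached to that submit event: a hypothetical
    # `add_tokenizer` helper that verifies the requested tokenizer loads from
    # the Hub, then clears the textbox on success.
    #
    # from transformers import AutoTokenizer
    #
    # def add_tokenizer(name: str) -> str:
    #     AutoTokenizer.from_pretrained(name)  # raises if the repo has no usable tokenizer
    #     return ""  # clear the textbox
    #
    # model_name.submit(add_tokenizer, inputs=model_name, outputs=model_name)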
    # demo.load(js=open("js/onload.js", "r", encoding="utf-8").read())
if __name__ == "__main__": | |
demo.launch() | |
# demo.queue(max_size=1024, default_concurrency_limit=80).launch() | |