import gradio as gr
from huggingface_hub import repo_exists
from transformers import AutoTokenizer


def token_viz(model_name: str, text: str) -> list[tuple[str, str]]:
    """Tokenize *text* with *model_name*'s tokenizer for display.

    Args:
        model_name: Hugging Face Hub repo id (e.g. "Qwen/Qwen2-72B-Instruct").
        text: The input string to tokenize.

    Returns:
        A list of (decoded_token, token_id_string) pairs, the format
        expected by ``gr.HighlightedText``.

    Raises:
        gr.Error: If *model_name* is not an existing Hub repo, so the
            message surfaces as an error toast in the UI.
    """
    if not repo_exists(model_name):
        # Bug fix: the original merely *constructed* gr.Error and fell
        # through to from_pretrained; it must be raised to stop here
        # and show the message in the Gradio UI.
        raise gr.Error(f"{model_name} is not a valid HF repo. Please enter a valid repo.")
    # Per-repo cache dir keeps downloads of different tokenizers separate.
    tokenizer = AutoTokenizer.from_pretrained(
        model_name, cache_dir=f"./.cache/hf/{model_name}"
    )
    token_ids = tokenizer.encode(text)
    # Replacing '\n' with a literal "\n" for visualization purposes, so
    # newlines remain visible as text in the highlighted output.
    return [
        (tokenizer.decode(token_id).replace("\n", r"\n"), str(token_id))
        for token_id in token_ids
    ]


MARKDOWN = """

Token Visualizer ⚔️

Enter the Tokenizer you want to use to visualize the tokens. Example: To use model's tokenizer just enter **Qwen/Qwen2-72B-Instruct** """

with gr.Blocks(analytics_enabled=False) as demo:
    gr.Markdown(MARKDOWN)
    with gr.Row():
        model_name = gr.Textbox(
            label="repo_name",
            interactive=True,
            placeholder="Enter the HF model here...",
        )
        text = gr.Textbox(
            label="text",
            interactive=True,
            placeholder="Enter the text to be tokenized",
        )
    output1 = gr.HighlightedText(show_inline_category=True)
    btn = gr.Button("Run")
    btn.click(token_viz, inputs=[model_name, text], outputs=[output1])

demo.queue().launch()