import gradio as gr
import random

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
# Load the Dzongkha-to-English translation model
# (NLLB-200 distilled 600M, fine-tuned for Dzongkha -> English).
translation_model = AutoModelForSeq2SeqLM.from_pretrained("KarmaCST/nllb-200-distilled-600M-dz-to-en")
tokenizer = AutoTokenizer.from_pretrained("KarmaCST/nllb-200-distilled-600M-dz-to-en")

# NLLB (FLORES-200) language codes: Dzongkha in Tibetan script -> English in Latin script.
src_lang = "dzo_Tibt"
tgt_lang = "eng_Latn"
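
# Illustrative helper (a minimal sketch, not used by the app below): shows the
# Dzongkha -> English translation step on its own, reusing the model, tokenizer,
# and language codes loaded above. The name `_translate_dz_to_en` is ours, not
# part of the original app.
def _translate_dz_to_en(text: str) -> str:
    demo_pipeline = pipeline(
        "translation",
        model=translation_model,
        tokenizer=tokenizer,
        src_lang=src_lang,
        tgt_lang=tgt_lang,
    )
    return demo_pipeline(text)[0]["translation_text"]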
# Load the hosted text-to-image model through Gradio's model loader.
model = gr.load("models/Purz/face-projection")
def generate_image(text, seed):
    # Log when the input matches one of the bundled examples; compare against
    # the original Dzongkha text before it is translated.
    if text in [example[0] for example in examples]:
        print(f"Using example: {text}")

    # Translate the Dzongkha prompt to English before passing it to the
    # text-to-image model.
    translation_pipeline = pipeline(
        "translation",
        model=translation_model,
        tokenizer=tokenizer,
        src_lang=src_lang,
        tgt_lang=tgt_lang,
    )
    text = translation_pipeline(text)[0]["translation_text"]

    # Seed Python's RNG when a seed is given; the loaded model may not accept
    # a seed of its own, so this is best-effort reproducibility.
    if seed is not None:
        random.seed(seed)

    return model(text)
# Example Dzongkha prompts (approximate English glosses in the comments).
examples = [
    ["བྱི་ཅུང་ཚུ་གངས་རི་གི་ཐོག་ཁར་འཕུར།", None],  # Birds flying over the snow mountain.
    ["པཱ་རོ་ཁྲོམ་གྱི་ཐོག་ཁར་གནམ་གྲུ་འཕུར།", None],  # An airplane flying over Paro town.
    ["པཱ་རོ་ཁྲོམ་གྱི་ཐོག་ཁར་ ཤིང་ཚུ་གི་བར་ན་ གནམ་གྲུ་འཕུར་བའི་འཐོང་གནང་།", None],  # An airplane flying among the trees over Paro town.
    ["སློབ་ཕྲུག་ཚུ་ ཆརཔ་ནང་རྐང་རྩེད་རྩེ་དེས།", None],  # Students playing football in the rain.
]
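
# Quick local check (a minimal sketch, not called anywhere by the app): runs the
# full translate-then-generate path on the first bundled example. The function
# name `_smoke_test` is ours, not part of the original app; call it manually
# from a Python shell to verify the pipeline outside the Gradio UI.
def _smoke_test():
    sample_text, sample_seed = examples[0]
    return generate_image(sample_text, sample_seed)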
interface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Text to Image:", placeholder="Dzongkha text..."),
        gr.Slider(minimum=0, maximum=10000, step=1, label="Seed (optional)"),
    ],
    outputs=gr.Image(label="Generated Image"),
    title="Dzongkha Text to Image Generation",
    examples=examples,
    article="<h1>Created By:</h1>Mr. Karma Wangchuk<br>Lecturer<br>Information Technology Department<br>College of Science and Technology<br>Rinchending, Phuentsholing<br>Chhukha, Bhutan<br>",
    description="The model is currently running on the CPU, which might affect performance.",
)
interface.launch()