import os

import gradio as gr
import spaces
from huggingface_hub import login
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
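# Log in so the app can download the gated google/gemma-2-2b weights;
# HF_KEY is assumed to be a Space secret holding a read-access token.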
login(os.environ['HF_KEY'])
# Load the base model, the chess LoRA adapter, and the tokenizer once at
# startup instead of on every request.
base_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b")
model = PeftModel.from_pretrained(base_model, "diabolic6045/gemma-2-2b-chess-adapter")
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")

# Take user input and return the model's output. On ZeroGPU Spaces,
# @spaces.GPU requests a GPU for the duration of the call.
@spaces.GPU
def generate_text(prompt):
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # max_new_tokens bounds only the generated tokens; the original
    # max_length=20 counted the prompt as well, so longer prompts left
    # little or no room for a reply.
    output = model.generate(input_ids, max_new_tokens=20)
    return tokenizer.decode(output[0], skip_special_tokens=True)
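# Optional sanity check outside the UI, using one of the example prompts:
#   print(generate_text("What is the best opening move in chess?"))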
# Create a Gradio interface
demo = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input prompt"),
    outputs=gr.Textbox(label="Generated text"),
    title="Chess Text Generation with Gemma-2-2B",
    description="Enter a prompt and the model will generate a response.",
    examples=[
        ["What is the best opening move in chess?"],
        ["What is the Ruy Lopez opening?"],
        ["What is the Sicilian Defense?"],
    ],
)
# Launch the Gradio app
demo.launch()
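# Running locally (a sketch; assumes this file is saved as app.py and that
# HF_KEY holds a read token with access to the gated google/gemma-2-2b repo):
#   pip install gradio spaces peft transformers torch huggingface_hub
#   HF_KEY=hf_... python app.py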