# app.py — Hugging Face Space: Chess Text Generation with Gemma-2-2B
import gradio as gr
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
import spaces
from huggingface_hub import login
import os
# Authenticate with the Hugging Face Hub using the HF_KEY secret (raises
# KeyError if unset) — presumably needed because the Gemma weights are
# gated/private; confirm against the model repo's access settings.
login(os.environ['HF_KEY'])
# Lazily-populated cache so the (large) base model, adapter, and tokenizer are
# pulled from the Hub once, instead of being reloaded on every single request.
_MODEL_CACHE = {}


def _load_model():
    """Load and cache the Gemma-2-2B base model with the chess LoRA adapter.

    Returns:
        tuple: ``(model, tokenizer)`` — the PEFT-wrapped causal LM and its
        matching tokenizer.
    """
    if "model" not in _MODEL_CACHE:
        base_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b")
        _MODEL_CACHE["model"] = PeftModel.from_pretrained(
            base_model, "diabolic6045/gemma-2-2b-chess-adapter"
        )
        _MODEL_CACHE["tokenizer"] = AutoTokenizer.from_pretrained("google/gemma-2-2b")
    return _MODEL_CACHE["model"], _MODEL_CACHE["tokenizer"]


# Define a function that takes user input and returns the model's output
@spaces.GPU(duration=20)
def generate_text(prompt):
    """Generate a short continuation of *prompt* with the chess-tuned model.

    Args:
        prompt: Free-text prompt from the Gradio textbox.

    Returns:
        str: Decoded model output with special tokens stripped.
    """
    model, tokenizer = _load_model()
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    print("generation")
    # The original max_length=20 counted prompt tokens too, so a longer prompt
    # could leave no room for a reply; max_new_tokens bounds only the output.
    output = model.generate(input_ids, max_new_tokens=20)
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Create a Gradio interface wiring the textbox input through generate_text.
demo = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input prompt"),
    outputs=gr.Textbox(label="Generated text"),
    title="Chess Text Generation with Gemma-2-2B",
    description="Enter a prompt and the model will generate a response.",
    examples=[
        ["What is the best opening move in chess?"],
        ["What is the Ruy Lopez opening?"],
        ["What is the Sicilian Defense?"],
    ],
)

# Launch the Gradio app. (A stray " |" scrape artifact after this call made
# the original file a SyntaxError; it has been removed.)
demo.launch()