# ghostwrite_v3 / handler.py
from typing import Dict, Any
import logging

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModel

logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)

device = "cuda" if torch.cuda.is_available() else "cpu"
class EndpointHandler:
    def __init__(self, path: str = ""):
        # Resolve the base model from the PEFT adapter config stored at `path`.
        config = PeftConfig.from_pretrained(path)
        model = AutoModelForCausalLM.from_pretrained(
            config.base_model_name_or_path,
            load_in_4bit=True,
            device_map="auto",
        )
        self.tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
        # Load the LoRA adapter on top of the quantized base model.
        self.model = PeftModel.from_pretrained(model, path)
    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Args:
            data (Dict): The payload with the text prompt under "inputs" and
                optional generation parameters under "parameters".
        """
        LOGGER.info(f"Received data: {data}")
        # Get inputs; fail early if the prompt is missing (the original check
        # ran after formatting, so it could never fire).
        query = data.pop("inputs", None)
        if query is None:
            raise ValueError("Missing prompt: payload must contain an 'inputs' field.")
        parameters = data.pop("parameters", None) or {}
        # Prompt template kept verbatim to match the fine-tuning format.
        prompt_template = """
Below is a screenplay prompt followed by a screenplay response. Generate only screenplay response.
### Screenplay Prompt:
{query}
### Screenplay Response:
"""
        prompt = prompt_template.format(query=query)
        # Preprocess
        encodeds = self.tokenizer(prompt, return_tensors="pt", add_special_tokens=True)
        model_inputs = encodeds.to(device)
        # Forward; caller-supplied parameters override the defaults below.
        LOGGER.info("Start generation.")
        generation_kwargs = {
            "max_new_tokens": 1024,  # sane cap replacing the original 9999999; generation still stops at EOS
            "do_sample": True,
            "pad_token_id": self.tokenizer.eos_token_id,  # was bare `tokenizer`, a NameError
            **parameters,
        }
        generated_ids = self.model.generate(**model_inputs, **generation_kwargs)
        decoded = self.tokenizer.batch_decode(generated_ids)
        LOGGER.info(f"Generated text length: {len(decoded[0])}")
        return {"generated_text": decoded[0]}