import os

import requests
from transformers import Tool


class TextGenerationTool(Tool):
    name = "text_generator"
    description = (
        "This is a tool for text generation. It takes a prompt as input and returns the generated text."
    )
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, prompt: str):
        API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
        # The 'hf' environment variable is expected to hold the full
        # Authorization header value, e.g. "Bearer <your HF access token>".
        headers = {"Authorization": os.environ["hf"]}

        def query(payload):
            response = requests.post(API_URL, headers=headers, json=payload)
            # Fail fast on HTTP errors instead of silently returning an error body.
            response.raise_for_status()
            return response.json()

        output = query({"inputs": prompt})

        # The Inference API returns a list of dicts, each with a
        # "generated_text" field; return the first completion.
        return output[0]["generated_text"]

        # Alternative: generate locally with a transformers pipeline instead
        # of calling the Inference API.
        # text_generator = pipeline(model="gpt2")
        # generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
        # return generated_text[0]["generated_text"]
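
# Minimal usage sketch (an assumption, not part of the original Space): it
# requires the 'hf' environment variable to be set with a valid Authorization
# header value and the model endpoint to be reachable.
if __name__ == "__main__":
    tool = TextGenerationTool()
    print(tool("Write a short poem about the sea."))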