import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from swarms.agents.message import Message


class Mistral:
    """
    Mistral: a thin wrapper around the Mistral-7B causal language model.

    Usage:
        model = Mistral(device="cuda", use_flash_attention=True, temperature=0.7, max_length=200)
        task = "My favourite condiment is"
        result = model.run(task)
        print(result)
    """
    def __init__(
        self,
        ai_name: str = "Node Model Agent",
        system_prompt: str = None,
        model_name: str = "mistralai/Mistral-7B-v0.1",
        device: str = "cuda",
        use_flash_attention: bool = False,
        temperature: float = 1.0,
        max_length: int = 100,
        do_sample: bool = True,
    ):
        self.ai_name = ai_name
        self.system_prompt = system_prompt
        self.model_name = model_name
        self.device = device
        self.use_flash_attention = use_flash_attention
        self.temperature = temperature
        self.max_length = max_length
        self.do_sample = do_sample

        # Check that the requested device is actually available
        if device == "cuda" and not torch.cuda.is_available():
            raise ValueError("CUDA is not available. Please choose a different device.")

        # Load the model and tokenizer
        self.model = None
        self.tokenizer = None
        self.load_model()

        self.history = []
    def load_model(self):
        """Load the model and tokenizer from the Hugging Face Hub and move the model to the target device."""
        try:
            self.model = AutoModelForCausalLM.from_pretrained(self.model_name)
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model.to(self.device)
        except Exception as e:
            raise ValueError(f"Error loading the Mistral model: {str(e)}")
    def run(self, task: str):
        """Run the model on a given task and return the generated text."""
        try:
            model_inputs = self.tokenizer([task], return_tensors="pt").to(self.device)
            generated_ids = self.model.generate(
                **model_inputs,
                max_new_tokens=self.max_length,
                do_sample=self.do_sample,
                temperature=self.temperature,
            )
            output_text = self.tokenizer.batch_decode(generated_ids)[0]
            return output_text
        except Exception as e:
            raise ValueError(f"Error running the model: {str(e)}")
    def chat(self, msg: str = None, streaming: bool = False):
        """
        Run a chat turn against the model.

        Args:
            msg (str, optional): Message to send to the agent. Defaults to None.
            streaming (bool, optional): Whether to stream the response. Defaults to False.

        Returns:
            str: Response from the agent

        Usage:
        --------------
        agent = Mistral()
        agent.chat("Hello")
        """
        # Add the user's message to the history
        self.history.append(Message("User", msg))

        # Process the message
        try:
            response = self.run(msg)

            # Add the agent's response to the history
            self.history.append(Message("Agent", response))

            # Stream the response token by token if requested
            if streaming:
                return self._stream_response(response)
            else:
                return response
        except Exception as error:
            error_message = f"Error processing message: {str(error)}"

            # Add the error to the history
            self.history.append(Message("Agent", error_message))
            return error_message
    def _stream_response(self, response: str = None):
        """
        Yield the response token by token (word by word).

        Usage:
        --------------
        for token in _stream_response(response):
            print(token)
        """
        for token in response.split():
            yield token
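
# Minimal usage sketch (not part of the original module): it assumes a
# CUDA-capable GPU and that the "mistralai/Mistral-7B-v0.1" weights can be
# downloaded. It exercises run(), chat(), and the streaming path.
if __name__ == "__main__":
    model = Mistral(device="cuda", temperature=0.7, max_length=200)

    # Single completion
    print(model.run("My favourite condiment is"))

    # Chat turn without streaming returns the full response string
    print(model.chat("Tell me about Mistral-7B."))

    # Chat turn with streaming returns a generator of tokens
    for token in model.chat("Summarize that in one sentence.", streaming=True):
        print(token, end=" ")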