# FeynModel V 0.1
![image/png](https://cdn-uploads.huggingface.co/production/uploads/645364cbf666f76551f93111/RNgPbOsYh_XAo-rCRrNUb.png)
# How to use
```python
from transformers import AutoProcessor, AutoModelForCausalLM

model_id = 'Imagroune/feynmodel'

# Load the processor (tokenizer + image processor) and the model
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
model.to('cuda')
```
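The checkpoint can also be loaded in half precision to roughly halve GPU memory use. A minimal sketch, assuming the weights behave well in fp16; the `torch_dtype` argument is standard `transformers` and not specific to this model:

```python
import torch
from transformers import AutoModelForCausalLM

# Optional: half-precision load to reduce GPU memory (assumes fp16-safe weights)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.float16,
)
model.to('cuda')
```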
# LLM Inference
```python
from transformers import TextStreamer

input_text = "<start_of_turn>user\nHow many helicopters can an adult human eat in a single meal?<end_of_turn>\n<start_of_turn>model\n"
inputs = processor.tokenizer(input_text, return_tensors="pt").to("cuda")

# Stream tokens to stdout as they are generated, using transformers' TextStreamer
streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)

max_length = inputs.input_ids.shape[1] + 1024  # total maximum length (prompt + new tokens)
model.generate(
    input_ids=inputs.input_ids,
    max_length=max_length,
    do_sample=True,
    temperature=0.7,
    streamer=streamer,
)
```
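If the tokenizer ships a chat template, the Gemma-style turn markers above can be generated instead of hand-written. A minimal sketch, assuming `processor.tokenizer` defines a chat template (if it does not, pass the marker string as above):

```python
# Build the prompt from the tokenizer's chat template (assumes one is configured)
messages = [{"role": "user", "content": "How many helicopters can an adult human eat in a single meal?"}]
input_ids = processor.tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to("cuda")
model.generate(input_ids=input_ids, max_new_tokens=1024, do_sample=True, temperature=0.7, streamer=streamer)
```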
# Vision Inference
```python
from transformers import StoppingCriteria, StoppingCriteriaList
from PIL import Image
import requests

class PrintTokensStoppingCriteria(StoppingCriteria):
    """Prints each newly generated token, giving a simple streaming display."""

    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        # Decode the last generated token and print it
        last_token_id = input_ids[0, -1].item()
        token = self.tokenizer.decode([last_token_id], skip_special_tokens=True)
        print(token, end='', flush=True)
        # Return False so generation continues until another condition stops it
        return False

stopping_criteria = PrintTokensStoppingCriteria(processor.tokenizer)

# Download a sample image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
image = Image.open(requests.get(url, stream=True).raw)

input_text = """<start_of_turn>user
Create a concise caption that accurately describes the main elements in the image provided
<end_of_turn>
<start_of_turn>model
"""

# Preprocess text and image together, then move tensors to the GPU
inputs = processor(text=input_text, images=image, return_tensors="pt")
inputs = {key: value.cuda() for key, value in inputs.items()}
image  # in a notebook, this displays the downloaded image
```
![image/png](https://cdn-uploads.huggingface.co/production/uploads/645364cbf666f76551f93111/XVxraj69m26HtkfaWQhve.png)
```python
max_length = inputs['input_ids'].shape[1] + 1024  # total maximum length (prompt + new tokens)

# Generate the caption, streaming tokens via the stopping criteria above
ret = model.generate(
    inputs['input_ids'],
    pixel_values=inputs['pixel_values'],
    stopping_criteria=StoppingCriteriaList([stopping_criteria]),
    max_length=max_length,
    do_sample=True,
    temperature=0.7,
)
# Example output: An older, green car sits parked on the curb in front of a building.
```
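To keep the caption as a string rather than only streaming it to stdout, decode the ids returned by `generate`. A minimal sketch; slicing off the prompt tokens is optional:

```python
# Decode the full returned sequence (prompt + completion), skipping special tokens
caption = processor.tokenizer.decode(ret[0], skip_special_tokens=True)
print(caption)

# Or decode only the newly generated part by slicing off the prompt
new_tokens = ret[0, inputs['input_ids'].shape[1]:]
print(processor.tokenizer.decode(new_tokens, skip_special_tokens=True))
```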