import torch
from transformers import AutoProcessor, AutoModelForCausalLM, AwqConfig, AutoTokenizer
import numpy as np
import pyttsx3
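
# Flattened 2x2 waypoint arrays; each row looks like an [x, y] coordinate for
# a scripted route (start -> couch -> kitchen -> start). The units and frame
# are an assumption; the original gives neither.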
START_TO_COUCH = np.array([[0.5, 0], [0.5, 0.5]]).ravel()
COUCH_TO_KITCHEN = np.array([[0.5, -0.5], [1.0, -1.0]]).ravel()
KITCHEN_TO_START = np.array([[0.5, -0.5], [0, 0]]).ravel()
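
# Text-to-speech through espeak; voice index 3 depends on which voices are
# installed locally and may need adjusting on another machine.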
engine = pyttsx3.init("espeak")
voices = engine.getProperty("voices")
engine.setProperty("voice", voices[3].id)
def speak(text):
    print(f"said {text}", flush=True)
    engine.say(text)
    engine.runAndWait()

speak("hello")
MODE = "fused_quantized"
DEVICE = "cuda"
# PROCESSOR = AutoProcessor.from_pretrained("/mnt/c/idefics2-8b-AWQ")
tokenizer = AutoTokenizer.from_pretrained(
    '/home/peiji/Bunny-v1_0-2B-zh/',
    trust_remote_code=True)
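
# Special-token strings from the idefics2 prompt format, kept from the earlier
# idefics2 setup (see the commented-out loading code below); the Bunny
# tokenizer treats them as ordinary text.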
BAD_WORDS_IDS = tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
EOS_WORDS_IDS = tokenizer(
    "<end_of_utterance>", add_special_tokens=False
).input_ids + [tokenizer.eos_token_id]
# set device
device = 'cuda' # or cpu
torch.set_default_device(device)
# create model
model = AutoModelForCausalLM.from_pretrained(
    '/home/peiji/Bunny-v1_0-2B-zh/',
    torch_dtype=torch.float16,  # float32 for cpu
    device_map='auto',
    trust_remote_code=True
)
print("load bunny model finish")
# # Load model
# if MODE == "regular":
#     model = AutoModelForVision2Seq.from_pretrained(
#         "/mnt/c/idefics2-8b-AWQ",
#         torch_dtype=torch.float16,
#         trust_remote_code=True,
#         _attn_implementation="flash_attention_2",
#         revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d",
#     ).to(DEVICE)
# elif MODE == "quantized":
#     quant_path = "/mnt/c/idefics2-8b-AWQ"
#     model = AutoModelForVision2Seq.from_pretrained(
#         quant_path, trust_remote_code=True
#     ).to(DEVICE)
# elif MODE == "fused_quantized":
#     quant_path = "/mnt/c/idefics2-8b-AWQ"
#     quantization_config = AwqConfig(
#         bits=4,
#         fuse_max_seq_len=4096,
#         modules_to_fuse={
#             "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
#             "mlp": ["gate_proj", "up_proj", "down_proj"],
#             "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
#             "use_alibi": False,
#             "num_attention_heads": 32,
#             "num_key_value_heads": 8,
#             "hidden_size": 4096,
#         },
#     )
#     model = AutoModelForVision2Seq.from_pretrained(
#         quant_path, quantization_config=quantization_config, trust_remote_code=True
#     ).to(DEVICE)
# else:
#     raise ValueError("Unknown mode")

# def reset_awq_cache(model):
#     """
#     Simple method to reset the AWQ fused modules cache
#     """
#     from awq.modules.fused.attn import QuantAttentionFused
#
#     for name, module in model.named_modules():
#         if isinstance(module, QuantAttentionFused):
#             module.start_pos = 0

def ask_vlm(image, instruction):
    """Ask the VLM a question about a PIL image and speak the answer aloud."""
    speak(instruction)
    # Prompt format from the Bunny model card: the <image> placeholder is
    # replaced by the special image token id (-200) between the text chunks.
    text = (
        "A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the "
        f"user's questions. USER: <image>\n{instruction} ASSISTANT:"
    )
    text_chunks = [tokenizer(chunk).input_ids for chunk in text.split("<image>")]
    input_ids = (
        torch.tensor(text_chunks[0] + [-200] + text_chunks[1], dtype=torch.long)
        .unsqueeze(0)
        .to(DEVICE)
    )
    image_tensor = model.process_images([image], model.config).to(dtype=model.dtype)
    generated_ids = model.generate(
        input_ids, images=image_tensor, max_new_tokens=50, use_cache=True
    )[0]
    # Decode only the tokens generated after the prompt.
    text = tokenizer.decode(
        generated_ids[input_ids.shape[1]:], skip_special_tokens=True
    ).strip()
    # reset_awq_cache(model)
    speak(text)
    return text

# import requests
# import torch
# from PIL import Image
# from io import BytesIO
# def download_image(url):
#     try:
#         # Send a GET request to the URL to download the image
#         response = requests.get(url)
#         # Check if the request was successful (status code 200)
#         if response.status_code == 200:
#             # Open the image using PIL
#             image = Image.open(BytesIO(response.content))
#             # Return the PIL image object
#             return image
#         else:
#             print(f"Failed to download image. Status code: {response.status_code}")
#             return None
#     except Exception as e:
#         print(f"An error occurred: {e}")
#         return None
# # Create inputs
# image1 = download_image(
#     "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
# )
# print(ask_vlm(image1, "What is this?"))
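
# A minimal usage sketch (not part of the original script): query the model
# about a local image instead of a downloaded one. "sample.jpg" is a
# hypothetical placeholder path.
# if __name__ == "__main__":
#     from PIL import Image
#     img = Image.open("sample.jpg")
#     print(ask_vlm(img, "What do you see in front of you?"))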