import torch
# AutoProcessor and AwqConfig are only used by the commented-out idefics2
# path further down.
from transformers import AutoProcessor, AutoModelForCausalLM, AwqConfig, AutoTokenizer
import numpy as np
import pyttsx3
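
# Route segments as flattened 2x2 arrays, presumably (x, y) waypoint pairs
# for a scripted base path: start -> couch -> kitchen -> start.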
START_TO_COUCH = np.array([[0.5, 0], [0.5, 0.5]]).ravel()
COUCH_TO_KITCHEN = np.array([[0.5, -0.5], [1.0, -1.0]]).ravel()
KITCHEN_TO_START = np.array([[0.5, -0.5], [0, 0]]).ravel()
engine = pyttsx3.init("espeak")
voices = engine.getProperty("voices")
engine.setProperty("voice", voices[3].id)
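# NOTE: the voice index is machine-specific; espeak voice ordering varies,
# so voices[3] may not exist (or may sound different) on another system.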
def speak(text):
    print(f"said {text}", flush=True)
    engine.say(text)
    engine.runAndWait()

speak("hello")
| MODE = "fused_quantized" | |
| DEVICE = "cuda" | |
| # PROCESSOR = AutoProcessor.from_pretrained("/mnt/c/idefics2-8b-AWQ") | |
tokenizer = AutoTokenizer.from_pretrained(
    '/home/peiji/Bunny-v1_0-2B-zh/',
    trust_remote_code=True,
)
BAD_WORDS_IDS = tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
EOS_WORDS_IDS = tokenizer(
    "<end_of_utterance>", add_special_tokens=False
).input_ids + [tokenizer.eos_token_id]
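# Both id lists are built from idefics2's special-token strings.
# BAD_WORDS_IDS is passed to generate() below to keep image-placeholder
# text out of the output; EOS_WORDS_IDS is an idefics2-style stop list
# that the active Bunny path never passes to generate().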
# Make DEVICE the default so new tensors are created on the GPU.
torch.set_default_device(DEVICE)  # set DEVICE = "cpu" above if no GPU
# Create the model.
model = AutoModelForCausalLM.from_pretrained(
    '/home/peiji/Bunny-v1_0-2B-zh/',
    torch_dtype=torch.float16,  # use torch.float32 on CPU
    device_map='auto',
    trust_remote_code=True,
)
print("finished loading Bunny model")
# # Load model (alternate idefics2-AWQ path). NOTE: re-enabling this also
# # needs AutoModelForVision2Seq imported from transformers.
# if MODE == "regular":
#     model = AutoModelForVision2Seq.from_pretrained(
#         "/mnt/c/idefics2-8b-AWQ",
#         torch_dtype=torch.float16,
#         trust_remote_code=True,
#         _attn_implementation="flash_attention_2",
#         revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d",
#     ).to(DEVICE)
# elif MODE == "quantized":
#     quant_path = "/mnt/c/idefics2-8b-AWQ"
#     model = AutoModelForVision2Seq.from_pretrained(
#         quant_path, trust_remote_code=True
#     ).to(DEVICE)
# elif MODE == "fused_quantized":
#     quant_path = "/mnt/c/idefics2-8b-AWQ"
#     quantization_config = AwqConfig(
#         bits=4,
#         fuse_max_seq_len=4096,
#         modules_to_fuse={
#             "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
#             "mlp": ["gate_proj", "up_proj", "down_proj"],
#             "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
#             "use_alibi": False,
#             "num_attention_heads": 32,
#             "num_key_value_heads": 8,
#             "hidden_size": 4096,
#         },
#     )
#     model = AutoModelForVision2Seq.from_pretrained(
#         quant_path, quantization_config=quantization_config, trust_remote_code=True
#     ).to(DEVICE)
# else:
#     raise ValueError("Unknown mode")


# def reset_awq_cache(model):
#     """
#     Simple method to reset the AWQ fused modules cache.
#     """
#     from awq.modules.fused.attn import QuantAttentionFused
#
#     for name, module in model.named_modules():
#         if isinstance(module, QuantAttentionFused):
#             module.start_pos = 0
def ask_vlm(image, instruction):
    # The original prompt here was idefics2's processor-style list (text
    # segments interleaved with a PIL image), which a plain AutoTokenizer
    # cannot encode; this follows the prompt/image pattern from the Bunny
    # model card instead.
    speak(instruction)
    prompt = (
        "A chat between a curious user and an artificial intelligence "
        "assistant. The assistant gives helpful, detailed, and polite "
        f"answers to the user's questions. USER: <image>\n{instruction} ASSISTANT:"
    )
    text_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split("<image>")]
    # -200 is Bunny's image-token placeholder id.
    input_ids = torch.tensor(
        text_chunks[0] + [-200] + text_chunks[1], dtype=torch.long
    ).unsqueeze(0)
    image_tensor = model.process_images([image], model.config).to(dtype=model.dtype)
    generated_ids = model.generate(
        input_ids,
        images=image_tensor,
        bad_words_ids=BAD_WORDS_IDS,
        max_new_tokens=50,
        use_cache=True,
    )[0]
    # Decode only the newly generated tokens after the prompt.
    answer = tokenizer.decode(
        generated_ids[input_ids.shape[1]:], skip_special_tokens=True
    ).strip()
    # reset_awq_cache(model)
    speak(answer)
    return answer
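
# Minimal usage sketch (assumes a local RGB image at the hypothetical path
# "test.jpg"):
#   from PIL import Image
#   print(ask_vlm(Image.open("test.jpg").convert("RGB"), "What do you see?"))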
# import requests
# import torch
# from PIL import Image
# from io import BytesIO
#
# def download_image(url):
#     try:
#         # Send a GET request to the URL to download the image
#         response = requests.get(url)
#         # Check if the request was successful (status code 200)
#         if response.status_code == 200:
#             # Open the image using PIL
#             image = Image.open(BytesIO(response.content))
#             # Return the PIL image object
#             return image
#         else:
#             print(f"Failed to download image. Status code: {response.status_code}")
#             return None
#     except Exception as e:
#         print(f"An error occurred: {e}")
#         return None
#
# # Create inputs
# image1 = download_image(
#     "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
# )
# print(ask_vlm(image1, "What is this?"))