import gradio as gr
import torch

from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM

# Load the DeepSeek-VL 1.3B chat model and its processor once at startup.
model_path = "deepseek-ai/deepseek-vl-1.3b-chat"
vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
vl_gpt = MultiModalityCausalLM.from_pretrained(model_path, trust_remote_code=True).to("cpu").eval()
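# NOTE: inference on CPU is slow for a 1.3B-parameter model; if a GPU is
# available, move the model (and the processed inputs below) to "cuda".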
@torch.no_grad()
def qa(image, question):
    # Gradio's gr.Image(type="pil") already hands us a PIL image, so we pass
    # it straight to the processor instead of calling load_pil_images(),
    # which expects file paths and would fail on an in-memory image.
    pil_images = [image.convert("RGB")]
    conversation = [
        {"role": "User", "content": "<image_placeholder>" + question, "images": pil_images},
        {"role": "Assistant", "content": ""},
    ]
    prepare_inputs = vl_chat_processor(
        conversations=conversation,
        images=pil_images,
        force_batchify=True
    ).to("cpu")
    # Fuse the image features and the text token embeddings into one sequence.
    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)

    # Greedy decoding (do_sample=False) keeps the answers deterministic.
    outputs = vl_gpt.language_model.generate(
        inputs_embeds=inputs_embeds,
        attention_mask=prepare_inputs.attention_mask,
        pad_token_id=tokenizer.eos_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        max_new_tokens=512,
        do_sample=False,
        use_cache=True,
    )
    answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
    return answer
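# The model stays resident in memory; Gradio calls qa() once per request.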
demo = gr.Interface(
    fn=qa,
    inputs=[gr.Image(type="pil", label="Upload Image"), gr.Textbox(label="Enter your question")],
    outputs="text",
    title="DeepSeek-VL Multimodal QA Demo",
    description="Upload an image and enter a question. Experience DeepSeek-VL's vision-language capabilities."
)
demo.launch(
    server_name="0.0.0.0",  # listen on all network interfaces
    server_port=7860,
    share=True  # also print a temporary public *.gradio.live link
)
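# To try it out (assuming this script is saved as app.py): run `python app.py`,
# then open http://localhost:7860 or the printed gradio.live share link.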