```python
# Load model directly
from transformers import AutoProcessor, AutoModelForImageTextToText

processor = AutoProcessor.from_pretrained("nnpy/Instruct-blip-v2")
model = AutoModelForImageTextToText.from_pretrained("nnpy/Instruct-blip-v2")
```
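For an end-to-end run with these Auto classes, something like the sketch below should work. This is a minimal example, not part of the original card: the image path and `max_new_tokens` value are placeholders, and the prompt follows the instruction format shown in the Usage section.

```python
from PIL import Image

# Load an image and build an instruction-style prompt.
image = Image.open("file_name.jpg").convert("RGB")
prompt = ("Instruction: Answer the following input according to the image.\n"
          "Input: Describe this image.\n"
          "output: ")

# Preprocess, generate, and decode back to text.
inputs = processor(images=image, text=prompt, return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=100)
print(processor.decode(out[0], skip_special_tokens=True))
```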
This model is fine-tuned on an instruction dataset, starting from the Salesforce/blip-image-captioning-base model.
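The card does not document the training recipe. As a rough, hypothetical sketch, instruction fine-tuning on top of the base captioning checkpoint could look like the following; the `train_pairs` iterable, the learning rate, and the single-pass loop are illustrative assumptions, not the actual procedure.

```python
import torch
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

model.train()
# train_pairs: hypothetical iterable of (PIL image, instruction prompt, answer text).
for image, prompt, answer in train_pairs:
    # Encode the image with "prompt + answer"; reusing the token ids as labels
    # trains the text decoder to continue the instruction with the answer.
    enc = processor(images=image, text=prompt + answer, return_tensors="pt")
    loss = model(**enc, labels=enc.input_ids).loss
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```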
Usage:
```python
from transformers import BlipProcessor, BlipForConditionalGeneration
import torch
from PIL import Image

# The processor comes from the base captioning checkpoint.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
if processor.tokenizer.eos_token is None:
    processor.tokenizer.eos_token = '<|eos|>'

model = BlipForConditionalGeneration.from_pretrained("nnpy/Instruct-blip-v2")

image = Image.open('file_name.jpg').convert('RGB')

# The model expects this instruction/input/output prompt format.
prompt = """Instruction: Answer the following input according to the image.
Input: Describe this image.
output: """

inputs = processor(image, prompt, return_tensors="pt")
output = model.generate(**inputs, max_length=100)
print(processor.decode(output[0], skip_special_tokens=True))
```
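The same template should generalize to other instructions; for example, a VQA-style query would only swap the `Input:` line (a hypothetical reuse of the prompt format above):

```python
prompt = """Instruction: Answer the following input according to the image.
Input: What color is the car in the picture?
output: """
```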
```python
# Use a pipeline as a high-level helper
# Warning: Pipeline type "image-to-text" is no longer supported in transformers v5.
# You must load the model directly (see above) or downgrade to v4.x with:
#   pip install "transformers<5.0.0"
from transformers import pipeline

pipe = pipeline("image-to-text", model="nnpy/Instruct-blip-v2")
```
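On transformers v4.x, calling the pipeline on an image then looks like this (a sketch; the file name is a placeholder):

```python
# Returns a list of dicts, e.g. [{"generated_text": "..."}].
result = pipe("file_name.jpg")
print(result[0]["generated_text"])
```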