# Scratch_Vision_Game_dup / app_BLIP_processing.py
from transformers import BlipProcessor, BlipForConditionalGeneration
from langchain.schema import BaseOutputParser
from PIL import Image
import torch

# A simple output parser that normalizes the generated caption
class CaptionParser(BaseOutputParser):
    def parse(self, text: str):
        return text.strip()

# LangChain-compatible wrapper around the BLIP vision-language model
class BLIPImageCaptioning:
    def __init__(self):
        # Run on GPU when available, otherwise fall back to CPU
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device)

    def predict(self, image_path: str) -> str:
        # Load the image, preprocess it, and decode the generated caption
        raw_image = Image.open(image_path).convert("RGB")
        inputs = self.processor(raw_image, return_tensors="pt").to(self.device)
        out = self.model.generate(**inputs)
        caption = self.processor.decode(out[0], skip_special_tokens=True)
        return caption
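
    # Optional extension (a sketch, not part of the original file): BLIP also
    # supports conditional captioning, where a text prefix steers the output.
    # The method name predict_conditional is our own; the processor's optional
    # text argument follows the Salesforce/blip-image-captioning-base model card.
    def predict_conditional(self, image_path: str, prompt: str) -> str:
        raw_image = Image.open(image_path).convert("RGB")
        inputs = self.processor(raw_image, prompt, return_tensors="pt").to(self.device)
        out = self.model.generate(**inputs)
        return self.processor.decode(out[0], skip_special_tokens=True)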

# Compose the BLIP captioner and the output parser into a chain-style pipeline
class ImageCaptionChain:
    def __init__(self):
        self.captioner = BLIPImageCaptioning()
        self.output_parser = CaptionParser()

    def run(self, image_path: str):
        caption = self.captioner.predict(image_path)
        return self.output_parser.parse(caption)
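
# A sketch (our addition, not in the original file): caption every .jpg in a
# folder by reusing a single chain instance, so the BLIP weights load only once.
# The helper name caption_directory and the *.jpg filter are assumptions.
from pathlib import Path

def caption_directory(chain: "ImageCaptionChain", folder: str) -> dict:
    captions = {}
    for path in Path(folder).glob("*.jpg"):
        captions[str(path)] = chain.run(str(path))
    return captions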

# ----------- Run Example -------------
if __name__ == "__main__":
    image_path = r"images\sample.jpg"  # Replace with your image path
    chain = ImageCaptionChain()
    caption = chain.run(image_path)
    print("Generated Caption:", caption)