|
|
import gradio as gr |
|
|
from transformers import BlipProcessor, BlipForConditionalGeneration, RobertaTokenizer, RobertaForSequenceClassification |
|
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification |
|
|
import torch |
|
|
|
|
|
|
|
|
|
|
|
# --- Model setup (runs at import time) ---
# NOTE(review): both from_pretrained calls download weights on first run and
# load them into memory; models stay on CPU unless moved explicitly.

# BLIP large checkpoint used to generate a natural-language caption for the image.
caption_model_name = "Salesforce/blip-image-captioning-large"


caption_processor = BlipProcessor.from_pretrained(caption_model_name)


caption_model = BlipForConditionalGeneration.from_pretrained(caption_model_name)








# RoBERTa fine-tuned on the GoEmotions dataset; classifies the caption text
# into an emotion category.
emotion_model_name = "SamLowe/roberta-base-go_emotions"


emotion_tokenizer = AutoTokenizer.from_pretrained(emotion_model_name)


emotion_model = AutoModelForSequenceClassification.from_pretrained(emotion_model_name)
|
|
|
|
|
def generate_caption_and_analyze_emotions(image):
    """Caption an image with BLIP, then classify the caption's emotion.

    Args:
        image: Input image (PIL image or array accepted by ``BlipProcessor``).

    Returns:
        str: Human-readable text containing the predicted emotion label and
        the generated caption.
    """
    # Encode the image into BLIP's expected tensor format.
    caption_inputs = caption_processor(images=image, return_tensors="pt")

    # Inference only — disable gradient tracking to save memory/time.
    with torch.no_grad():
        caption_ids = caption_model.generate(**caption_inputs)

    decoded_caption = caption_processor.decode(caption_ids[0], skip_special_tokens=True)

    # Tokenize the caption for the emotion classifier. The tokenizer's
    # __call__ supersedes the legacy encode_plus with identical output.
    emotion_inputs = emotion_tokenizer(
        decoded_caption,
        max_length=128,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )

    with torch.no_grad():
        emotion_outputs = emotion_model(**emotion_inputs)

    # BUG FIX: the original passed the argmax *class index* to
    # emotion_tokenizer.decode(), which treats it as a vocabulary token id
    # and returns an unrelated subword — never an emotion name. The class
    # index must be mapped through the model config's id2label table.
    emotion_label_id = emotion_outputs.logits.argmax(dim=-1).item()
    emotion_label = emotion_model.config.id2label[emotion_label_id]

    final_output = f"The sentiment in the provided image shows: {emotion_label}.\n\nGenerated Caption: {decoded_caption}"

    return final_output
|
|
|
|
|
|
|
|
# BUG FIX: gr.inputs / gr.outputs were deprecated in Gradio 3.0 and removed
# in Gradio 4.x, so this script crashed with AttributeError on current
# versions. The component classes now live at the top level of the package.
# type="pil" hands the function a PIL image, which BlipProcessor accepts.
inputs = gr.Image(type="pil", label="Upload an image")
outputs = gr.Textbox(label="Generated Caption and Sentiment Analysis")

# Wire the captioning + emotion pipeline into a simple web UI.
app = gr.Interface(fn=generate_caption_and_analyze_emotions, inputs=inputs, outputs=outputs)
|
|
|
|
|
|
|
|
# Launch the Gradio web server only when run as a script (not on import).
if __name__ == "__main__":


    app.launch()
|
|
|