gdo committed on
Commit
a162149
·
verified ·
1 Parent(s): 28ab30a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -20
app.py CHANGED
@@ -1,32 +1,21 @@
1
- import sys
2
- import gradio as gr
3
  from PIL import Image
 
4
 
5
- import vertexai
6
- from vertexai.preview.vision_models import ImageCaptioningModel #Correct Import
7
- vertexai.init(project=PROJECT_ID, location=LOCATION)
8
 
9
  def image_to_prompt(image):
10
- """Generates a caption (prompt) from an image using Vertex AI."""
11
- if image is None:
12
- return "Please upload an image."
13
-
14
- try:
15
- model = ImageCaptioningModel.from_pretrained("imagetext@004") # or imagetext@003
16
- captions = model.get_captions(image=image, number_of_results=1)
17
- if captions:
18
- return captions[0]
19
- else:
20
- return "Could not generate a caption."
21
-
22
- except Exception as e:
23
- return f"An error occurred: {e}"
24
 
25
  iface = gr.Interface(
26
  fn=image_to_prompt,
27
  inputs=gr.Image(type="pil"),
28
  outputs=gr.Textbox(label="Generated Prompt"),
29
- title="Image to Prompt Generator",
30
  description="Upload an image and get a text prompt describing it.",
31
  )
32
 
 
1
+ from transformers import BlipProcessor, BlipForConditionalGeneration
 
2
  from PIL import Image
3
+ import gradio as gr
4
 
5
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
6
+ model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
 
7
 
8
  def image_to_prompt(image):
9
+ inputs = processor(images=image, return_tensors="pt")
10
+ outputs = model.generate(**inputs, max_length=50)
11
+ caption = processor.decode(outputs[0], skip_special_tokens=True)
12
+ return caption
 
 
 
 
 
 
 
 
 
 
13
 
14
  iface = gr.Interface(
15
  fn=image_to_prompt,
16
  inputs=gr.Image(type="pil"),
17
  outputs=gr.Textbox(label="Generated Prompt"),
18
+ title="Image to Prompt Generator (using BLIP)",
19
  description="Upload an image and get a text prompt describing it.",
20
  )
21