TruongLeThanh commited on
Commit
ac07ce7
·
1 Parent(s): 08dcaa5

update model

Browse files
.gradio/flagged/dataset1.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ image,output,timestamp
2
+ .gradio\flagged\image\4b60174fa7b64d40f429\1984b509-acc2-47c0-8d72-5a4692938d80.jpg,,2025-04-25 16:08:27.527342
3
+ .gradio\flagged\image\ac23ccb956d7b24e0154\1984b509-acc2-47c0-8d72-5a4692938d80.jpg,,2025-04-25 16:08:29.346974
.gradio/flagged/image/4b60174fa7b64d40f429/1984b509-acc2-47c0-8d72-5a4692938d80.jpg ADDED
.gradio/flagged/image/ac23ccb956d7b24e0154/1984b509-acc2-47c0-8d72-5a4692938d80.jpg ADDED
app.py CHANGED
@@ -1,7 +1,29 @@
1
  import gradio as gr
 
 
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
 
5
 
6
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  demo.launch()
 
import gradio as gr
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image
import torch

# Pretrained BLIP image-captioning checkpoint; the processor handles image
# preprocessing and token decoding, the model does the caption generation.
_CHECKPOINT = "Salesforce/blip-image-captioning-base"
processor = BlipProcessor.from_pretrained(_CHECKPOINT)
model = BlipForConditionalGeneration.from_pretrained(_CHECKPOINT)

# Prefer GPU when one is visible; fall back to CPU otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
# Inference function
def predict_caption(image):
    """Return a generated English caption (str) for a PIL image.

    Uses the module-level BLIP ``processor``/``model`` pair on ``device``.
    """
    # BLIP expects 3-channel RGB input; normalize any other PIL mode first.
    rgb = image if image.mode == "RGB" else image.convert("RGB")

    batch = processor(images=rgb, return_tensors="pt").to(device)
    token_ids = model.generate(**batch, max_new_tokens=20)
    return processor.decode(token_ids[0], skip_special_tokens=True)
# Gradio UI: one image upload in, one generated-caption text box out.
demo = gr.Interface(
    fn=predict_caption,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="📸 BLIP Image Captioning",
    description="Tải ảnh lên và nhận mô tả tự động bằng BLIP từ Salesforce.",
)
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ transformers
2
+ torch
3
+ gradio
4
+ Pillow