HF-Pawan committed on
Commit
01e8e77
·
1 Parent(s): 9b9bc16

Latest Changes

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.jpg filter=lfs diff=lfs merge=lfs -text
assets/cat.jpg ADDED

Git LFS Details

  • SHA256: 390cdc5b4a8dbfdc451890fa1ff0e992a0f6606b3e83328fe33cf6a5684c234f
  • Pointer size: 130 Bytes
  • Size of remote file: 88.2 kB
assets/fridge.jpg ADDED

Git LFS Details

  • SHA256: c8af9b2d81ecf798fde207301f299758826e49184a07c5b25fde4501f4ce463e
  • Pointer size: 131 Bytes
  • Size of remote file: 155 kB
assets/giraffe.jpg ADDED

Git LFS Details

  • SHA256: 50185fa3ae65f9e55050f329e255758ebff81e3a365e737edac6c8a1908c47cf
  • Pointer size: 131 Bytes
  • Size of remote file: 107 kB
assets/marriage.jpg ADDED

Git LFS Details

  • SHA256: 0537f351c1e6a635677b02a01b09d1c87de545975ea1a3684c501c2ce4929014
  • Pointer size: 131 Bytes
  • Size of remote file: 194 kB
assets/zebra.jpg ADDED

Git LFS Details

  • SHA256: 3eef77e177af7d06968fa2d7177e23dd3e1c6e2652d08446a07427783a534ede
  • Pointer size: 131 Bytes
  • Size of remote file: 286 kB
ui/layout.py CHANGED
@@ -14,40 +14,54 @@ def build_ui(model, processor):
14
  style=style
15
  )
16
 
17
- with gr.Blocks() as demo:
18
- gr.Markdown("## 🖼️ BLIP Image Captioning (Zero-GPU)")
19
- gr.Markdown(
20
- "Generate captions or explanations from images using a CPU-only BLIP model."
21
- )
 
 
 
 
 
 
 
 
 
22
 
23
- with gr.Row():
24
- image_input = gr.Image(
25
- type="pil",
26
- label="Upload Image"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  )
28
 
29
- with gr.Column():
30
- style_select = gr.Dropdown(
31
- choices=[
32
- "Short Caption",
33
- "Detailed Caption",
34
- "Creative Caption",
35
- "Image Explanation"
36
- ],
37
- value="Detailed Caption",
38
- label="Caption Style"
39
- )
40
- output_text = gr.Textbox(
41
- label="Generated Output",
42
- lines=4
43
- )
44
-
45
- generate_btn = gr.Button("Generate")
46
-
47
- generate_btn.click(
48
- fn=run_caption,
49
- inputs=[image_input, style_select],
50
- outputs=output_text
51
- )
52
 
53
  return demo
 
14
  style=style
15
  )
16
 
17
+ with gr.Blocks(title="BLIP Image Captioning (Zero-GPU)") as demo:
18
+ with gr.Column(elem_id="container"):
19
+ gr.Markdown("## 🖼️ BLIP Image Captioning (Zero-GPU)")
20
+ gr.Markdown(
21
+ "Generate captions or explanations from images using a CPU-only BLIP model."
22
+ )
23
+
24
+ with gr.Row(equal_height=True):
25
+ with gr.Column():
26
+ image_input = gr.Image(
27
+ type="pil",
28
+ label="Upload Image"
29
+ )
30
+ generate_btn = gr.Button("Generate")
31
 
32
+ with gr.Column():
33
+ style_select = gr.Dropdown(
34
+ choices=[
35
+ "Short Caption",
36
+ "Detailed Caption",
37
+ "Creative Caption",
38
+ "Image Explanation"
39
+ ],
40
+ value="Detailed Caption",
41
+ label="Caption Style"
42
+ )
43
+ output_text = gr.Textbox(
44
+ label="Generated Output",
45
+ lines=5
46
+ )
47
+
48
+ # ✅ Examples preload inputs ONLY
49
+ gr.Examples(
50
+ examples=[
51
+ ["./assets/zebra.jpg", "Detailed Caption"],
52
+ ["./assets/cat.jpg", "Image Explanation"],
53
+ ["./assets/fridge.jpg", "Detailed Caption"],
54
+ ["./assets/marriage.jpg", "Creative Caption"],
55
+ ["./assets/giraffe.jpg", "Short Caption"]
56
+ ],
57
+ inputs=[image_input, style_select]
58
  )
59
 
60
+ # ✅ Single source of inference
61
+ generate_btn.click(
62
+ fn=run_caption,
63
+ inputs=[image_input, style_select],
64
+ outputs=output_text
65
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
 
67
  return demo