gopalagra committed
Commit c9f8fb0 · verified · 1 Parent(s): 0576f19

Update app.py

Files changed (1)
  1. app.py +89 -89
app.py CHANGED
@@ -1,110 +1,110 @@
- # # app.py
- # import gradio as gr
- # from transformers import BlipProcessor, BlipForConditionalGeneration
- # from gtts import gTTS
- # import io
- # from PIL import Image

- # # -------------------------------
- # # Load BLIP-base model (lighter version)
- # # -------------------------------
- # processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
- # model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
-
- # # -------------------------------
- # # Generate caption function
- # # -------------------------------
- # # def generate_caption_tts(image):
- # #     caption = generate_caption(model, processor, image)
- # #     audio_file = text_to_audio_file(caption)
- # #     return caption, audio_file  # return file path, not BytesIO
-
-
- # # -------------------------------
- # # Convert text to speech using gTTS
- # # -------------------------------
- # import tempfile
- # import pyttsx3
-
- # def text_to_audio_file(text):
- #     # Create a temporary file
- #     tmp_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
- #     tmp_path = tmp_file.name
- #     tmp_file.close()
-
- #     engine = pyttsx3.init()
- #     engine.save_to_file(text, tmp_path)
- #     engine.runAndWait()
-
- #     return tmp_path
-
- # def generate_caption_from_image(model, processor, image):
- #     # image: PIL.Image
- #     inputs = processor(images=image, return_tensors="pt")
- #     out = model.generate(**inputs)
- #     caption = processor.decode(out[0], skip_special_tokens=True)
- #     return caption
- # # -------------------------------
- # # Gradio interface: Caption + Audio
- # # -------------------------------
  # def generate_caption_tts(image):
- #     caption = generate_caption_from_image(model, processor, image)  # uses global model/processor
- #     # audio_file = text_to_audio_file(caption)
- #     return caption



- # interface = gr.Interface(
- #     fn=generate_caption_tts,
- #     inputs=gr.Image(type="numpy"),
- #     outputs=[gr.Textbox(label="Generated Caption")],
- #     title="Image Captioning for Visually Impaired",
- #     description="Upload an image, get a caption and audio description."
- # )


- # interface.launch()
  # # demo.launch(share=True)



- import gradio as gr
- from transformers import AutoProcessor, AutoModelForCausalLM
- import torch
- from PIL import Image

- # Load small LLaVA model
- processor = AutoProcessor.from_pretrained("llava/LLaVA-7B-llm-small")
- model = AutoModelForCausalLM.from_pretrained(
-     "llava/LLaVA-7B-llm-small",
-     torch_dtype=torch.float16,
-     device_map="auto"  # Automatically use GPU if available
- )

- def generate_caption(image):
-     # Convert to PIL if needed
-     if isinstance(image, str):
-         image = Image.open(image).convert("RGB")

-     # Prepare inputs
-     inputs = processor(images=image, return_tensors="pt").to(model.device)

-     # Generate output
-     outputs = model.generate(**inputs, max_new_tokens=50)

-     # Decode result
-     caption = processor.decode(outputs[0], skip_special_tokens=True)
-     return caption

- # Gradio Interface
- interface = gr.Interface(
-     fn=generate_caption,
-     inputs=gr.Image(type="pil"),
-     outputs=gr.Textbox(label="Generated Caption"),
-     title="LLaVA Image Captioning"
- )

- interface.launch()

+ # app.py
+ import gradio as gr
+ from transformers import BlipProcessor, BlipForConditionalGeneration
+ from gtts import gTTS
+ import io
+ from PIL import Image

+ # -------------------------------
+ # Load BLIP-large model
+ # -------------------------------
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+ model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
+ # -------------------------------
+ # Generate caption function
+ # -------------------------------
  # def generate_caption_tts(image):
+ #     caption = generate_caption(model, processor, image)
+ #     audio_file = text_to_audio_file(caption)
+ #     return caption, audio_file  # return file path, not BytesIO
+
+
+ # -------------------------------
+ # Convert text to speech using pyttsx3
+ # -------------------------------
+ import tempfile
+ import pyttsx3
+
+ def text_to_audio_file(text):
+     # Create a temporary file
+     tmp_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
+     tmp_path = tmp_file.name
+     tmp_file.close()
+
+     engine = pyttsx3.init()
+     engine.save_to_file(text, tmp_path)
+     engine.runAndWait()
+
+     return tmp_path
+
+ def generate_caption_from_image(model, processor, image):
+     # image: PIL.Image
+     inputs = processor(images=image, return_tensors="pt")
+     out = model.generate(**inputs)
+     caption = processor.decode(out[0], skip_special_tokens=True)
+     return caption
+ # -------------------------------
+ # Gradio interface: Caption + Audio
+ # -------------------------------
+ def generate_caption_tts(image):
+     caption = generate_caption_from_image(model, processor, image)  # uses global model/processor
+     # audio_file = text_to_audio_file(caption)
+     return caption



+ interface = gr.Interface(
+     fn=generate_caption_tts,
+     inputs=gr.Image(type="numpy"),
+     outputs=[gr.Textbox(label="Generated Caption")],
+     title="Image Captioning for Visually Impaired",
+     description="Upload an image, get a caption and audio description."
+ )


+ interface.launch()
  # # demo.launch(share=True)



+ # import gradio as gr
+ # from transformers import AutoProcessor, AutoModelForCausalLM
+ # import torch
+ # from PIL import Image

+ # # Load small LLaVA model
+ # processor = AutoProcessor.from_pretrained("llava/LLaVA-7B-llm-small")
+ # model = AutoModelForCausalLM.from_pretrained(
+ #     "llava/LLaVA-7B-llm-small",
+ #     torch_dtype=torch.float16,
+ #     device_map="auto"  # Automatically use GPU if available
+ # )

+ # def generate_caption(image):
+ #     # Convert to PIL if needed
+ #     if isinstance(image, str):
+ #         image = Image.open(image).convert("RGB")

+ #     # Prepare inputs
+ #     inputs = processor(images=image, return_tensors="pt").to(model.device)

+ #     # Generate output
+ #     outputs = model.generate(**inputs, max_new_tokens=50)

+ #     # Decode result
+ #     caption = processor.decode(outputs[0], skip_special_tokens=True)
+ #     return caption

+ # # Gradio Interface
+ # interface = gr.Interface(
+ #     fn=generate_caption,
+ #     inputs=gr.Image(type="pil"),
+ #     outputs=gr.Textbox(label="Generated Caption"),
+ #     title="LLaVA Image Captioning"
+ # )

+ # interface.launch()
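
Note on the new version: gTTS and io are imported but never used, and although the interface description promises an audio description, generate_caption_tts only returns the caption because the TTS call is commented out. A minimal sketch of how the commit's own helpers could be rewired to return audio as well; the two-output interface and the gr.Audio component are additions for illustration, not part of the commit:

def generate_caption_and_audio(image):
    # Reuses the committed globals: model, processor, and both helpers.
    caption = generate_caption_from_image(model, processor, image)
    audio_path = text_to_audio_file(caption)  # temp file written by pyttsx3
    return caption, audio_path

interface = gr.Interface(
    fn=generate_caption_and_audio,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Textbox(label="Generated Caption"),
             gr.Audio(label="Audio Description", type="filepath")],
    title="Image Captioning for Visually Impaired",
    description="Upload an image, get a caption and audio description.",
)

One caveat: pyttsx3 relies on a local speech engine (eSpeak on Linux), which a headless deployment may not have installed.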
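
Because gTTS is already imported, the same helper could instead synthesize speech with no local engine at all, at the cost of needing network access (gTTS calls Google's TTS endpoint). A sketch; the function name is hypothetical:

def text_to_audio_file_gtts(text):
    # Drop-in variant of the committed text_to_audio_file, using gTTS.
    tmp_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
    tmp_file.close()
    gTTS(text=text, lang="en").save(tmp_file.name)  # writes a real MP3
    return tmp_file.name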
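
The block commented out at the bottom preserves an earlier LLaVA attempt. Two things stand out: the checkpoint id llava/LLaVA-7B-llm-small does not appear to resolve to a published Hub repository, and LLaVA checkpoints are normally loaded with LlavaForConditionalGeneration and prompted with an <image> placeholder rather than driven image-only through AutoModelForCausalLM. A sketch of the conventional path, assuming the public llava-hf/llava-1.5-7b-hf checkpoint:

import torch
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
model = LlavaForConditionalGeneration.from_pretrained(
    "llava-hf/llava-1.5-7b-hf",
    torch_dtype=torch.float16,
    device_map="auto",  # use a GPU when one is available
)

def generate_caption_llava(image: Image.Image) -> str:
    # LLaVA-1.5 expects a chat-style prompt containing an <image> token.
    prompt = "USER: <image>\nDescribe this image in one short sentence. ASSISTANT:"
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device, torch.float16)
    outputs = model.generate(**inputs, max_new_tokens=50)
    return processor.decode(outputs[0], skip_special_tokens=True)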