gopalagra committed
Commit 4bd925a · verified · 1 parent: c9f8fb0

Update app.py

Files changed (1)
app.py +116 -94
app.py CHANGED
@@ -1,110 +1,132 @@
  # app.py
- import gradio as gr
- from transformers import BlipProcessor, BlipForConditionalGeneration
- from gtts import gTTS
- import io
- from PIL import Image
-
- # -------------------------------
- # Load BLIP-base model (lighter version)
- # -------------------------------
- processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
- model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

- # -------------------------------
- # Generate caption function
- # -------------------------------
  # def generate_caption_tts(image):
- #     caption = generate_caption(model, processor, image)
- #     audio_file = text_to_audio_file(caption)
- #     return caption, audio_file  # return file path, not BytesIO
-
-
- # -------------------------------
- # Convert text to speech using gTTS
- # -------------------------------
- import tempfile
- import pyttsx3
-
- def text_to_audio_file(text):
-     # Create a temporary file
-     tmp_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
-     tmp_path = tmp_file.name
-     tmp_file.close()
-
-     engine = pyttsx3.init()
-     engine.save_to_file(text, tmp_path)
-     engine.runAndWait()
-
-     return tmp_path
-
- def generate_caption_from_image(model, processor, image):
-     # image: PIL.Image
-     inputs = processor(images=image, return_tensors="pt")
-     out = model.generate(**inputs)
-     caption = processor.decode(out[0], skip_special_tokens=True)
-     return caption
- # -------------------------------
- # Gradio interface: Caption + Audio
- # -------------------------------
- def generate_caption_tts(image):
-     caption = generate_caption_from_image(model, processor, image)  # uses global model/processor
-     # audio_file = text_to_audio_file(caption)
-     return caption



  interface = gr.Interface(
-     fn=generate_caption_tts,
-     inputs=gr.Image(type="numpy"),
-     outputs=[gr.Textbox(label="Generated Caption")],
-     title="Image Captioning for Visually Impaired",
-     description="Upload an image, get a caption and audio description."
  )

-
  interface.launch()
- # # demo.launch(share=True)
-
-
-
- # import gradio as gr
- # from transformers import AutoProcessor, AutoModelForCausalLM
- # import torch
- # from PIL import Image
-
- # # Load small LLaVA model
- # processor = AutoProcessor.from_pretrained("llava/LLaVA-7B-llm-small")
- # model = AutoModelForCausalLM.from_pretrained(
- #     "llava/LLaVA-7B-llm-small",
- #     torch_dtype=torch.float16,
- #     device_map="auto"  # Automatically use GPU if available
- # )
-
- # def generate_caption(image):
- #     # Convert to PIL if needed
- #     if isinstance(image, str):
- #         image = Image.open(image).convert("RGB")
-
- #     # Prepare inputs
- #     inputs = processor(images=image, return_tensors="pt").to(model.device)
-
- #     # Generate output
- #     outputs = model.generate(**inputs, max_new_tokens=50)
-
- #     # Decode result
- #     caption = processor.decode(outputs[0], skip_special_tokens=True)
- #     return caption
-
- # # Gradio Interface
- # interface = gr.Interface(
- #     fn=generate_caption,
- #     inputs=gr.Image(type="pil"),
- #     outputs=gr.Textbox(label="Generated Caption"),
- #     title="LLaVA Image Captioning"
- # )
-
- # interface.launch()
  # app.py
+ # import gradio as gr
+ # from transformers import BlipProcessor, BlipForConditionalGeneration
+ # from gtts import gTTS
+ # import io
+ # from PIL import Image
+
+ # # -------------------------------
+ # # Load BLIP-base model (lighter version)
+ # # -------------------------------
+ # processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+ # model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
+ # # -------------------------------
+ # # Generate caption function
+ # # -------------------------------
+ # # def generate_caption_tts(image):
+ # #     caption = generate_caption(model, processor, image)
+ # #     audio_file = text_to_audio_file(caption)
+ # #     return caption, audio_file  # return file path, not BytesIO
+
+
+ # # -------------------------------
+ # # Convert text to speech using gTTS
+ # # -------------------------------
+ # import tempfile
+ # import pyttsx3
+
+ # def text_to_audio_file(text):
+ #     # Create a temporary file
+ #     tmp_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
+ #     tmp_path = tmp_file.name
+ #     tmp_file.close()
+
+ #     engine = pyttsx3.init()
+ #     engine.save_to_file(text, tmp_path)
+ #     engine.runAndWait()
+
+ #     return tmp_path
+
+ # def generate_caption_from_image(model, processor, image):
+ #     # image: PIL.Image
+ #     inputs = processor(images=image, return_tensors="pt")
+ #     out = model.generate(**inputs)
+ #     caption = processor.decode(out[0], skip_special_tokens=True)
+ #     return caption
+ # # -------------------------------
+ # # Gradio interface: Caption + Audio
+ # # -------------------------------
  # def generate_caption_tts(image):
+ #     caption = generate_caption_from_image(model, processor, image)  # uses global model/processor
+ #     # audio_file = text_to_audio_file(caption)
+ #     return caption



+ # interface = gr.Interface(
+ #     fn=generate_caption_tts,
+ #     inputs=gr.Image(type="numpy"),
+ #     outputs=[gr.Textbox(label="Generated Caption")],
+ #     title="Image Captioning for Visually Impaired",
+ #     description="Upload an image, get a caption and audio description."
+ # )


+ # interface.launch()
+ # # demo.launch(share=True)
+
+ import gradio as gr
+ from transformers import BlipProcessor, BlipForConditionalGeneration, pipeline
+ from PIL import Image
+ from gtts import gTTS
+ import tempfile
+ import os
+
+ # Load BLIP model
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+ model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ # Translation pipelines
+ translation_models = {
+     "Hindi": pipeline("translation", model="Helsinki-NLP/opus-mt-en-hi"),
+     "French": pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr"),
+     "Spanish": pipeline("translation", model="Helsinki-NLP/opus-mt-en-es"),
+ }
+
+ # Map language to gTTS codes
+ tts_lang_map = {
+     "Hindi": "hi",
+     "French": "fr",
+     "Spanish": "es",
+ }
+
+ def generate_caption_translate_tts(image, target_lang):
+     # Step 1: Generate English caption
+     inputs = processor(image, return_tensors="pt")
+     out = model.generate(**inputs, max_new_tokens=50)
+     english_caption = processor.decode(out[0], skip_special_tokens=True)
+
+     # Step 2: Translate
+     if target_lang in translation_models:
+         translated = translation_models[target_lang](english_caption)[0]['translation_text']
+     else:
+         translated = "Translation not available"
+
+     # Step 3: Convert to Speech
+     audio_file = None
+     if target_lang in tts_lang_map:
+         tts = gTTS(translated, lang=tts_lang_map[target_lang])
+         tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
+         tts.save(tmp_file.name)
+         audio_file = tmp_file.name
+
+     return english_caption, translated, audio_file
+
+ # Gradio Interface
  interface = gr.Interface(
+     fn=generate_caption_translate_tts,
+     inputs=[gr.Image(type="pil"), gr.Dropdown(["Hindi", "French", "Spanish"], label="Translate To")],
+     outputs=[
+         gr.Textbox(label="English Caption"),
+         gr.Textbox(label="Translated Caption"),
+         gr.Audio(label="Spoken Translation")
+     ],
+     title="BLIP Captioning + Translation + Speech"
  )

  interface.launch()
+
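One detail worth flagging in the added code: os is imported but never used, and each request writes a NamedTemporaryFile(delete=False) mp3 that is never removed, so audio files accumulate for the life of the process. Below is a minimal cleanup sketch under that reading; the synthesize_speech name and _last_audio_file global are illustrative, not part of the commit.

# Hypothetical cleanup sketch (not in the commit): delete the previous
# request's temp mp3 before writing a new one, so files don't pile up.
import os
import tempfile

from gtts import gTTS

_last_audio_file = None  # illustrative module-level state

def synthesize_speech(text, lang_code):
    global _last_audio_file
    # Remove the mp3 left over from the previous request, if any.
    if _last_audio_file and os.path.exists(_last_audio_file):
        os.remove(_last_audio_file)
    tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    tmp_file.close()
    gTTS(text, lang=lang_code).save(tmp_file.name)
    _last_audio_file = tmp_file.name
    return _last_audio_file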
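For a quick smoke test outside the Space, the three steps the new app.py chains (caption, translate, speak) can be run as a standalone script. A minimal sketch, assuming transformers, torch, gtts, and sentencepiece (required by the Helsinki-NLP Marian tokenizers) are installed; the script name and test.jpg are hypothetical. Importing app.py directly is avoided here because interface.launch() runs at import time.

# standalone_check.py — hypothetical smoke test mirroring the committed
# caption -> translate -> speech pipeline, without launching Gradio.
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration, pipeline
from gtts import gTTS

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")

image = Image.open("test.jpg").convert("RGB")  # hypothetical local image

# Step 1: English caption from BLIP
inputs = processor(image, return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=50)
caption = processor.decode(out[0], skip_special_tokens=True)

# Step 2: translate the caption to French
translated = translator(caption)[0]["translation_text"]

# Step 3: synthesize the translation as an mp3
gTTS(translated, lang="fr").save("caption_fr.mp3")
print(caption, "->", translated)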