gopalagra committed
Commit 58313c8 · verified · 1 Parent(s): a5f24a2

Update app.py

Files changed (1): app.py (+185, -23)
app.py CHANGED
@@ -66,6 +66,158 @@
 # interface.launch()
 # # demo.launch(share=True)

+# import gradio as gr
+# from transformers import (
+#     BlipProcessor,
+#     BlipForConditionalGeneration,
+#     BlipForQuestionAnswering,
+#     pipeline
+# )
+# moderation_model = pipeline(
+#     "text-classification",
+#     model="Vrandan/Comment-Moderation",
+#     return_all_scores=True
+# )
+
+# from PIL import Image
+# import torch
+# from gtts import gTTS
+# import tempfile
+
+# # ----------------------
+# # Device setup
+# # ----------------------
+# device = "cuda" if torch.cuda.is_available() else "cpu"
+
+# # ----------------------
+# # Load Models Once
+# # ----------------------
+# print("🔄 Loading models...")
+
+# # Captioning
+# caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+# caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)
+
+# # VQA
+# vqa_processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
+# vqa_model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(device)
+
+# # Translation
+# translation_models = {
+#     "Hindi": pipeline("translation", model="Helsinki-NLP/opus-mt-en-hi"),
+#     "French": pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr"),
+#     "Spanish": pipeline("translation", model="Helsinki-NLP/opus-mt-en-es"),
+# }
+
+# # Safety Moderation Pipeline
+# moderation_model = pipeline("text-classification", model="unitary/toxic-bert")
+
+# print("✅ All models loaded!")
+
+# # ----------------------
+# # Safety Filter Function
+# # ----------------------
+# def is_caption_safe(caption):
+#     try:
+#         votes = moderation_model(caption)
+#         # If return_all_scores=True, it's [[{label, score}, ...]]
+#         if isinstance(votes, list) and isinstance(votes[0], list):
+#             votes = votes[0]
+#         # Now safe to loop
+#         for item in votes:
+#             if isinstance(item, dict) and item.get("label") in ["V", "V2"] and item.get("score", 0) > 0.5:
+#                 return False
+#     except Exception as e:
+#         print("⚠️ Moderation failed:", e)
+
+#     # Fallback keywords
+#     unsafe_keywords = [
+#         "gun", "blood", "skull", "kill", "corpse", "gore", "knife", "weapon",
+#         "fire", "murder", "dead", "death", "suicide", "bomb", "explosion",
+#         "terrorist", "assault", "stab", "shoot", "pistol", "rifle", "shotgun",
+#         "grenade", "horror", "beheaded", "torture", "hostage", "rape",
+#         "war", "massacre", "chainsaw", "poison", "strangle", "hang", "drown"
+#     ]
+#     if any(word in caption.lower() for word in unsafe_keywords):
+#         return False
+#     return True
+
+
+
+
+# # ----------------------
+# # Caption + Translate + Speak
+# # ----------------------
+# def generate_caption_translate_speak(image, target_lang):
+#     # Step 1: Caption
+#     inputs = caption_processor(images=image, return_tensors="pt").to(device)
+#     with torch.no_grad():
+#         out = caption_model.generate(**inputs, max_new_tokens=50)
+#     english_caption = caption_processor.decode(out[0], skip_special_tokens=True)
+
+#     # Step 1.5: Safety Check
+#     if not is_caption_safe(english_caption):
+#         return "⚠️ Warning: Unsafe or inappropriate content detected!", "", None
+
+#     # Step 2: Translate
+#     if target_lang in translation_models:
+#         translated = translation_models[target_lang](english_caption)[0]['translation_text']
+#     else:
+#         translated = "Translation not available"
+
+#     # Step 3: Generate Speech (English caption for now)
+#     tts = gTTS(english_caption, lang="en")
+#     tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
+#     tts.save(tmp_file.name)
+
+#     return english_caption, translated, tmp_file.name
+
+# # ----------------------
+# # VQA
+# # ----------------------
+# def vqa_answer(image, question):
+#     inputs = vqa_processor(image, question, return_tensors="pt").to(device)
+#     with torch.no_grad():
+#         out = vqa_model.generate(**inputs, max_new_tokens=50)
+#     answer = vqa_processor.decode(out[0], skip_special_tokens=True)
+
+#     # Run safety filter on answers too
+#     if not is_caption_safe(answer):
+#         return "⚠️ Warning: Unsafe or inappropriate content detected!"
+
+#     return answer
+
+# # ----------------------
+# # Gradio UI
+# # ----------------------
+# with gr.Blocks(title="BLIP Vision App") as demo:
+#     gr.Markdown("## 🖼️ BLIP: Image Captioning + Translation + Speech + VQA (with Safety Filter)")
+
+#     with gr.Tab("Caption + Translate + Speak"):
+#         with gr.Row():
+#             img_in = gr.Image(type="pil", label="Upload Image")
+#             lang_in = gr.Dropdown(["Hindi", "French", "Spanish"], label="Translate To", value="Hindi")
+#         eng_out = gr.Textbox(label="English Caption")
+#         trans_out = gr.Textbox(label="Translated Caption")
+#         audio_out = gr.Audio(label="Spoken Caption", type="filepath")
+#         btn1 = gr.Button("Generate Caption, Translate & Speak")
+#         btn1.click(generate_caption_translate_speak, inputs=[img_in, lang_in], outputs=[eng_out, trans_out, audio_out])
+
+#     with gr.Tab("Visual Question Answering (VQA)"):
+#         with gr.Row():
+#             img_vqa = gr.Image(type="pil", label="Upload Image")
+#             q_in = gr.Textbox(label="Ask a Question about the Image")
+#             ans_out = gr.Textbox(label="Answer")
+#         btn2 = gr.Button("Ask")
+#         btn2.click(vqa_answer, inputs=[img_vqa, q_in], outputs=ans_out)
+
+# demo.launch()
+
+
+
+
+
+
 import gradio as gr
 from transformers import (
     BlipProcessor,
@@ -73,16 +225,12 @@ from transformers import (
     BlipForQuestionAnswering,
     pipeline
 )
-moderation_model = pipeline(
-    "text-classification",
-    model="Vrandan/Comment-Moderation",
-    return_all_scores=True
-)
-
 from PIL import Image
 import torch
 from gtts import gTTS
 import tempfile
+import numpy as np
+import soundfile as sf

 # ----------------------
 # Device setup
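Note: numpy and soundfile are new runtime dependencies, and this commit touches only app.py. Assuming a standard Hugging Face Space setup, they would also need to be declared in requirements.txt (a sketch, not part of this commit):

    numpy
    soundfile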
@@ -114,37 +262,46 @@ moderation_model = pipeline("text-classification", model="unitary/toxic-bert")

 print("✅ All models loaded!")

+
+# ----------------------
+# Utility: Generate a Beep Sound
+# ----------------------
+def make_beep_sound(duration=0.5, freq=1000):
+    """Generate a short beep tone and save as temporary .wav file."""
+    samplerate = 44100
+    t = np.linspace(0, duration, int(samplerate * duration), endpoint=False)
+    wave = 0.5 * np.sin(2 * np.pi * freq * t)
+    tmp_beep = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
+    sf.write(tmp_beep.name, wave, samplerate)
+    return tmp_beep.name
+
+
 # ----------------------
 # Safety Filter Function
 # ----------------------
 def is_caption_safe(caption):
     try:
         votes = moderation_model(caption)
-        # If return_all_scores=True, it's [[{label, score}, ...]]
         if isinstance(votes, list) and isinstance(votes[0], list):
             votes = votes[0]
-        # Now safe to loop
         for item in votes:
             if isinstance(item, dict) and item.get("label") in ["V", "V2"] and item.get("score", 0) > 0.5:
                 return False
     except Exception as e:
         print("⚠️ Moderation failed:", e)

-    # Fallback keywords
     unsafe_keywords = [
-        "gun", "blood", "skull", "kill", "corpse", "gore", "knife", "weapon",
-        "fire", "murder", "dead", "death", "suicide", "bomb", "explosion",
-        "terrorist", "assault", "stab", "shoot", "pistol", "rifle", "shotgun",
-        "grenade", "horror", "beheaded", "torture", "hostage", "rape",
-        "war", "massacre", "chainsaw", "poison", "strangle", "hang", "drown"
+        "gun", "blood", "skull", "kill", "corpse", "gore", "knife", "weapon",
+        "fire", "murder", "dead", "death", "suicide", "bomb", "explosion",
+        "terrorist", "assault", "stab", "shoot", "pistol", "rifle", "shotgun",
+        "grenade", "horror", "beheaded", "torture", "hostage", "rape",
+        "war", "massacre", "chainsaw", "poison", "strangle", "hang", "drown"
     ]
     if any(word in caption.lower() for word in unsafe_keywords):
         return False
     return True


-
-
 # ----------------------
 # Caption + Translate + Speak
 # ----------------------
@@ -157,7 +314,8 @@ def generate_caption_translate_speak(image, target_lang):

     # Step 1.5: Safety Check
     if not is_caption_safe(english_caption):
-        return "⚠️ Warning: Unsafe or inappropriate content detected!", "", None
+        beep = make_beep_sound()
+        return "⚠️ Warning: Unsafe or inappropriate content detected!", "", beep

     # Step 2: Translate
     if target_lang in translation_models:
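The unsafe branch now returns the beep file path in the audio output slot instead of None. make_beep_sound can be sanity-checked in isolation; a minimal sketch, assuming numpy and soundfile are installed (the function body mirrors the one added above):

    import tempfile

    import numpy as np
    import soundfile as sf

    def make_beep_sound(duration=0.5, freq=1000):
        """Generate a short sine-wave beep and save it as a temporary .wav file."""
        samplerate = 44100
        t = np.linspace(0, duration, int(samplerate * duration), endpoint=False)
        wave = 0.5 * np.sin(2 * np.pi * freq * t)
        tmp_beep = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
        sf.write(tmp_beep.name, wave, samplerate)
        return tmp_beep.name

    path = make_beep_sound()
    info = sf.info(path)
    print(info.samplerate, round(info.duration, 2))  # expect: 44100 0.5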
@@ -172,6 +330,7 @@

     return english_caption, translated, tmp_file.name

+
 # ----------------------
 # VQA
 # ----------------------
@@ -181,17 +340,18 @@ def vqa_answer(image, question):
         out = vqa_model.generate(**inputs, max_new_tokens=50)
     answer = vqa_processor.decode(out[0], skip_special_tokens=True)

-    # Run safety filter on answers too
     if not is_caption_safe(answer):
-        return "⚠️ Warning: Unsafe or inappropriate content detected!"
+        beep = make_beep_sound()
+        return "⚠️ Warning: Unsafe or inappropriate content detected!", beep
+
+    return answer, None

-    return answer

 # ----------------------
 # Gradio UI
 # ----------------------
 with gr.Blocks(title="BLIP Vision App") as demo:
-    gr.Markdown("## 🖼️ BLIP: Image Captioning + Translation + Speech + VQA (with Safety Filter)")
+    gr.Markdown("## 🖼️ BLIP: Image Captioning + Translation + Speech + VQA (with Safety Filter + Beep Alert)")

     with gr.Tab("Caption + Translate + Speak"):
         with gr.Row():
@@ -199,7 +359,7 @@ with gr.Blocks(title="BLIP Vision App") as demo:
             lang_in = gr.Dropdown(["Hindi", "French", "Spanish"], label="Translate To", value="Hindi")
         eng_out = gr.Textbox(label="English Caption")
         trans_out = gr.Textbox(label="Translated Caption")
-        audio_out = gr.Audio(label="Spoken Caption", type="filepath")
+        audio_out = gr.Audio(label="Audio Output", type="filepath")
         btn1 = gr.Button("Generate Caption, Translate & Speak")
         btn1.click(generate_caption_translate_speak, inputs=[img_in, lang_in], outputs=[eng_out, trans_out, audio_out])

@@ -208,10 +368,12 @@ with gr.Blocks(title="BLIP Vision App") as demo:
             img_vqa = gr.Image(type="pil", label="Upload Image")
             q_in = gr.Textbox(label="Ask a Question about the Image")
             ans_out = gr.Textbox(label="Answer")
+        beep_out = gr.Audio(label="Alert Sound", type="filepath")
         btn2 = gr.Button("Ask")
-        btn2.click(vqa_answer, inputs=[img_vqa, q_in], outputs=ans_out)
+        btn2.click(vqa_answer, inputs=[img_vqa, q_in], outputs=[ans_out, beep_out])

 demo.launch()

+


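vqa_answer now always returns a two-item tuple to match outputs=[ans_out, beep_out]: (answer, None) on safe answers and (warning, beep_path) on unsafe ones, with None leaving the gr.Audio component empty. A minimal sketch of that contract (vqa_answer_stub is a hypothetical stand-in, not the real model call):

    # Hypothetical stand-in for vqa_answer() illustrating the output arity.
    def vqa_answer_stub(safe):
        if not safe:
            return "⚠️ Warning: Unsafe or inappropriate content detected!", "beep.wav"
        return "a dog playing on the beach", None  # None leaves the Audio output empty

    for safe in (True, False):
        answer, audio = vqa_answer_stub(safe)  # always unpacks into two outputs
        print(answer, audio)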
 