kemo2003 committed
Commit 7d2dfb8 · verified · 1 Parent(s): 5ea0a54

Update app.py

Files changed (1)
  1. app.py +89 -157
app.py CHANGED
@@ -9,15 +9,16 @@ from huggingface_hub import login
 from transformers import (
     AutoTokenizer,
     AutoModelForCausalLM,
-    LlavaNextProcessor,
-    LlavaNextForConditionalGeneration,
+    # AutoProcessor,  # Replaced by ViltProcessor for VQA
+    # AutoModelForVision2Seq,  # Replaced by ViltForQuestionAnswering for VQA
     WhisperProcessor,
     WhisperForConditionalGeneration,
-    SpeechT5Processor,
-    SpeechT5ForTextToSpeech,
-    SpeechT5HifiGan
+    ViltProcessor,  # Added for ViLT VQA model
+    ViltForQuestionAnswering  # Added for ViLT VQA model
 )
-from datasets import load_dataset
+from parler_tts import ParlerTTSForConditionalGeneration
+from transformers import AutoFeatureExtractor
+
 import os
 import scipy.io.wavfile as wavfile
 import io
@@ -29,166 +30,106 @@ token = os.getenv("HF_API_TOKEN")
 if token:
     login(token=token)
 else:
-    print("Warning: HF_API_TOKEN not set. Some models might require login.")
+    print("تحذير: لم يتم تعيين متغير البيئة HF_API_TOKEN. بعض النماذج قد تتطلب تسجيل الدخول.")
 
 # Device Configuration
 device = "cuda" if torch.cuda.is_available() else "cpu"
-print(f"Using device: {device}")
+print(f"استخدام الجهاز: {device}")
 
-# 1. Text Generation Model: microsoft/Phi-4-reasoning-plus
-text_model_name = "microsoft/Phi-4-reasoning-plus"
-print(f"Loading text model: {text_model_name}")
+# 1. Text Generation Model: distilgpt2 (Lightweight)
+text_model_name = "distilgpt2"
+print(f"تحميل نموذج النص: {text_model_name}")
 text_tokenizer = AutoTokenizer.from_pretrained(text_model_name)
 text_model = AutoModelForCausalLM.from_pretrained(
     text_model_name,
-    torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,  # bfloat16 for better performance if available
-    device_map="auto",  # Automatically uses available GPUs or CPU
-    trust_remote_code=True  # Phi models may require this
+    torch_dtype=torch.float32,
+    device_map="auto"
 )
-print("Text model loaded.")
-
-# 2. Image Analysis Model: llava-hf/llava-1.5-7b-hf
-image_model_name = "llava-hf/llava-1.5-7b-hf"
-print(f"Loading image model: {image_model_name}")
-image_processor = LlavaNextProcessor.from_pretrained(image_model_name)
-image_model = LlavaNextForConditionalGeneration.from_pretrained(
+if text_tokenizer.pad_token is None:
+    text_tokenizer.pad_token = text_tokenizer.eos_token
+print("تم تحميل نموذج النص.")
+
+# 2. Image Analysis Model: dandelin/vilt-b32-finetuned-vqa (Lightweight, Public VQA)
+image_model_name = "dandelin/vilt-b32-finetuned-vqa"
+print(f"تحميل نموذج الصور (VQA): {image_model_name}")
+image_processor = ViltProcessor.from_pretrained(image_model_name)
+image_model = ViltForQuestionAnswering.from_pretrained(
     image_model_name,
-    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
     device_map="auto"
 )
-print("Image model loaded.")
+print("تم تحميل نموذج الصور (VQA).")
 
-# 3. Speech-to-Text Model: openai/whisper-large-v3
-stt_model_name = "openai/whisper-large-v3"
-print(f"Loading STT model: {stt_model_name}")
+# 3. Speech-to-Text Model: openai/whisper-tiny (Lightweight)
+stt_model_name = "openai/whisper-tiny"
+print(f"تحميل نموذج تحويل الكلام إلى نص: {stt_model_name}")
 stt_processor = WhisperProcessor.from_pretrained(stt_model_name)
 stt_model = WhisperForConditionalGeneration.from_pretrained(stt_model_name).to(device)
-stt_model.config.forced_decoder_ids = None  # For multilingual capabilities
-print("STT model loaded.")
-
-# 4. Text-to-Speech Model: microsoft/speecht5_tts & microsoft/speecht5_hifigan
-tts_processor_name = "microsoft/speecht5_tts"
-tts_model_name = "microsoft/speecht5_tts"
-tts_vocoder_name = "microsoft/speecht5_hifigan"
+stt_model.config.forced_decoder_ids = None
+print("تم تحميل نموذج تحويل الكلام إلى نص.")
 
-print(f"Loading TTS processor: {tts_processor_name}")
-tts_processor = SpeechT5Processor.from_pretrained(tts_processor_name)
-print(f"Loading TTS model: {tts_model_name}")
-tts_model = SpeechT5ForTextToSpeech.from_pretrained(tts_model_name).to(device)
-print(f"Loading TTS vocoder: {tts_vocoder_name}")
-tts_vocoder = SpeechT5HifiGan.from_pretrained(tts_vocoder_name).to(device)
-
-# Load speaker embeddings (example from CMU ARCTIC dataset)
-print("Loading speaker embeddings for TTS...")
-try:
-    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
-    # speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0).to(device)  # A specific speaker
-    # Let's pick a female voice, e.g., slt (female)
-    # Find an entry for 'slt' speaker, or use a default if not easily found by index.
-    # For simplicity, using a known good index for a female voice if available, otherwise a default.
-    # This part might need adjustment based on the dataset structure or desired voice.
-    # Example: embeddings_dataset is a list of dicts. Find one with speaker 'slt'.
-    # For now, let's use a fixed index that often corresponds to a clear female voice.
-    speaker_id_index = 7306  # Example index, might need to find a better one or allow selection
-    # A common female speaker from this dataset is 'slt'. Let's try to find her or use a default.
-    # The dataset has 'slt_arctic_a0001' to 'slt_arctic_b0593'. Index 7306 is 'awb_arctic_a0001'.
-    # Let's try to find 'slt' if possible, otherwise use a default. The dataset has 8803 entries.
-    # For simplicity, we'll use a default index. A common one used in examples is 7306.
-    # A known female voice index from this dataset (often 'slt' or similar) is around this range.
-    # Let's use a specific one for 'slt' if we can find it, otherwise a default.
-    # The dataset items are like {'audio': ..., 'filename': 'arctic_a0001.wav', 'speaker': 'slt', 'text': ..., 'xvector': ...}
-    # Let's try to find an 'slt' speaker explicitly
-    slt_speaker_embedding = None
-    for i in range(len(embeddings_dataset)):
-        if embeddings_dataset[i]['speaker'] == 'slt':
-            slt_speaker_embedding = torch.tensor(embeddings_dataset[i]["xvector"]).unsqueeze(0).to(device)
-            print("Found 'slt' speaker embedding.")
-            break
-    if slt_speaker_embedding is None:
-        print("Could not find 'slt' speaker, using default index 7306.")
-        slt_speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0).to(device)
-    speaker_embeddings = slt_speaker_embedding
-
-except Exception as e:
-    print(f"Could not load speaker embeddings: {e}. TTS might not work optimally or at all.")
-    speaker_embeddings = torch.randn((1, 512)).to(device)  # Fallback to random embeddings
-print("TTS components loaded.")
+# 4. Text-to-Speech Model: parler-tts/parler-tts-tiny-v1 (Lightweight)
+tts_model_repo_id = "parler-tts/parler-tts-tiny-v1"
+print(f"تحميل نموذج تحويل النص إلى كلام: {tts_model_repo_id}")
+tts_model = ParlerTTSForConditionalGeneration.from_pretrained(tts_model_repo_id).to(device)
+tts_feature_extractor = AutoFeatureExtractor.from_pretrained(tts_model_repo_id)
+print("تم تحميل مكونات تحويل النص إلى كلام.")
 
 # --- Helper Functions for Model Inference ---
 
-# 1. Text Generation
+# 1. Text Generation (using distilgpt2)
 def generate_text_response(prompt_text):
     try:
-        # Phi-4-reasoning-plus uses a specific chat template
-        # System prompt from the model card (simplified for general use here)
-        system_prompt = "You are a helpful AI assistant. Please provide a clear and friendly answer."
-        # The model card suggests a more complex system prompt for reasoning tasks.
-        # For a general chatbot, a simpler one might be fine, or adjust as needed.
-        # Original prompt: "أجب على السؤال التالي بطريقة ودية وواضحة:"
-        # We will use the user's prompt directly with a general system prompt.
-
-        messages = [
-            {"role": "system", "content": system_prompt},
-            {"role": "user", "content": f"أجب على السؤال التالي بطريقة ودية وواضحة: {prompt_text}"}
-        ]
-
-        # Tokenize the chat
-        # The model card for Phi-4-reasoning-plus shows: inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
-        # However, apply_chat_template might not be available or work the same for all AutoTokenizers directly without specific setup.
-        # A more general approach for models expecting chat format:
-        prompt_for_model = text_tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-        inputs = text_tokenizer(prompt_for_model, return_tensors="pt", padding=True, truncation=True, max_length=2048).to(text_model.device)
-
+        full_prompt = f"السؤال: {prompt_text}\nالإجابة الودية والواضحة:"
+        inputs = text_tokenizer(full_prompt, return_tensors="pt", padding=True, truncation=True, max_length=512).to(text_model.device)
         outputs = text_model.generate(
             **inputs,
-            max_new_tokens=300,  # Increased from 200 for potentially longer answers
-            # Parameters from Phi-4 model card for reasoning, can be adjusted for chat
-            # temperature=0.8,
-            # top_p=0.95,
-            # do_sample=True,
-            # For more deterministic chat, we might use different settings
+            max_new_tokens=150,
             temperature=0.7,
             top_k=50,
             do_sample=True,
-            pad_token_id=text_tokenizer.eos_token_id
+            pad_token_id=text_tokenizer.eos_token_id,
+            no_repeat_ngram_size=2
         )
-        # Decode, skipping the prompt part
         response_text = text_tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
         return response_text.strip()
     except Exception as e:
-        print(f"Error in text generation: {e}")
+        print(f"خطأ في توليد النص: {e}")
         return f"خطأ في معالجة النص: {str(e)}"
 
-# 2. Image Analysis
+# 2. Image Analysis (using dandelin/vilt-b32-finetuned-vqa)
 def analyze_image(pil_image, question_text=None):
     try:
         if pil_image is None:
             return "الرجاء رفع صورة أولاً."
-        # Convert numpy array from Gradio to PIL Image if necessary
         if isinstance(pil_image, np.ndarray):
             pil_image = Image.fromarray(pil_image).convert("RGB")
-        else:  # Assuming it's already a PIL image path or object from gr.Image
+        else:
             pil_image = pil_image.convert("RGB")
 
         if not question_text or question_text.strip() == "":
-            prompt_for_model = "USER: <image>\nصف محتوى الصورة بالتفصيل\nASSISTANT:"
-        else:
-            prompt_for_model = f"USER: <image>\n{question_text}\nASSISTANT:"
+            # ViLT is a VQA model, it needs a question.
+            # If no question, we can ask a generic one, or return a message.
+            # For now, let's ask a generic question if none is provided.
+            question_text = "What is in this image?"
+
+        # Prepare inputs for ViLT
+        encoding = image_processor(pil_image, question_text, return_tensors="pt").to(image_model.device)
+
+        # Forward pass
+        with torch.no_grad():
+            outputs = image_model(**encoding)
+
+        logits = outputs.logits
+        idx = logits.argmax(-1).item()
+        response_text = image_model.config.id2label[idx]
 
-        inputs = image_processor(text=prompt_for_model, images=pil_image, return_tensors="pt").to(image_model.device)
-        outputs = image_model.generate(**inputs, max_new_tokens=100)
-        response_text = image_processor.decode(outputs[0], skip_special_tokens=True)
-        # LLaVA response often includes the prompt, so we might need to clean it.
-        # Typically, the response starts after "ASSISTANT: "
-        assistant_marker = "ASSISTANT:"
-        if assistant_marker in response_text:
-            response_text = response_text.split(assistant_marker, 1)[-1].strip()
         return response_text
     except Exception as e:
-        print(f"Error in image analysis: {e}")
+        print(f"خطأ في تحليل الصورة: {e}")
         return f"خطأ في تحليل الصورة: {str(e)}"
 
-# 3. Audio Processing (STT and TTS)
+# 3. Audio Processing (STT with Whisper Tiny and TTS with ParlerTTS Tiny)
 def process_audio(audio_input):
     try:
         if audio_input is None:
@@ -196,41 +137,34 @@ def process_audio(audio_input):
 
         sample_rate, audio_data = audio_input
 
-        # Ensure audio_data is float32 and normalized for Whisper
         if audio_data.dtype != np.float32:
            audio_data = audio_data.astype(np.float32)
        if np.max(np.abs(audio_data)) > 0:
            audio_data = audio_data / np.max(np.abs(audio_data))
-        else:  # Handle silent audio
+        else:
            return "تم استقبال صوت صامت.", "", (16000, np.array([], dtype=np.int16))
 
-        # Speech-to-Text
         input_features = stt_processor(audio_data, sampling_rate=sample_rate, return_tensors="pt").input_features.to(device)
-        # Generate token ids
-        predicted_ids = stt_model.generate(input_features, language="ar")  # Specify language for better accuracy if known
-        # Decode token ids to text
-        transcription = stt_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
-        transcription = transcription.strip()
+        predicted_ids = stt_model.generate(input_features, language="ar")
+        transcription = stt_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0].strip()
 
         if not transcription:
             return "لم يتمكن النموذج من استخراج نص من الصوت.", "", (16000, np.array([], dtype=np.int16))
 
-        # Text Generation (Response to transcription)
         text_response = generate_text_response(transcription)
-
-        # Text-to-Speech
-        inputs = tts_processor(text=text_response, return_tensors="pt")
-        speech_values = tts_model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=tts_vocoder)
 
-        # Ensure output is a NumPy array and handle sample rate
-        audio_output_np = speech_values.cpu().numpy()
-        # SpeechT5 typically outputs at 16kHz
-        tts_sample_rate = 16000
+        prompt = text_response
+        with torch.no_grad():
+            generation_output = tts_model.generate(input_ids=None,
+                                                   prompt=prompt,
+                                                   do_sample=True,
+                                                   temperature=1.0).cpu().numpy().squeeze()
+        audio_output_np = generation_output
+        tts_sample_rate = tts_model.config.sampling_rate
 
         return transcription, text_response, (tts_sample_rate, audio_output_np)
     except Exception as e:
-        print(f"Error in audio processing: {e}")
-        # Return empty audio with a valid sample rate for Gradio
+        print(f"خطأ في معالجة الصوت: {e}")
         empty_audio_data = np.array([], dtype=np.float32)
         return f"خطأ في معالجة الصوت: {str(e)}", "", (16000, empty_audio_data)
 
@@ -239,7 +173,7 @@ def process_file(file_obj):
     try:
         if file_obj is None:
             return "الرجاء رفع ملف أولاً."
-        file_path = file_obj.name  # Gradio File object has a .name attribute with the temp path
+        file_path = file_obj.name
         text_content = ""
         if file_path.endswith(".pdf"):
             with fitz.open(file_path) as doc:
@@ -256,23 +190,21 @@
         if not text_content.strip():
             return "الملف فارغ أو لا يمكن قراءة محتواه النصي."
 
-        # Summarize or query the content
-        # Limit context size for the model if necessary
-        max_context_len = 1500  # Adjust based on model limits and typical file sizes
+        max_context_len = 1000
         if len(text_content) > max_context_len:
             text_content = text_content[:max_context_len] + "... [المحتوى تم اختصاره]"
 
         response = generate_text_response(f"لخص المحتوى التالي من الملف: \n\n{text_content}")
         return response
     except Exception as e:
-        print(f"Error processing file: {e}")
+        print(f"خطأ في معالجة الملف: {e}")
         return f"خطأ في قراءة الملف: {str(e)}"
 
 # --- Gradio Interface ---
 with gr.Blocks(css=".gradio-container {background-color: #f0f4f8; font-family: Arial; color: #333; padding: 20px;}", theme=gr.themes.Soft()) as demo:
-    gr.Markdown("# 🤖 Kemo Chat V2 - مساعد ذكي متعدد الوسائط (بأدوات Hugging Face الأحدث)")
-    gr.Markdown("🎯 تفاعل معي عبر النصوص، الصور، الصوت أو الملفات! يدعم العربية والإنجليزية (النماذج قد تتفاوت في دعم اللغات).")
-    gr.Markdown("📁 يدعم الملفات: PDF، Excel، CSV\n🖼️ يدعم الوصف الذكي للصور والسؤال عنها\n🎙️ تحويل الصوت إلى نص والرد صوتياً")
+    gr.Markdown("# 🤖 Kemo Chat V3.2 - مساعد ذكي متعدد الوسائط (نماذج خفيفة الوزن - ViLT VQA)")
+    gr.Markdown("🎯 تفاعل معي عبر النصوص، الصور، الصوت أو الملفات! (باستخدام نماذج أقل استهلاكًا للذاكرة).")
+    gr.Markdown("📁 يدعم الملفات: PDF، Excel، CSV\n🖼️ يدعم الإجابة على الأسئلة حول الصور (VQA)\n🎙️ تحويل الصوت إلى نص والرد صوتياً")
 
     with gr.Tab("💬 المحادثة النصية"):
         text_input = gr.Textbox(label="اكتب سؤالك أو رسالتك هنا", lines=3)
@@ -280,23 +212,23 @@ with gr.Blocks(css=".gradio-container {background-color: #f0f4f8; font-family: A
         text_submit = gr.Button("إرسال", variant="primary")
         text_submit.click(fn=generate_text_response, inputs=text_input, outputs=text_output)
 
-    with gr.Tab("🖼️ تحليل الصور"):
-        gr.Markdown("ارفع صورة واسأل عنها أو اطلب وصفًا لها.")
+    with gr.Tab("🖼️ تحليل الصور (سؤال وجواب)"):
+        gr.Markdown("ارفع صورة واطرح سؤالاً عنها.")
         with gr.Row():
-            image_input = gr.Image(type="pil", label="ارفع صورة")  # type="pil" for PIL Image object
+            image_input = gr.Image(type="pil", label="ارفع صورة")
             with gr.Column():
-                image_question = gr.Textbox(label="اختياري: اسأل سؤال عن الصورة", lines=2)
-                image_output = gr.Textbox(label="الوصف أو الإجابة", lines=5, interactive=False)
+                image_question = gr.Textbox(label="اطرح سؤالاً عن الصورة (مطلوب لـ ViLT)", lines=2, placeholder="مثال: What color is the car?")
+                image_output = gr.Textbox(label="الإجابة", lines=5, interactive=False)
         image_submit = gr.Button("تحليل الصورة", variant="primary")
         image_submit.click(fn=analyze_image, inputs=[image_input, image_question], outputs=image_output)
 
     with gr.Tab("🎤 التفاعل الصوتي"):
         gr.Markdown("سجّل رسالة صوتية، سيتم تحويلها إلى نص، ثم الرد عليها نصيًا وصوتيًا.")
-        audio_input = gr.Audio(sources=["microphone"], type="numpy", label="سجّل رسالتك الصوتية")  # type="numpy" for (sr, data)
+        audio_input = gr.Audio(sources=["microphone"], type="numpy", label="سجّل رسالتك الصوتية")
         with gr.Row():
            audio_transcription = gr.Textbox(label="النص المستخرج من صوتك", interactive=False, lines=2)
            audio_text_response = gr.Textbox(label="الرد النصي على رسالتك", interactive=False, lines=3)
-        audio_output_player = gr.Audio(label="الرد الصوتي من المساعد", type="numpy", interactive=False)  # type="numpy" for (sr, data)
+        audio_output_player = gr.Audio(label="الرد الصوتي من المساعد", type="numpy", interactive=False)
         audio_submit = gr.Button("معالجة الصوت", variant="primary")
         audio_submit.click(fn=process_audio,
                            inputs=audio_input,
@@ -310,6 +242,6 @@ with gr.Blocks(css=".gradio-container {background-color: #f0f4f8; font-family: A
         file_submit.click(fn=process_file, inputs=file_input, outputs=file_output)
 
 if __name__ == "__main__":
-    print("Launching Gradio Demo...")
-    demo.launch(share=True)  # share=True for public link, remove if not needed
+    print("Launching Gradio Demo (Lightweight Models with ViLT VQA)...")
+    demo.launch(share=True)
 
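A note on the new image path: dandelin/vilt-b32-finetuned-vqa is a classification-style VQA model, so analyze_image above returns one short answer picked by argmax from the model's fixed English answer vocabulary, not a free-form description. A minimal standalone sketch of the same pattern, for reference only; the image path "example.jpg" is a placeholder:

    import torch
    from PIL import Image
    from transformers import ViltProcessor, ViltForQuestionAnswering

    # Same VQA pattern as analyze_image: encode (image, question), take the argmax label.
    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

    image = Image.open("example.jpg").convert("RGB")   # placeholder path
    question = "What is in this image?"                # English questions match the training data

    encoding = processor(image, question, return_tensors="pt")
    with torch.no_grad():
        logits = model(**encoding).logits
    print(model.config.id2label[logits.argmax(-1).item()])  # typically a one- or two-word answer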
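On the speech-to-text path: stt_model.generate(input_features, language="ar") relies on a transformers version whose Whisper generation accepts a language argument. If the installed version rejects it, the same effect can usually be obtained through the processor's decoder prompt ids; a hedged sketch reusing the stt_processor, stt_model and input_features already defined in process_audio:

    # Assumes stt_processor, stt_model and input_features from process_audio above.
    # get_decoder_prompt_ids is part of the Whisper processor/tokenizer API.
    forced_ids = stt_processor.get_decoder_prompt_ids(language="arabic", task="transcribe")
    predicted_ids = stt_model.generate(input_features, forced_decoder_ids=forced_ids)
    transcription = stt_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0].strip()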
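On the new text-to-speech path: the parler_tts library's documented generate interface takes tokenized inputs, i.e. input_ids for a voice-description prompt and prompt_input_ids for the text to speak, rather than a prompt= string. If tts_model.generate(input_ids=None, prompt=...) above fails with the installed version, a hedged sketch of the README-style call, reusing tts_model, tts_model_repo_id, device and text_response from this file (the description string is an illustrative placeholder):

    import torch
    from transformers import AutoTokenizer

    # Tokenizer for both the voice description and the text to be spoken.
    tts_tokenizer = AutoTokenizer.from_pretrained(tts_model_repo_id)
    description = "A clear and friendly female voice, close-up recording, no background noise."  # placeholder

    desc_ids = tts_tokenizer(description, return_tensors="pt").input_ids.to(device)
    prompt_ids = tts_tokenizer(text_response, return_tensors="pt").input_ids.to(device)

    with torch.no_grad():
        generation = tts_model.generate(input_ids=desc_ids, prompt_input_ids=prompt_ids)
    audio_output_np = generation.cpu().numpy().squeeze()
    tts_sample_rate = tts_model.config.sampling_rate  # Parler-TTS reports its output rate here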