| import gradio as gr |
| from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline |
| import torch |
| from datetime import datetime |
| import pandas as pd |
| import matplotlib.pyplot as plt |
| import seaborn as sns |
| import os |
| import json |
| from pathlib import Path |
| from typing import Optional |
|
|
| |
| |
| |
class Config:
    """Application configuration: model id, generation defaults, file paths."""

    MODEL_ID = "Edraky/Edraky-AI"       # Hugging Face model repository
    THEME = "soft"                      # default Gradio theme
    MAX_TOKENS = 1000                   # default max_new_tokens
    TEMPERATURE = 0.7                   # default sampling temperature
    TOP_P = 0.9                         # default nucleus-sampling mass
    HISTORY_FILE = "chat_history.csv"   # conversation log (CSV)
    CONFIG_FILE = "config.json"         # persisted user settings (JSON)
    DEFAULT_SETTINGS = {
        "language": "ar",
        "theme": THEME,
        "max_tokens": MAX_TOKENS,
        "temperature": TEMPERATURE,
        "top_p": TOP_P
    }

    @classmethod
    def load_settings(cls):
        """Load settings from CONFIG_FILE, merged over DEFAULT_SETTINGS.

        Robustness fixes over the original:
        - a corrupt or unreadable config file no longer crashes the app
          (it falls back to the defaults with a warning);
        - keys missing from the saved file are filled in from the defaults,
          so older config files keep working when new settings are added.
        """
        settings = dict(cls.DEFAULT_SETTINGS)
        if os.path.exists(cls.CONFIG_FILE):
            try:
                with open(cls.CONFIG_FILE, 'r', encoding='utf-8') as f:
                    settings.update(json.load(f))
            except (OSError, json.JSONDecodeError) as e:
                print(f"Warning: could not read {cls.CONFIG_FILE} ({e}); using defaults")
        return settings

    @classmethod
    def save_settings(cls, settings):
        """Persist ``settings`` (a JSON-serializable dict) to CONFIG_FILE."""
        with open(cls.CONFIG_FILE, 'w', encoding='utf-8') as f:
            json.dump(settings, f, ensure_ascii=False, indent=4)
|
|
| |
| |
| |
def load_model():
    """Load the tokenizer + model and build a text-generation pipeline.

    Uses CUDA with float16 when a GPU is available (letting accelerate place
    the model via ``device_map="auto"``), otherwise CPU with float32.

    Returns:
        (pipeline, tokenizer, device_string)

    Raises:
        RuntimeError: if any step of loading fails.
    """
    try:
        tokenizer = AutoTokenizer.from_pretrained(
            Config.MODEL_ID,
            trust_remote_code=True
        )

        if torch.cuda.is_available():
            device = "cuda"
            torch_dtype = torch.float16
            print("GPU available - using CUDA with float16")
        else:
            device = "cpu"
            torch_dtype = torch.float32
            print("Using CPU with float32")

        model = AutoModelForCausalLM.from_pretrained(
            Config.MODEL_ID,
            torch_dtype=torch_dtype,
            device_map="auto" if device == "cuda" else None,
            trust_remote_code=True
        )

        if device == "cpu":
            model = model.to(device)

        # BUG FIX: when the model is loaded with device_map="auto" (accelerate),
        # also passing `device=` to pipeline() raises a ValueError in
        # transformers ("model has been loaded with accelerate and can not be
        # moved"). Only pass `device` for the plain-CPU path.
        pipeline_kwargs = {"device": device} if device == "cpu" else {}
        pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            **pipeline_kwargs
        )
        return pipe, tokenizer, device
    except Exception as e:
        # Chain the original exception so the real cause is not lost.
        raise RuntimeError(f"Failed to load model: {str(e)}") from e
|
|
# Load the model once at import time. On failure we fall back to a disabled
# state (pipe/tokenizer = None) so the UI can still start and show an error.
try:
    pipe, tokenizer, device = load_model()
except RuntimeError as err:
    print(f"Critical error: {err}")
    pipe, tokenizer, device = None, None, "cpu"
else:
    print(f"Model successfully loaded on {device.upper()}")
|
|
| |
| |
| |
def ensure_history_file():
    """Create the history CSV with its header row if it does not exist yet."""
    if os.path.exists(Config.HISTORY_FILE):
        return
    columns = ["timestamp", "language", "prompt", "response"]
    pd.DataFrame(columns=columns).to_csv(Config.HISTORY_FILE, index=False)
|
|
def log_conversation(prompt: str, response: str, language: str) -> None:
    """Append one prompt/response pair (with timestamp and language) to the history CSV.

    Failures are printed, never raised, so logging can never break the chat flow.
    """
    try:
        ensure_history_file()
        # Key order matches the CSV header written by ensure_history_file().
        record = {
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "language": language,
            "prompt": prompt,
            "response": response,
        }
        pd.DataFrame([record]).to_csv(
            Config.HISTORY_FILE,
            mode="a",
            header=False,
            index=False,
        )
    except Exception as e:
        print(f"Error logging conversation: {e}")
|
|
def generate_response(
    prompt: str,
    language: str = "ar",
    max_tokens: int = Config.MAX_TOKENS,
    temperature: float = Config.TEMPERATURE,
    top_p: float = Config.TOP_P
) -> str:
    """Generate a chat response from the loaded model.

    Args:
        prompt: The user's question.
        language: "ar" or "en"; selects the system prompt. Unknown codes
            fall back to the Arabic prompt.
        max_tokens: Passed to the pipeline as ``max_new_tokens``.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The model's reply, or an error string when the model is unavailable
        or generation fails. Successful exchanges are appended to the
        history CSV via log_conversation().
    """
    if pipe is None or tokenizer is None:
        return "Error: Model not loaded properly. Please try again later."
    try:
        system_prompts = {
            "ar": """
أنت إدراكي (Edraky)، مساعد ذكي متعدد اللغات صُمم لدعم الطلاب - خاصة في النظام التعليمي المصري. مهمتك هي تقديم شروحات واضحة ودقيقة وجذابة عبر مجموعة واسعة من المواد الأكاديمية، خاصة لمرحلتي الإعدادي والثانوي. أنت لطيف، مشجع، وتحترم الجميع دائماً.

🎓 المبادئ الأساسية:

1. استخدم لغة مبسطة يفهمها الطلاب بمختلف مستوياتهم - لا تعقد الأمور.
2. شجع الفضول دائماً، حتى لو كان السؤال بسيطاً أو غير صحيح.
3. علّم بالعربية والإنجليزية حسب لغة المستخدم. إذا تحدث المستخدم بالعربية، فاستخدم العربية الفصحى ما لم يُطلب غير ذلك.
4. كن واعياً ثقافياً بمصر والعالم العربي. استخدم أمثلة ذات صلة عند الشرح.
5. لا تكذب أو تختلق معلومات. إذا كان شيء غير معروف أو غير واضح، قل ذلك بصدق.
6. ابقَ مركزاً على كونك مساعداً تعليمياً: لا تمزح بطريقة تشتت الانتباه، ولا تقدم ردوداً خارج الموضوع إلا إذا طُلب ذلك بوضوح.
7. رد بحماس مثل معلم أو مرشد شغوف يؤمن بأن كل طالب يمكنه التفوق.
8. إذا كان السؤال غير واضح، فاسأل لتساعد المستخدم على التوضيح بدلاً من التخمين الخاطئ.

🛠️ الميزات التي تدعمها (إذا كانت متاحة):

- سجل الأسئلة السابقة
- إدخال صوتي
- رفع ملفات (صور، PDF) للمساعدة في الواجبات المدرسية
- تبديل اللغة حسب الطلب
- تلخيص أو شرح أو ترجمة النصوص الصعبة

🌟 النبرة: ودودة، احترافية، وتحفيزية. تصرف مثل الأخ/الأخت الأكبر الذكي أو المدرس المتفاني.

🧠 يمكنك التفكير خطوة بخطوة. مسموح لك بتقسيم المشكلات المعقدة إلى أجزاء، خاصة في الرياضيات والعلوم والنحو.

💡 أنت تساعد المستخدمين ليس فقط في الحصول على الإجابات - بل في فهم كيف ولماذا.
""",
            "en": """
You are Edraky (إدراكي), a smart, multilingual AI assistant built to support students—especially in the Egyptian educational system. Your role is to provide clear, accurate, and engaging explanations across a wide range of academic subjects, especially for preparatory and secondary levels. You are kind, encouraging, and always respectful.

🎓 Key Principles:

1. Use simplified language that students of all levels can understand—don't overcomplicate.
2. Always encourage curiosity, even if a question is simple or incorrect.
3. Teach in both Arabic and English, depending on the user's input. If the user speaks Arabic, prefer using Modern Standard Arabic unless otherwise specified.
4. Be culturally aware of Egypt and the Arab world. Use relevant examples when explaining.
5. Never lie or make up facts. If something is unknown or unclear, say so honestly.
6. Stay focused on being an educational assistant: no jokes that distract, no off-topic replies unless clearly allowed.
7. Respond warmly like a passionate teacher or mentor who believes every student can shine.
8. If the question is unclear, ask questions to help the user clarify instead of guessing wrong.

🛠️ Features You Support (if implemented):

- History of previous questions
- Voice input
- Uploads (images, PDFs) for help with scanned homework
- Language switching on demand
- Summarizing, explaining, or translating difficult texts

🌟 Tone: Friendly, professional, and motivational. Act like a smart older brother/sister or a dedicated tutor.

🧠 You can think step-by-step. You're also allowed to break complex problems into parts, especially in math, science, and grammar.

💡 You help users not just get answers—but understand how and why.
"""
        }
        # BUG FIX: the original used `{...}.get(language, "ar")`, which made
        # the two-character literal "ar" the system prompt for any language
        # code other than "ar"/"en". Fall back to the full Arabic prompt.
        system_message = system_prompts.get(language, system_prompts["ar"])

        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt}
        ]

        outputs = pipe(
            messages,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )

        # Chat-style pipelines return the whole message list under
        # 'generated_text'; the last entry is the assistant reply. Fall back
        # to the raw generated text (or the stringified output) otherwise.
        try:
            response = outputs[0]['generated_text'][-1]['content']
        except Exception:
            response = outputs[0]['generated_text'] if 'generated_text' in outputs[0] else str(outputs)
        log_conversation(prompt, response, language)
        return response
    except Exception as e:
        return f"Error generating response: {str(e)}"
|
|
def show_history() -> pd.DataFrame:
    """Return the conversation history, newest first.

    Timestamps are parsed to datetimes. Returns an empty DataFrame when the
    log is empty or anything goes wrong while reading it.
    """
    try:
        ensure_history_file()
        history = pd.read_csv(Config.HISTORY_FILE)
        if history.empty:
            return pd.DataFrame()
        history['timestamp'] = pd.to_datetime(history['timestamp'])
        return history.sort_values('timestamp', ascending=False)
    except Exception as e:
        print(f"Error reading history: {e}")
        return pd.DataFrame()
|
|
def analyze_history() -> Optional[plt.Figure]:
    """Create visualizations of the conversation history.

    Three panels: daily conversation counts (bar), language distribution
    (pie), and hourly activity (line).

    Returns:
        The matplotlib Figure, or None when there is no history or
        plotting fails.
    """
    try:
        df = show_history()
        if df.empty:
            return None

        fig = plt.figure(figsize=(12, 8))

        ax1 = plt.subplot2grid((2, 2), (0, 0))
        ax2 = plt.subplot2grid((2, 2), (0, 1))
        ax3 = plt.subplot2grid((2, 2), (1, 0), colspan=2)

        # Panel 1: conversations per day.
        df['date'] = df['timestamp'].dt.date
        daily_counts = df.groupby('date').size()
        daily_counts.plot(kind='bar', ax=ax1, color='#4e79a7')
        ax1.set_title("Daily Conversation Count")
        ax1.set_xlabel("Date")
        ax1.set_ylabel("Count")

        # Panel 2: language split.
        # BUG FIX: labels/colors were hard-coded to exactly two entries
        # (['Arabic', 'English']); with only one language logged (or any
        # unexpected code) the label count mismatched the data and
        # matplotlib raised. Derive labels from the actual value counts.
        lang_dist = df['language'].value_counts()
        label_names = {"ar": "Arabic", "en": "English"}
        palette = ['#f28e2b', '#e15759', '#76b7b2', '#59a14f']
        lang_dist.plot(
            kind='pie', ax=ax2, autopct='%1.1f%%',
            colors=palette[:len(lang_dist)],
            labels=[label_names.get(code, str(code)) for code in lang_dist.index]
        )
        ax2.set_title("Language Distribution")
        ax2.set_ylabel("")

        # Panel 3: activity by hour of day.
        df['hour'] = df['timestamp'].dt.hour
        hourly = df.groupby('hour').size()
        sns.lineplot(x=hourly.index, y=hourly.values, ax=ax3, color='#59a14f')
        ax3.set_title("Hourly Activity")
        ax3.set_xlabel("Hour of Day")
        ax3.set_ylabel("Conversations")

        plt.tight_layout()
        return fig
    except Exception as e:
        print(f"Error analyzing history: {e}")
        return None
|
|
def clear_history() -> pd.DataFrame:
    """Reset the history CSV to headers only and return an empty DataFrame.

    If the reset fails, the current (unchanged) history is returned instead.
    """
    columns = ["timestamp", "language", "prompt", "response"]
    try:
        pd.DataFrame(columns=columns).to_csv(Config.HISTORY_FILE, index=False)
    except Exception as e:
        print(f"Error clearing history: {e}")
        return show_history()
    return pd.DataFrame()
|
|
| |
| |
| |
def get_css() -> str:
    """Return the custom CSS injected into the Gradio Blocks app.

    Defines the gradient header banner, the footer, RTL/LTR text helpers
    (used via ``elem_classes`` on input widgets), and the settings-section
    card style. Keep the class names in sync with the HTML emitted by
    create_header() / create_footer().
    """
    # The string below is passed verbatim to gr.Blocks(css=...).
    return """
    .gradio-container {
        font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
    }
    .header {
        text-align: center;
        padding: 20px;
        background: linear-gradient(135deg, #6e48aa 0%, #9d50bb 100%);
        color: white;
        border-radius: 8px;
        margin-bottom: 20px;
    }
    .header h1 {
        margin: 0;
        font-size: 2.5em;
    }
    .header p {
        margin: 5px 0 0;
        font-size: 1.1em;
    }
    .footer {
        text-align: center;
        margin-top: 20px;
        padding: 10px;
        color: #666;
        font-size: 0.9em;
        border-top: 1px solid #eee;
    }
    .rtl-text {
        text-align: right;
        direction: rtl;
    }
    .ltr-text {
        text-align: left;
        direction: ltr;
    }
    .settings-section {
        background: #f9f9f9;
        padding: 15px;
        border-radius: 8px;
        margin-bottom: 15px;
        border: 1px solid #eee;
    }
    """
|
|
def create_header():
    """Render the bilingual title banner (HTML styled by the .header CSS rule)."""
    gr.Markdown("""
    <div class="header">
        <h1>إدراكي - Edraky AI</h1>
        <p>مساعد ذكي للطلاب في المواد الدراسية | Smart Student Assistant</p>
    </div>
    """)
|
|
def create_footer():
    """Render the footer, interpolating the module-level `device` string.

    NOTE(review): the footer text says "Powered by Qwen2-1.5B" while
    Config.MODEL_ID is "Edraky/Edraky-AI" — confirm which model name is
    correct before shipping.
    """
    gr.Markdown(f"""
    <div class="footer">
        <p>Edraky AI - Student Assistant | Powered by Qwen2-1.5B | Running on {device.upper()}</p>
    </div>
    """)
|
|
def create_chat_tab(settings: dict):
    """Build the chat tab.

    Left column: language selector plus a collapsed accordion with the
    generation sliders (seeded from `settings`). Right column: prompt box,
    submit button, and read-only response box. The submit button calls
    generate_response() with the chosen language mapped to its code.
    """
    with gr.Row():
        with gr.Column(scale=1):
            language = gr.Radio(
                ["العربية (Arabic)", "English"],
                value="العربية (Arabic)" if settings["language"] == "ar" else "English",
                label="اللغة / Language"
            )

            with gr.Accordion("⚙️ إعدادات المحادثة / Chat Settings", open=False):
                max_tokens = gr.Slider(
                    100, 1000,
                    value=settings["max_tokens"],
                    label="طول الرد / Max Response Length"
                )
                temperature = gr.Slider(
                    0.1, 1.0,
                    value=settings["temperature"],
                    label="الإبداعية / Creativity (Temperature)"
                )
                top_p = gr.Slider(
                    0.1, 1.0,
                    value=settings["top_p"],
                    label="التنوع / Diversity (Top-P)"
                )

        with gr.Column(scale=3):
            prompt = gr.Textbox(
                label="اكتب سؤالك هنا... / Type your question here...",
                placeholder="كيف يمكنني فهم نظرية فيثاغورس؟ / How can I understand the Pythagorean theorem?",
                lines=5,
                elem_classes=["rtl-text"]
            )
            submit_btn = gr.Button(
                "إرسال / Submit",
                variant="primary"
            )
            output = gr.Textbox(
                label="الرد / Response",
                interactive=False,
                lines=10
            )

    def _on_submit(user_prompt, lang_choice, tokens, temp, diversity):
        # Map the display label back to the model's language code.
        lang_code = "ar" if lang_choice == "العربية (Arabic)" else "en"
        return generate_response(user_prompt, lang_code, tokens, temp, diversity)

    submit_btn.click(
        fn=_on_submit,
        inputs=[prompt, language, max_tokens, temperature, top_p],
        outputs=output
    )
|
|
def create_history_tab():
    """Build the history tab: a read-only table of past conversations plus
    refresh / clear / export buttons wired to the history helpers."""
    with gr.Row():
        history_table = gr.Dataframe(
            label="سجل المحادثات / Conversation History",
            headers=["Timestamp", "Language", "Prompt", "Response"],
            interactive=False,
            wrap=True
        )

    with gr.Row():
        refresh_btn = gr.Button("🔄 تحديث / Refresh")
        clear_btn = gr.Button("🗑️ مسح السجل / Clear History", variant="stop")
        export_btn = gr.Button("📤 تصدير / Export")

    refresh_btn.click(fn=show_history, outputs=history_table)
    clear_btn.click(fn=clear_history, outputs=history_table)
    # NOTE(review): the export handler constructs a gr.File inside the event
    # wiring (and another as `outputs`) instead of placing one component in
    # the layout; verify the download actually renders in the deployed
    # Gradio version.
    export_btn.click(
        fn=lambda: gr.File(value=Config.HISTORY_FILE, visible=True),
        outputs=gr.File(visible=True)
    )
|
|
def create_analytics_tab():
    """Build the analytics tab: a plot area with a refresh button that
    re-runs analyze_history() and renders the resulting figure."""
    with gr.Row():
        analytics_plot = gr.Plot(
            label="التحليلات / Analytics",
            show_label=True
        )

    with gr.Row():
        refresh_analytics = gr.Button("🔄 تحديث التحليلات / Refresh Analytics")

    refresh_analytics.click(fn=analyze_history, outputs=analytics_plot)
|
|
def create_settings_tab(settings: dict):
    """Build the settings tab: display + model-default controls seeded from
    `settings`, and a save button that persists them via save_settings().

    NOTE(review): a theme change saved here only takes effect after the app
    is restarted, since the theme is read once in create_interface().
    """
    with gr.Column():
        with gr.Row():
            with gr.Column():
                gr.Markdown("### إعدادات العرض / Display Settings")
                theme = gr.Dropdown(
                    ["soft", "default", "gradio/glass", "gradio/monochrome"],
                    value=settings["theme"],
                    label="السمة / Theme"
                )
                default_lang = gr.Radio(
                    ["العربية (Arabic)", "English"],
                    value="العربية (Arabic)" if settings["language"] == "ar" else "English",
                    label="اللغة الافتراضية / Default Language"
                )

        with gr.Row():
            with gr.Column():
                gr.Markdown("### إعدادات النموذج / Model Settings")
                default_max_tokens = gr.Slider(
                    100, 1000,
                    value=settings["max_tokens"],
                    label="الطول الافتراضي للرد / Default Max Tokens"
                )
                default_temp = gr.Slider(
                    0.1, 1.0,
                    value=settings["temperature"],
                    label="الإبداعية الافتراضية / Default Temperature"
                )
                default_top_p = gr.Slider(
                    0.1, 1.0,
                    value=settings["top_p"],
                    label="التنوع الافتراضي / Default Top-P"
                )

        save_btn = gr.Button("💾 حفظ الإعدادات / Save Settings", variant="primary")
        status = gr.Textbox(label="الحالة / Status", interactive=False)

        # The lambda forwards the five control values plus the captured
        # `status` component (unused by save_settings, kept for its signature).
        save_btn.click(
            fn=lambda t, l, m, temp, tp: save_settings(t, l, m, temp, tp, status),
            inputs=[theme, default_lang, default_max_tokens, default_temp, default_top_p],
            outputs=status
        )
|
|
def save_settings(theme: str, lang: str, max_t: int, temp: float, top_p: float, status: gr.Textbox) -> str:
    """Persist the UI settings via Config.save_settings.

    Returns a bilingual success/failure message for the status textbox.
    `status` is accepted only for compatibility with the caller's wiring
    and is not used here.
    """
    lang_code = "ar" if lang == "العربية (Arabic)" else "en"
    try:
        Config.save_settings({
            "theme": theme,
            "language": lang_code,
            "max_tokens": max_t,
            "temperature": temp,
            "top_p": top_p,
        })
    except Exception as e:
        return f"❌ فشل في حفظ الإعدادات: {str(e)} / Failed to save settings: {str(e)}"
    return "✅ تم حفظ الإعدادات بنجاح / Settings saved successfully!"
|
|
def create_interface():
    """Assemble the full Gradio Blocks app: header, four tabs, and footer.

    Settings (theme, defaults) are loaded once here and passed to the tabs
    that need them.
    """
    settings = Config.load_settings()

    with gr.Blocks(theme=settings["theme"], css=get_css()) as app:
        create_header()

        with gr.Tabs():
            with gr.Tab("💬 Chat", id="chat"):
                create_chat_tab(settings)

            with gr.Tab("📜 History", id="history"):
                create_history_tab()

            with gr.Tab("📊 Analytics", id="analytics"):
                create_analytics_tab()

            with gr.Tab("⚙️ Settings", id="settings"):
                create_settings_tab(settings)

        create_footer()

    return app
|
|
| |
| |
| |
| if __name__ == "__main__": |
| Path("logs").mkdir(exist_ok=True) |
| |
| app = create_interface() |
| app.launch( |
| server_name="0.0.0.0", |
| server_port=7860, |
| share=False, |
| favicon_path=None, |
| inbrowser=True |
| ) |