Create app.py
app.py
ADDED
@@ -0,0 +1,201 @@
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# ============================================================
# Load Model and Tokenizer
# ============================================================

MODEL_ID = "azeddinShr/LFM2-1.2B-RAG-ARABIC-AdaLoRA"

print("Loading model...")
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",
    torch_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
print("✅ Model loaded successfully!")
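# Note (assumption, not part of the original commit): device_map="auto" requires
# the `accelerate` package to be installed. On a CPU-only machine a plain load
# also works, e.g.:
#   model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.float32)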

# ============================================================
# Generation Function
# ============================================================

def generate_answer(context, question, max_tokens=150, temperature=0.0):
    """
    Generate an answer from the given context and question.

    Args:
        context: Background information (Arabic text)
        question: Question to answer (Arabic text)
        max_tokens: Maximum length of the generated answer
        temperature: Sampling temperature (0.0 = deterministic)

    Returns:
        The generated answer, or an error message
    """
    # Validate inputs
    if not context or not context.strip():
        return "⚠️ الرجاء إدخال السياق (Please provide context)"

    if not question or not question.strip():
        return "⚠️ الرجاء إدخال السؤال (Please provide a question)"

    try:
        # Format prompt ("Use the following context to answer the question")
        prompt = f"استخدم السياق التالي للإجابة على السؤال:\n\n{context}\n\nالسؤال: {question}"

        # Tokenize with the model's chat template
        messages = [{"role": "user", "content": prompt}]
        input_ids = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt"
        ).to(model.device)

        # Generate: greedy decoding when temperature == 0, sampling otherwise.
        # Passing temperature only when sampling avoids the transformers
        # warning about temperature=0.0 combined with do_sample=False.
        do_sample = temperature > 0
        gen_kwargs = {
            "max_new_tokens": int(max_tokens),
            "do_sample": do_sample,
            "pad_token_id": tokenizer.eos_token_id,
            "eos_token_id": tokenizer.eos_token_id,
        }
        if do_sample:
            gen_kwargs["temperature"] = float(temperature)

        with torch.no_grad():
            outputs = model.generate(input_ids, **gen_kwargs)

        # Decode only the newly generated tokens, skipping the prompt
        answer = tokenizer.decode(
            outputs[0][input_ids.shape[1]:],
            skip_special_tokens=True
        ).strip()

        return answer

    except Exception as e:
        return f"❌ Error: {str(e)}"
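# Quick sanity check (hypothetical example, safe to remove): call the function
# directly before wiring up the UI.
#   print(generate_answer(
#       "نيوم مشروع ضخم في شمال غرب السعودية أُطلق في أكتوبر 2017.",
#       "متى أُطلق مشروع نيوم؟",
#   ))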

# ============================================================
# Example Questions
# ============================================================

examples = [
    [
        "جمال أحمد حمزة خاشقجي صحفي وإعلامي سعودي، ولد في 13 أكتوبر 1958 في المدينة المنورة. رأس عدّة مناصب لعدد من الصحف في السعودية، وتقلّد منصب مستشار، كما أنّه مدير عام قناة العرب الإخبارية سابقًا.",
        "من هو جمال خاشقجي؟"
    ],
    [
        "نيوم هو مشروع ضخم في شمال غرب المملكة العربية السعودية أطلقه ولي العهد الأمير محمد بن سلمان في أكتوبر 2017. تبلغ المساحة الإجمالية المخططة لنيوم 26,500 كيلومتر مربع بتكلفة تقدر بـ 500 مليار دولار.",
        "ما هي المساحة الإجمالية لمشروع نيوم؟"
    ],
    [
        "نيوم هو مشروع ضخم في شمال غرب المملكة العربية السعودية أطلقه ولي العهد الأمير محمد بن سلمان في أكتوبر 2017. تبلغ المساحة الإجمالية المخططة لنيوم 26,500 كيلومتر مربع بتكلفة تقدر بـ 500 مليار دولار. يتضمن المشروع مناطق متعددة منها ذا لاين (The Line)، وهي مدينة خطية مستقبلية، بالإضافة إلى سندالة وأوكساجون وتروجينا. من المخطط أن تكتمل أجزاء رئيسية من المشروع بحلول عام 2030 كجزء من رؤية السعودية 2030.",
        "ما هي المساحة الإجمالية المخططة لمشروع نيوم؟"
    ],
    [
        "في فبراير 2025، وقعت نيوم اتفاقية شراكة مع شركة داتافولت السعودية لتصميم مصنع ضخم للذكاء الاصطناعي سيكون بمثابة مركز بيانات. بموجب الاتفاقية، سيتم تنفيذ المشروع في أوكساجون، مدينة الصناعات النظيفة والمتقدمة التابعة لنيوم، على مراحل. ستشهد المرحلة الأولى استثماراً أولياً قدره 5 مليارات دولار، ومن المتوقع أن يكون جاهزاً للعمل بحلول عام 2028.",
        "كم تبلغ قيمة الاستثمار الأولي لمصنع الذكاء الاصطناعي في نيوم؟"
    ]
]
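# Each example is a [context, question] pair, matching the `inputs` list passed
# to gr.Examples below.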

# ============================================================
# Gradio Interface
# ============================================================

with gr.Blocks(theme=gr.themes.Soft(), title="Arabic QA System") as demo:

    gr.Markdown(
        """
        # 🤖 نظام الأسئلة والأجوبة العربي
        # Arabic Question Answering System

        Fine-tuned LFM2-1.2B-RAG model for Arabic extractive question answering.
        Provide context in Arabic and ask a question to get an answer extracted from the context.

        **Model:** [LiquidAI/LFM2-1.2B-RAG](https://huggingface.co/LiquidAI/LFM2-1.2B-RAG) fine-tuned with AdaLoRA
        """
    )

    with gr.Row():
        with gr.Column(scale=2):
            context_input = gr.Textbox(
                label="السياق / Context",
                placeholder="أدخل النص الذي يحتوي على المعلومات...\nEnter the text containing the information...",
                lines=8,
                rtl=True
            )

            question_input = gr.Textbox(
                label="السؤال / Question",
                placeholder="اطرح سؤالك هنا...\nAsk your question here...",
                lines=2,
                rtl=True
            )

            with gr.Accordion("⚙️ Advanced Settings", open=False):
                max_tokens = gr.Slider(
                    minimum=50,
                    maximum=500,
                    value=150,
                    step=10,
                    label="Max Tokens",
                    info="Maximum length of the generated answer"
                )

                temperature = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.0,
                    step=0.1,
                    label="Temperature",
                    info="0 = deterministic, higher = more creative"
                )

            submit_btn = gr.Button("🔍 Get Answer / احصل على الإجابة", variant="primary", size="lg")

        with gr.Column(scale=1):
            answer_output = gr.Textbox(
                label="الإجابة / Answer",
                lines=10,
                rtl=True,
                show_copy_button=True
            )

    gr.Examples(
        examples=examples,
        inputs=[context_input, question_input],
        label="📝 Example Questions / أمثلة"
    )

    gr.Markdown(
        """
        ---
        ### ℹ️ How to use / كيفية الاستخدام

        1. **Paste Context**: Add Arabic text containing the information
        2. **Ask Question**: Write your question about the context
        3. **Get Answer**: The model will extract the answer from the context

        **Note:** This model is optimized for Modern Standard Arabic and extractive QA tasks.

        ### 🔗 Links
        - [Model Card](https://huggingface.co/azeddinShr/LFM2-1.2B-RAG-ARABIC-AdaLoRA)
        - [Dataset (ARCD)](https://huggingface.co/datasets/hsseinmz/arcd)
        - [Base Model](https://huggingface.co/LiquidAI/LFM2-1.2B-RAG)
        """
    )

    # Connect the button to the generation function
    submit_btn.click(
        fn=generate_answer,
        inputs=[context_input, question_input, max_tokens, temperature],
        outputs=answer_output
    )
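    # Optional (assumption, not in the original commit): gr.Textbox also exposes
    # a .submit event with the same fn/inputs/outputs signature as Button.click,
    # so pressing Enter in the question box could trigger generation too:
    #   question_input.submit(
    #       fn=generate_answer,
    #       inputs=[context_input, question_input, max_tokens, temperature],
    #       outputs=answer_output,
    #   )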

# ============================================================
# Launch
# ============================================================

if __name__ == "__main__":
    demo.launch()
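    # Note: demo.launch(share=True) would additionally create a temporary public
    # gradio.live URL; the plain launch above is enough on Hugging Face Spaces,
    # which serves the app directly.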