X
File size: 2,390 Bytes
3c8b892
f442e1a
7e0c4ed
3c8b892
f9cd102
 
 
bb96229
7e0c4ed
 
f9cd102
 
 
7e0c4ed
 
 
 
 
 
f9cd102
 
 
 
 
7e0c4ed
 
f9cd102
 
 
 
 
f442e1a
3c8b892
f9cd102
 
f442e1a
f9cd102
 
 
7e0c4ed
 
 
 
f442e1a
 
f9cd102
 
 
7e0c4ed
 
 
bb96229
f442e1a
 
 
f9cd102
7e0c4ed
 
 
f442e1a
7e0c4ed
 
f9cd102
7e0c4ed
 
 
 
f442e1a
bb96229
7e0c4ed
 
eca263a
7e0c4ed
f9cd102
7e0c4ed
eca263a
f9cd102
 
 
bb96229
 
7e0c4ed
f9cd102
7e0c4ed
add4032
f442e1a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# =========================
# CONFIG
# =========================
# Hugging Face model repo id for the chat model.
MODEL_NAME = "kawkabelaloom/astramindx"
# Arabic system prompt — roughly: "You are a smart Arabic assistant;
# answer clearly, in a simple and helpful style."
SYSTEM_PROMPT = "أنت مساعد عربي ذكي، تجيب بوضوح وبأسلوب بسيط ومفيد."

# =========================
# LOAD TOKENIZER
# =========================
print("🔄 Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME,
    trust_remote_code=True  # presumably the repo ships custom tokenizer code — verify against the model card
)

# =========================
# LOAD MODEL (FORCE CPU – NO QUANTIZATION)
# =========================
print("🔄 Loading model (CPU, no quantization)...")

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map=None,                # explicitly no "auto" device_map — keep the model off accelerators
    torch_dtype=torch.float32,       # full precision; the safe dtype for CPU inference
    low_cpu_mem_usage=False,         # marked important by the author — presumably to avoid lazy/meta loading; confirm
    trust_remote_code=True,
    quantization_config=None         # the key fix: override any quantization config bundled with the repo
)

# Inference-only: disable dropout/batch-norm training behavior.
model.eval()
print("✅ Model loaded successfully")

# =========================
# PIPELINE
# =========================
# Text-generation pipeline wrapping the already-loaded model/tokenizer
# (no device argument, so it stays on CPU as loaded above).
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer
)

# =========================
# CHAT LOGIC
# =========================
def build_prompt(history, user_message):
    """Assemble the full generation prompt.

    Starts from the system prompt, replays every (user, bot) turn in
    *history*, and ends with the new user message followed by the
    assistant marker so the model continues as the assistant.
    """
    parts = [SYSTEM_PROMPT, "\n\n"]
    for past_user, past_bot in history:
        parts.append(f"المستخدم: {past_user}\nالمساعد: {past_bot}\n")
    parts.append(f"المستخدم: {user_message}\nالمساعد:")
    return "".join(parts)


def chat(user_message, history):
    """Run one chat turn for the Gradio UI.

    Builds the prompt from *history* plus *user_message*, samples a
    completion, and appends the (user, reply) pair to *history*.
    Any failure is surfaced to the user as an error message in the
    chat instead of crashing the UI. Returns (history, "") so the
    textbox is cleared after submit.
    """
    try:
        full_prompt = build_prompt(history, user_message)

        result = generator(
            full_prompt,
            max_new_tokens=128,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
        )

        # generated_text includes the prompt; keep only what follows
        # the final assistant marker.
        generated = result[0]["generated_text"]
        reply = generated.split("المساعد:")[-1].strip()
        history.append((user_message, reply))
    except Exception as err:
        # Best-effort boundary: show the error inline in the chat.
        history.append((user_message, f"❌ Error: {str(err)}"))
    return history, ""

# =========================
# GRADIO UI
# =========================
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Astramindx Chatbot")
    # Chat transcript; chat() returns the updated history into this component.
    chatbot = gr.Chatbot(height=450)
    # Arabic placeholder: "type your question here..."
    msg = gr.Textbox(placeholder="اكتب سؤالك هنا...")
    # On Enter: chat(msg, chatbot) -> (chatbot, msg); second output clears the box.
    msg.submit(chat, [msg, chatbot], [chatbot, msg])

# Blocks until the server stops.
demo.launch()