Ahmed-El-Sharkawy committed on
Commit
09752a5
·
verified ·
1 Parent(s): 3f41fce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +173 -143
app.py CHANGED
@@ -1,143 +1,173 @@
1
- import os, sys, time, asyncio
2
- from typing import List, Dict
3
- import gradio as gr
4
- from dotenv import load_dotenv
5
-
6
- # Windows event loop (prevents asyncio warnings on Win)
7
- if sys.platform.startswith("win"):
8
- try:
9
- asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
10
- except Exception:
11
- pass
12
-
13
- # config from .env or defaults
14
- load_dotenv()
15
- APP_Name = os.getenv("APP_Name", "Ghaymah GenAI chatbots")
16
- APP_Version = os.getenv("APP_Version", "0.1.0")
17
- API_KEY = os.getenv("API_KEY", "")
18
-
19
- # Models from .env or fallback to your set
20
- MODELS = [m.strip() for m in os.getenv("Models", "").split(",") if m.strip()] or [
21
- "gemma-3-4b-it",
22
- "QwQ-32B",
23
- "DeepSeek-V3-0324",
24
- "Qwen/Qwen3-32B",
25
- "zai-org/GLM-4.5-Air",
26
- "moonshotai/Kimi-K2-Instruct",
27
- ]
28
-
29
- # Friendly descriptions & logo
30
- MODEL_INFO = {
31
- "gemma-3-4b-it": "Google Gemma-3 4B Instruct light, fast, solid reasoning.",
32
- "QwQ-32B": "QwQ-32B — reasoning-focused; strong long-form answers.",
33
- "DeepSeek-V3-0324": "DeepSeek V3 (0324) — versatile, great multi-step reasoning.",
34
- "Qwen/Qwen3-32B": "Qwen3-32Bmultilingual, good code & math.",
35
- "zai-org/GLM-4.5-Air": "GLM-4.5-Air — efficient generalist, good latency.",
36
- "moonshotai/Kimi-K2-Instruct": "Kimi K2 Instruct — long-context, helpful writing.",
37
- }
38
- LOGO_PATH = "download.jpeg" # change to your image if different
39
-
40
- # ── OpenAI-compatible client ──────────────────────────────────────────────────
41
- from openai import OpenAI
42
- BASE_URL = "https://genai.ghaymah.systems"
43
- client = OpenAI(api_key=API_KEY, base_url=BASE_URL) if API_KEY else None
44
-
45
- SYSTEM_SEED = "You are Ghaymah Assistant. Be concise and helpful."
46
-
47
- # Helpers
48
- BACKOFF = [5, 10, 20] # basic retry for 429s
49
-
50
- def safe_chat_complete(model: str, messages: List[Dict], max_tokens: int = 800) -> str:
51
- if not client:
52
- return "⚠️ Missing API_KEY in .env"
53
- attempt = 0
54
- while True:
55
- try:
56
- resp = client.chat.completions.create(
57
- model=model,
58
- messages=messages,
59
- max_tokens=max_tokens,
60
- temperature=0.3,
61
- timeout=90,
62
- )
63
- return resp.choices[0].message.content or ""
64
- except Exception as e:
65
- msg = str(e)
66
- if ("429" in msg or "Rate" in msg) and attempt < len(BACKOFF):
67
- time.sleep(BACKOFF[attempt]); attempt += 1
68
- continue
69
- return f"Request failed for `{model}`: {e}"
70
-
71
- def init_state():
72
- return {"messages": [{"role": "system", "content": SYSTEM_SEED}]}
73
-
74
- # Gradio app
75
- with gr.Blocks(title=APP_Name) as demo:
76
- state = gr.State(init_state())
77
-
78
- gr.Markdown(f"# {APP_Name} \n<span style='opacity:.7'>v{APP_Version}</span>")
79
-
80
- with gr.Row():
81
- # Left: Chat
82
- with gr.Column(scale=3):
83
- chat = gr.Chatbot(label="Chat", height=520, type="messages", value=[])
84
- user_in = gr.Textbox(label="Your message", placeholder="Type here…", lines=2)
85
- with gr.Row():
86
- send_btn = gr.Button("Send", variant="primary")
87
- clear_btn = gr.Button("Clear")
88
-
89
- # Right: Model selector + logo + info
90
- with gr.Column(scale=1, min_width=320):
91
- model_choice = gr.Radio(
92
- choices=MODELS,
93
- value=MODELS[0],
94
- label="Models",
95
- info="Select Your Model Here",
96
- )
97
- gr.Image(LOGO_PATH, show_label=False, container=False)
98
- info_md = gr.Markdown(MODEL_INFO.get(MODELS[0], ""))
99
-
100
- def _update_info(m: str) -> str:
101
- title = f"**{m}**"
102
- desc = MODEL_INFO.get(m, "")
103
- return f"{title}\n\n{desc}"
104
- model_choice.change(_update_info, model_choice, info_md)
105
-
106
- # Step 1: push the user message into the chat stream
107
- def on_submit(msg, chat_messages):
108
- if not msg:
109
- return "", (chat_messages or [])
110
- updated = (chat_messages or []) + [{"role": "user", "content": msg}]
111
- return "", updated
112
-
113
- def bot_step(chat_messages, chosen_model, st):
114
- msgs = [{"role": "system", "content": SYSTEM_SEED}]
115
- # only include last 2 visible messages
116
- for m in (chat_messages or [])[-2:]:
117
- role, content = m.get("role"), m.get("content")
118
- if role in ("user", "assistant") and isinstance(content, str):
119
- msgs.append({"role": role, "content": content})
120
-
121
- reply = safe_chat_complete(chosen_model, msgs, max_tokens=800)
122
- updated = (chat_messages or []) + [{"role": "assistant", "content": reply}]
123
- st = st or init_state()
124
- st["messages"] = msgs + [{"role": "assistant", "content": reply}]
125
- return updated, st
126
-
127
-
128
- # Clear
129
- def on_clear():
130
- return [], init_state()
131
-
132
- # Wire events
133
- user_in.submit(on_submit, [user_in, chat], [user_in, chat]) \
134
- .then(bot_step, [chat, model_choice, state], [chat, state])
135
-
136
- send_btn.click(on_submit, [user_in, chat], [user_in, chat]) \
137
- .then(bot_step, [chat, model_choice, state], [chat, state])
138
-
139
- clear_btn.click(on_clear, outputs=[chat, state])
140
-
141
- if __name__ == "__main__":
142
- demo.queue()
143
- demo.launch(debug=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os, sys, time, asyncio
from typing import List, Dict
import gradio as gr
from dotenv import load_dotenv
import base64
from openai import OpenAI

# Windows: install the Proactor event-loop policy to avoid asyncio warnings.
if sys.platform.startswith("win"):
    try:
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
    except Exception:
        pass

# config — read from .env, with defaults for local runs
load_dotenv()
APP_Name = os.getenv("APP_Name", "Ghaymah GenAI chatbots")
APP_Version = os.getenv("APP_Version", "0.1.0")
API_KEY = os.getenv("API_KEY", "")

# Models: comma-separated "Models" env var wins; otherwise this fallback set.
MODELS = [m.strip() for m in os.getenv("Models", "").split(",") if m.strip()] or [
    "QwQ-32B",
    "DeepSeek-V3-0324",
    "Qwen/Qwen3-32B",
    "zai-org/GLM-4.5-Air",
    "moonshotai/Kimi-K2-Instruct",
]

# User-facing blurbs shown beside the model selector.
# Fixed: three entries were missing the " — " separator the others use,
# which rendered as run-together text (e.g. "(0324)versatile").
MODEL_INFO = {
    "QwQ-32B": "QwQ-32B — reasoning-focused; strong long-form answers.",
    "DeepSeek-V3-0324": "DeepSeek V3 (0324) — versatile, great multi-step reasoning.",
    "Qwen/Qwen3-32B": "Qwen3-32B — multilingual, good code & math.",
    "zai-org/GLM-4.5-Air": "GLM-4.5-Air — efficient generalist, good latency.",
    "moonshotai/Kimi-K2-Instruct": "Kimi K2 Instruct — long-context, helpful writing.",
}
LOGO_PATH = "download.jpeg"
COMPANY_LOGO = LOGO_PATH
OWNER_NAME = "ENG. Ahmed Yasser El Sharkawy"

# Header styling injected into the Blocks app.
CSS = """
.app-header{display:flex;align-items:center;gap:12px;justify-content:center;margin:6px 0 16px}
.app-header img{height:60px;border-radius:12px}
.app-title{font-weight:800;font-size:28px;line-height:1.1}
.app-sub{opacity:.7;font-size:14px}
"""

# OpenAI-compatible client (None until API_KEY is configured; callers check).

BASE_URL = "https://genai.ghaymah.systems"
client = OpenAI(api_key=API_KEY, base_url=BASE_URL) if API_KEY else None

SYSTEM_SEED = "You are Ghaymah Assistant. Be concise and helpful."

# Helpers
BACKOFF = [5, 10, 20]  # retry delays (seconds) used when a request is rate-limited
57
def logo_data_uri(path: str) -> str:
    """Embed the image at *path* as a ``data:`` URI string.

    Returns "" when the file does not exist. The MIME type is guessed
    from the file extension, defaulting to image/png for unknown ones.
    """
    if not os.path.exists(path):
        return ""
    mime_by_ext = {
        ".png": "image/png",
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".webp": "image/webp",
        ".gif": "image/gif",
    }
    suffix = os.path.splitext(path)[1].lower()
    mime = mime_by_ext.get(suffix, "image/png")
    with open(path, "rb") as fh:
        encoded = base64.b64encode(fh.read()).decode("utf-8")
    return f"data:{mime};base64,{encoded}"
68
+
69
def safe_chat_complete(model: str, messages: List[Dict], max_tokens: int = 800) -> str:
    """Run one chat completion, retrying briefly when rate-limited.

    Never raises: on success returns the assistant's text (or ""), and on
    failure returns a human-readable error string so the UI can display it.
    Retries up to len(BACKOFF) times, sleeping BACKOFF[i] seconds per attempt,
    when the error message mentions "429" or "Rate".
    """
    if not client:
        return "⚠️ Missing API_KEY in .env"
    attempt = 0
    while True:
        try:
            response = client.chat.completions.create(
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=0.3,
                timeout=90,
            )
            return response.choices[0].message.content or ""
        except Exception as exc:
            text = str(exc)
            rate_limited = "429" in text or "Rate" in text
            if rate_limited and attempt < len(BACKOFF):
                time.sleep(BACKOFF[attempt])
                attempt += 1
                continue
            return f"Request failed for `{model}`: {exc}"
89
+
90
def init_state():
    """Return a fresh per-session state: history seeded with the system prompt."""
    seed_message = {"role": "system", "content": SYSTEM_SEED}
    return {"messages": [seed_message]}
92
+
93
# Gradio app — layout (branded header, chat column, model-selector column)
# and all event wiring live inside this Blocks context.
with gr.Blocks(title=f"{APP_Name} v{APP_Version}", css=CSS) as demo:
    # Logo is inlined as a data URI so the header renders without a file route.
    header_logo_src = logo_data_uri(COMPANY_LOGO)
    logo_html = f"<img src='{header_logo_src}' alt='logo'>" if header_logo_src else ""
    gr.HTML(f"""
    <div class="app-header">
      {logo_html}
      <div class="app-header-text">
        <div class="app-title">{APP_Name}</div>
        <div class="app-sub">v{APP_Version} • {OWNER_NAME}</div>
      </div>
    </div>
    """)
    # Per-session conversation state (full message list including system seed).
    state = gr.State(init_state())

    # NOTE(review): this Markdown title repeats the app name/version already
    # shown in the HTML header above — likely left over from the pre-header
    # layout; confirm and consider removing.
    gr.Markdown(f"# {APP_Name} \n<span style='opacity:.7'>v{APP_Version}</span>")

    with gr.Row():
        # Left: Chat
        with gr.Column(scale=3):
            chat = gr.Chatbot(label="Chat", height=520, type="messages", value=[])
            user_in = gr.Textbox(label="Your message", placeholder="Type here…", lines=2)
            with gr.Row():
                send_btn = gr.Button("Send", variant="primary")
                clear_btn = gr.Button("Clear")

        # Right: Model selector + logo + info
        with gr.Column(scale=1, min_width=320):
            model_choice = gr.Radio(
                choices=MODELS,
                value=MODELS[0],
                label="Models",
                info="Select Your Model Here",
            )
            gr.Image(LOGO_PATH, show_label=False, container=False)
            info_md = gr.Markdown(MODEL_INFO.get(MODELS[0], ""))

    def _update_info(m: str) -> str:
        """Render the selected model's name and description as Markdown."""
        title = f"**{m}**"
        desc = MODEL_INFO.get(m, "")
        return f"{title}\n\n{desc}"
    model_choice.change(_update_info, model_choice, info_md)

    # Step 1: push the user message into the chat stream
    def on_submit(msg, chat_messages):
        # Empty input: clear the textbox and leave the history untouched.
        if not msg:
            return "", (chat_messages or [])
        updated = (chat_messages or []) + [{"role": "user", "content": msg}]
        return "", updated

    # Step 2: call the chosen model on recent history and append its reply.
    def bot_step(chat_messages, chosen_model, st):
        msgs = [{"role": "system", "content": SYSTEM_SEED}]
        # only include last 2 visible messages
        # (keeps the prompt short — older context is deliberately dropped)
        for m in (chat_messages or [])[-2:]:
            role, content = m.get("role"), m.get("content")
            if role in ("user", "assistant") and isinstance(content, str):
                msgs.append({"role": role, "content": content})

        reply = safe_chat_complete(chosen_model, msgs, max_tokens=800)
        updated = (chat_messages or []) + [{"role": "assistant", "content": reply}]
        st = st or init_state()
        # State mirrors only the trimmed prompt plus the new reply, not the
        # full visible history.
        st["messages"] = msgs + [{"role": "assistant", "content": reply}]
        return updated, st


    # Clear — reset both the visible chat and the session state.
    def on_clear():
        return [], init_state()

    # Wire events: submit/click first echoes the user message, then the
    # chained .then() runs the model step on the updated history.
    user_in.submit(on_submit, [user_in, chat], [user_in, chat]) \
        .then(bot_step, [chat, model_choice, state], [chat, state])

    send_btn.click(on_submit, [user_in, chat], [user_in, chat]) \
        .then(bot_step, [chat, model_choice, state], [chat, state])

    clear_btn.click(on_clear, outputs=[chat, state])
170
+
171
# Script entry point: enable Gradio's request queue, then serve the app.
if __name__ == "__main__":
    demo.queue()
    demo.launch()