rahul7star commited on
Commit
3cd74c8
·
verified ·
1 Parent(s): a92b520

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +247 -0
app.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py — Gradio chat front-end for an LLM served through the Hugging Face router.

import os
import textwrap
import traceback
import gradio as gr
from openai import OpenAI

# ---------------------------
# Configuration
# ---------------------------
# NOTE(review): the variable is named OPENAI_API_KEY but is expected to hold a
# Hugging Face token — it is passed to the HF router client below.
HF_ENV_VAR = "OPENAI_API_KEY" # actually holds your Hugging Face token
HF_TOKEN = os.environ.get(HF_ENV_VAR)
if not HF_TOKEN:
    # Warn and continue; the client below will fail on the first API call.
    print(
        f"ERROR: environment variable {HF_ENV_VAR} not found. "
        "Set it to your Hugging Face token before running."
    )

MODEL_ID = "openai/gpt-oss-20b" # served through Hugging Face router

# ---------------------------
# Initialize Hugging Face router client
# ---------------------------
# OpenAI-compatible client pointed at the Hugging Face inference router.
client = OpenAI(base_url="https://router.huggingface.co/v1", api_key=HF_TOKEN)

# ---------------------------
# Research loader (project root)
# ---------------------------
ROOT_DIR = "."  # directory scanned for research files
ALLOWED_EXT = (".txt", ".md")  # extensions treated as research notes
31
+ def load_research_from_root(max_total_chars: int = 12000):
32
+ files = []
33
+ for name in sorted(os.listdir(ROOT_DIR)):
34
+ if name.lower().endswith(ALLOWED_EXT) and name != "requirements.txt":
35
+ if name == os.path.basename(__file__):
36
+ continue
37
+ files.append(name)
38
+
39
+ if not files:
40
+ return "No research files (.txt/.md) found in project root."
41
+
42
+ combined_parts, total_len = [], 0
43
+ for fname in files:
44
+ try:
45
+ with open(os.path.join(ROOT_DIR, fname), "r", encoding="utf-8", errors="ignore") as f:
46
+ txt = f.read()
47
+ except Exception as e:
48
+ txt = f"[Error reading {fname}: {e}]"
49
+
50
+ if len(txt) > 8000:
51
+ sample = txt[:8000] + "\n\n[TRUNCATED]\n"
52
+ else:
53
+ sample = txt
54
+
55
+ part = f"--- {fname} ---\n{sample.strip()}\n"
56
+ combined_parts.append(part)
57
+ total_len += len(part)
58
+ if total_len >= max_total_chars:
59
+ break
60
+
61
+ combined = "\n\n".join(combined_parts)
62
+ if len(combined) > max_total_chars:
63
+ combined = combined[:max_total_chars] + "\n\n[TRUNCATED]"
64
+ return combined
65
+
66
+
# ---------------------------
# System prompt templates
# ---------------------------
# Loaded once at import time; embedded into every system prompt built below.
research_context = load_research_from_root(max_total_chars=12000)
def get_system_prompt(mode="chat", context=None):
    """Return the system prompt for the requested *mode*.

    Parameters
    ----------
    mode : str
        ``"chat"`` selects the conversational persona; any other value
        selects the research/analytical persona.
    context : str or None
        Research text to embed in the prompt.  Defaults to the
        module-level ``research_context`` loaded at import time.

    Returns
    -------
    str
        The fully assembled system prompt.
    """
    if context is None:
        context = research_context

    # Dedent the static template BEFORE splicing in the context:
    # textwrap.dedent strips only the longest COMMON leading whitespace,
    # so interpolating flush-left context text first would defeat it and
    # leave the whole template indented.
    if mode == "chat":
        persona = textwrap.dedent("""
            OhamLab A Quantum Intelligence AI.
            Mode: Conversational.
            Guidelines:
            - Answer clearly in natural paragraphs (3–6 sentences).
            - Do NOT use tables, spreadsheets, or rigid formatting unless explicitly asked.
            - Always address the user’s question directly before expanding.
            - Be insightful, empathetic, and concise.
        """).strip()
    else:
        persona = textwrap.dedent("""
            You are OhamLab, a Quantum Dialectical Agentic Crosssphere Intelligence AI.
            Mode: Research / Analytical.
            Guidelines:
            - Write structured, multi-sphere reasoning (science, philosophy, psychology, etc).
            - Use sections, subpoints, and dialectical chains.
            - Provide deep analysis, even if it looks like a research paper.
            - Always reference the research context if relevant.
        """).strip()

    return (
        f"{persona}\n\n"
        f"--- BEGIN RESEARCH CONTEXT (TRIMMED) ---\n"
        f"{context}\n"
        f"--- END RESEARCH CONTEXT ---"
    )
# ---------------------------
# State
# ---------------------------
# Module-level mutable state shared by all Gradio callbacks.
# NOTE(review): this is single-session state — every connected user shares
# the same transcript; confirm this is intended for the deployment.
conversation_mode = "chat" # can toggle
# Model-side transcript in OpenAI chat format, seeded with the system prompt.
history_messages = [{"role": "system", "content": get_system_prompt("chat")}]
# UI-side transcript as (user, bot) tuples for gr.Chatbot.
chat_history_for_ui = []
# ---------------------------
# Model call helper
# ---------------------------
def call_model_get_response(model_id: str, messages: list, max_tokens: int = 700):
    """Send *messages* to the router-served model and return the reply text.

    Falls back to the raw first choice's string form when the response
    carries no usable message content (e.g. content is None).
    """
    completion = client.chat.completions.create(
        model=model_id,
        messages=messages,
        max_tokens=max_tokens,
        temperature=0.7,  # fixed sampling temperature for every call
    )
    first_choice = completion.choices[0]
    try:
        text = first_choice.message.content.strip()
    except Exception:
        return str(first_choice)
    return text
# ---------------------------
# Chat logic
# ---------------------------
def chat_with_model(user_message, chat_history):
    """Handle one chat turn: mode switches, model call, history update.

    Returns a ``("", updated_ui_history)`` pair — the empty string clears
    the input textbox, the list refreshes the Chatbot component.
    """
    global history_messages, chat_history_for_ui, conversation_mode

    # Ignore empty / whitespace-only submissions.
    if not user_message or str(user_message).strip() == "":
        return "", chat_history

    # In-band mode-switch commands reset the system prompt and the
    # model-side history, then show a confirmation bubble.  The bubble is
    # recorded in chat_history_for_ui so it survives the next turn
    # (the main path below always returns that canonical list).
    lowered = user_message.lower()
    if "switch to research mode" in lowered:
        conversation_mode = "research"
        history_messages = [{"role": "system", "content": get_system_prompt("research")}]
        chat_history_for_ui.append(("🟢 Mode switched", "🔬 Research Mode activated."))
        return "", chat_history_for_ui
    elif "switch to chat mode" in lowered:
        conversation_mode = "chat"
        history_messages = [{"role": "system", "content": get_system_prompt("chat")}]
        chat_history_for_ui.append(("🟢 Mode switched", "💬 Chat Mode activated."))
        return "", chat_history_for_ui

    # Append the user message to the model-side transcript.
    history_messages.append({"role": "user", "content": user_message})

    try:
        bot_text = call_model_get_response(MODEL_ID, history_messages, max_tokens=700)
    except Exception as e:
        # Keep the UI alive on API failure and show the traceback tail as
        # text (joining the lines — formatting the list itself would print
        # a Python list repr).
        tb_tail = "\n".join(traceback.format_exc().splitlines()[-6:])
        bot_text = f"⚠️ **Error**: {str(e)}\n\nTraceback:\n{tb_tail}"

    # Mirror the exchange into both transcripts.
    history_messages.append({"role": "assistant", "content": bot_text})
    chat_history_for_ui.append((user_message, bot_text))

    return "", chat_history_for_ui
def reset_chat():
    """Clear both transcripts, re-seeding the current mode's system prompt.

    Returns a fresh empty list for the Chatbot component.
    """
    global history_messages, chat_history_for_ui
    chat_history_for_ui = []
    history_messages = [
        {"role": "system", "content": get_system_prompt(conversation_mode)}
    ]
    return []
# ---------------------------
# Gradio UI
# ---------------------------
def build_ui():
    """Build and launch the Gradio chat interface.

    NOTE(review): demo.launch() is called *before* return, so build_ui()
    blocks until the server stops and the returned ``demo`` is only
    reachable afterwards — confirm whether launch belongs in the caller.
    """
    with gr.Blocks(
        theme=gr.themes.Soft(),
        css="""
        #chatbot {
            background-color: #f9f9fb;
            border-radius: 12px;
            padding: 10px;
            overflow-y: auto;
        }
        .user-bubble {
            background: #4a90e2;
            color: white;
            border-radius: 14px;
            padding: 8px 12px;
            margin: 6px;
            max-width: 75%;
            align-self: flex-end;
            font-size: 14px;
        }
        .bot-bubble {
            background: #e6e6e6;
            color: #333;
            border-radius: 14px;
            padding: 8px 12px;
            margin: 6px;
            max-width: 75%;
            align-self: flex-start;
            font-size: 14px;
        }
        #controls {
            display: flex;
            gap: 8px;
            align-items: center;
            margin-top: 6px;
        }
        #topbar {
            display: flex;
            justify-content: flex-end;
            gap: 8px;
            margin-bottom: 6px;
        }
        """
    ) as demo:
        # Top bar with close + clear
        # NOTE(review): close_btn has no click handler wired below — dead
        # control, or handler missing?
        with gr.Row(elem_id="topbar"):
            close_btn = gr.Button("❌", size="sm")
            clear_btn = gr.Button("🧹 Clear", size="sm")

        # Transcript display; type="tuples" matches the (user, bot) pairs
        # produced by chat_with_model.
        chatbot = gr.Chatbot(
            label="",
            height=350, # reduced height so input is visible
            elem_id="chatbot",
            type="tuples",
            bubble_full_width=False,
            avatar_images=("👤", "🤖"),
        )

        # Input row: textbox plus explicit send button.
        with gr.Row(elem_id="controls"):
            msg = gr.Textbox(
                placeholder="Type your message here...",
                lines=2,
                scale=8,
            )
            submit_btn = gr.Button("🚀 Send", variant="primary", scale=2)

        # Wire buttons — both the button click and Enter in the textbox
        # route through chat_with_model; Clear resets via reset_chat.
        submit_btn.click(chat_with_model, inputs=[msg, chatbot], outputs=[msg, chatbot])
        msg.submit(chat_with_model, inputs=[msg, chatbot], outputs=[msg, chatbot])
        clear_btn.click(reset_chat, inputs=None, outputs=chatbot)

    # Bind on all interfaces at the conventional Spaces port.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
    return demo
# ---------------------------
# Entrypoint
# ---------------------------
if __name__ == "__main__":
    # NOTE(review): the banner says "Aerelyth" while the prompts name the
    # assistant "OhamLab" — confirm which product name is intended.
    print(f"✅ Starting Aerelyth with Hugging Face model: {MODEL_ID}")
    build_ui()