rahul7star committed on
Commit
e01c0c4
·
verified ·
1 Parent(s): 4fb3db5

Create app_t.py

Browse files
Files changed (1) hide show
  1. app_t.py +252 -0
app_t.py ADDED
@@ -0,0 +1,252 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import textwrap
3
+ import traceback
4
+ import gradio as gr
5
+ from transformers import pipeline
6
+
7
# ---------------------------
# Configuration
# ---------------------------
# NOTE(review): gpt-oss-20b is a ~20B-parameter model; loading it through a
# plain text-generation pipeline needs substantial accelerator memory —
# confirm the target hardware can actually hold it.
MODEL_ID = "openai/gpt-oss-20b" # Hugging Face Transformers model

# ---------------------------
# Load pipeline
# ---------------------------
# device_map="auto" will use GPU if available, otherwise CPU.
# Loaded once at import time so every request reuses the same pipeline.
pipe = pipeline("text-generation", model=MODEL_ID, device_map="auto")

# ---------------------------
# Research loader (project root)
# ---------------------------
ROOT_DIR = "."  # directory scanned for research context files
ALLOWED_EXT = (".txt", ".md")  # only plain-text / markdown files are ingested
23
+
24
def load_research_from_root(max_total_chars: int = 12000,
                            max_file_chars: int = 8000,
                            root_dir=None,
                            allowed_ext=None) -> str:
    """Concatenate research files (.txt/.md) found in the project root.

    Scans ``root_dir`` for files whose lowercased name ends in
    ``allowed_ext``, skipping ``requirements.txt`` and this script itself.
    Each file is capped at ``max_file_chars`` characters and the combined
    result at ``max_total_chars`` characters.

    Args:
        max_total_chars: Hard cap on the combined context length.
        max_file_chars: Per-file cap before truncation (previously a
            hard-coded 8000).
        root_dir: Directory to scan; defaults to the module-level ROOT_DIR.
        allowed_ext: Tuple of accepted extensions; defaults to the
            module-level ALLOWED_EXT.

    Returns:
        The combined, possibly truncated, context string — or a short
        placeholder message when no matching files exist.
    """
    # Fall back to module-level defaults lazily so explicit arguments never
    # touch the globals (keeps the function usable in isolation).
    if root_dir is None:
        root_dir = ROOT_DIR
    if allowed_ext is None:
        allowed_ext = ALLOWED_EXT

    self_name = os.path.basename(__file__)
    files = [
        name
        for name in sorted(os.listdir(root_dir))
        if name.lower().endswith(allowed_ext)
        and name != "requirements.txt"
        and name != self_name  # never ingest this script itself
    ]

    if not files:
        return "No research files (.txt/.md) found in project root."

    combined_parts, total_len = [], 0
    for fname in files:
        try:
            with open(os.path.join(root_dir, fname), "r", encoding="utf-8", errors="ignore") as f:
                txt = f.read()
        except Exception as e:
            # Best-effort: an unreadable file becomes an inline error marker
            # instead of aborting the whole context build.
            txt = f"[Error reading {fname}: {e}]"

        if len(txt) > max_file_chars:
            sample = txt[:max_file_chars] + "\n\n[TRUNCATED]\n"
        else:
            sample = txt

        part = f"--- {fname} ---\n{sample.strip()}\n"
        combined_parts.append(part)
        total_len += len(part)
        if total_len >= max_total_chars:
            break  # enough context collected; skip the remaining files

    combined = "\n\n".join(combined_parts)
    if len(combined) > max_total_chars:
        combined = combined[:max_total_chars] + "\n\n[TRUNCATED]"
    return combined
58
+
59
# ---------------------------
# System prompt templates
# ---------------------------
# Loaded once at import time; both prompt modes embed this same snapshot,
# so research files added after startup are not seen until restart.
research_context = load_research_from_root(max_total_chars=12000)
63
+
64
def get_system_prompt(mode="chat"):
    """Return the system prompt for ``mode``.

    ``"chat"`` yields the conversational prompt; any other value (used here
    as ``"research"``) yields the analytical prompt. Both embed the
    module-level ``research_context`` snapshot captured at import time.
    """
    if mode == "chat":
        return textwrap.dedent(f"""
OhamLab A Quantum Intelligence AI.
Mode: Conversational.
Guidelines:
- Answer clearly in natural paragraphs (3–6 sentences).
- Do NOT use tables, spreadsheets, or rigid formatting unless explicitly asked.
- Always address the user’s question directly before expanding.
- Be insightful, empathetic, and concise.

--- BEGIN RESEARCH CONTEXT (TRIMMED) ---
{research_context}
--- END RESEARCH CONTEXT ---
""").strip()
    else:
        return textwrap.dedent(f"""
You are OhamLab, a Quantum Dialectical Agentic Crosssphere Intelligence AI.
Mode: Research / Analytical.
Guidelines:
- Write structured, multi-sphere reasoning (science, philosophy, psychology, etc).
- Use sections, subpoints, and dialectical chains.
- Provide deep analysis, even if it looks like a research paper.
- Always reference the research context if relevant.

--- BEGIN RESEARCH CONTEXT (TRIMMED) ---
{research_context}
--- END RESEARCH CONTEXT ---
""").strip()
93
+
94
# ---------------------------
# State
# ---------------------------
# Module-level, single-session state. NOTE(review): these globals are shared
# by every visitor of the Gradio app in one process — concurrent users would
# interleave histories; confirm single-user deployment is intended.
conversation_mode = "chat" # default
# Model-facing history in {"role", "content"} format, seeded with the
# system prompt for the current mode.
history_messages = [{"role": "system", "content": get_system_prompt("chat")}]
# (user, bot) tuples displayed by the Gradio Chatbot widget.
chat_history_for_ui = []
100
+
101
# ---------------------------
# Model call helper
# ---------------------------
def call_model_get_response(model_id: str, messages: list, max_tokens: int = 700):
    """Render the chat history to a flat prompt and run the generation pipeline.

    Args:
        model_id: Kept for interface compatibility; the module-level ``pipe``
            (already bound to ``MODEL_ID``) performs the actual generation.
        messages: Chat history as ``{"role", "content"}`` dicts; roles other
            than system/user/assistant are silently skipped.
        max_tokens: Upper bound on newly generated tokens.

    Returns:
        The generated reply text, or a markdown-formatted error message —
        this helper never raises.
    """
    # Convert structured messages into a plain-text transcript using
    # [SYSTEM]/[USER]/[ASSISTANT] role tags.
    role_tags = {"system": "[SYSTEM]", "user": "[USER]", "assistant": "[ASSISTANT]"}
    lines = []
    for m in messages:
        tag = role_tags.get(m["role"])
        if tag is not None:
            lines.append(f"{tag}: {m['content']}\n")
    # Trailing open tag cues the model to answer as the assistant.
    conversation_text = "".join(lines) + "[ASSISTANT]:"

    try:
        output = pipe(
            conversation_text,
            max_new_tokens=max_tokens,
            do_sample=True,
            temperature=0.7,
            return_full_text=False,  # return only the completion, not the prompt
        )
        return output[0]["generated_text"].strip()
    except Exception as e:
        tb = traceback.format_exc()
        # Bug fix: the traceback tail was interpolated as a Python list repr
        # (f"{tb.splitlines()[-6:]}"); join it into readable lines instead.
        tail = "\n".join(tb.splitlines()[-6:])
        return f"⚠️ **Error**: {str(e)}\n\nTraceback:\n{tail}"
129
+
130
# ---------------------------
# Chat logic
# ---------------------------
def chat_with_model(user_message, chat_history):
    """Handle one chat turn; returns ("", updated_ui_history).

    Recognizes the phrases "switch to research mode" / "switch to chat mode"
    anywhere in the message: these reset the model-side system prompt instead
    of querying the model.
    """
    global history_messages, chat_history_for_ui, conversation_mode

    # Ignore empty / whitespace-only submissions.
    if not user_message or str(user_message).strip() == "":
        return "", chat_history

    lowered = user_message.lower()

    # Mode switching commands: reset the model-side history to the new
    # system prompt and confirm in the UI transcript.
    if "switch to research mode" in lowered:
        conversation_mode = "research"
        history_messages = [{"role": "system", "content": get_system_prompt("research")}]
        # Bug fix: persist the confirmation bubble in chat_history_for_ui —
        # previously it was appended only to a returned copy, so the next
        # turn (which returns chat_history_for_ui) silently dropped it.
        chat_history_for_ui.append(("🟢 Mode switched", "🔬 Research Mode activated."))
        return "", chat_history_for_ui
    elif "switch to chat mode" in lowered:
        conversation_mode = "chat"
        history_messages = [{"role": "system", "content": get_system_prompt("chat")}]
        chat_history_for_ui.append(("🟢 Mode switched", "💬 Chat Mode activated."))
        return "", chat_history_for_ui

    # Append the user message to the model-side history.
    history_messages.append({"role": "user", "content": user_message})

    try:
        bot_text = call_model_get_response(MODEL_ID, history_messages, max_tokens=700)
    except Exception as e:
        tb = traceback.format_exc()
        # Bug fix: join the traceback tail instead of embedding a list repr.
        tail = "\n".join(tb.splitlines()[-6:])
        bot_text = f"⚠️ **Error**: {str(e)}\n\nTraceback:\n{tail}"

    # Record the exchange for both the model and the UI.
    history_messages.append({"role": "assistant", "content": bot_text})
    chat_history_for_ui.append((user_message, bot_text))

    return "", chat_history_for_ui
163
+
164
def reset_chat():
    """Clear both histories, reseeding the system prompt for the current mode."""
    global history_messages, chat_history_for_ui
    system_message = {"role": "system", "content": get_system_prompt(conversation_mode)}
    history_messages = [system_message]
    chat_history_for_ui = []
    # Gradio expects the new (empty) chatbot value back.
    return []
169
+
170
# ---------------------------
# Gradio UI
# ---------------------------
def build_ui():
    """Assemble the Gradio Blocks chat interface and start the server.

    NOTE(review): demo.launch() is invoked *inside* this function, so in
    script mode it blocks here and the trailing ``return demo`` only runs
    after the server shuts down — confirm this is intentional.
    """
    with gr.Blocks(
        theme=gr.themes.Soft(),
        # Inline CSS: chat panel background, user/bot bubble styling, and
        # flex layouts for the control row and top bar.
        css="""
        #chatbot {
            background-color: #f9f9fb;
            border-radius: 12px;
            padding: 10px;
            overflow-y: auto;
        }
        .user-bubble {
            background: #4a90e2;
            color: white;
            border-radius: 14px;
            padding: 8px 12px;
            margin: 6px;
            max-width: 75%;
            align-self: flex-end;
            font-size: 14px;
        }
        .bot-bubble {
            background: #e6e6e6;
            color: #333;
            border-radius: 14px;
            padding: 8px 12px;
            margin: 6px;
            max-width: 75%;
            align-self: flex-start;
            font-size: 14px;
        }
        #controls {
            display: flex;
            gap: 8px;
            align-items: center;
            margin-top: 6px;
        }
        #topbar {
            display: flex;
            justify-content: flex-end;
            gap: 8px;
            margin-bottom: 6px;
        }
        """
    ) as demo:
        # Top bar with close + clear.
        # NOTE(review): close_btn is created but never wired to any handler
        # below — clicking it does nothing; confirm whether it is intended
        # to hide/close the widget.
        with gr.Row(elem_id="topbar"):
            close_btn = gr.Button("❌", size="sm")
            clear_btn = gr.Button("🧹 Clear", size="sm")

        # Transcript widget; type="tuples" matches the (user, bot) pairs
        # kept in chat_history_for_ui.
        # NOTE(review): avatar_images normally expects image paths/URLs —
        # plain emoji strings may not render as avatars; verify against the
        # installed Gradio version.
        chatbot = gr.Chatbot(
            label="",
            height=350, # reduced height so input is visible
            elem_id="chatbot",
            type="tuples",
            bubble_full_width=False,
            avatar_images=("👤", "🤖"),
        )

        # Input row: multi-line textbox plus an explicit send button.
        with gr.Row(elem_id="controls"):
            msg = gr.Textbox(
                placeholder="Type your message here...",
                lines=2,
                scale=8,
            )
            submit_btn = gr.Button("🚀 Send", variant="primary", scale=2)

        # Wire buttons: both Enter-submit and the Send button trigger one
        # chat turn; Clear resets both the model and UI histories.
        submit_btn.click(chat_with_model, inputs=[msg, chatbot], outputs=[msg, chatbot])
        msg.submit(chat_with_model, inputs=[msg, chatbot], outputs=[msg, chatbot])
        clear_btn.click(reset_chat, inputs=None, outputs=chatbot)

    # Blocks until the server exits (see docstring note).
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
    return demo
246
+
247
# ---------------------------
# Entrypoint
# ---------------------------
if __name__ == "__main__":
    # Announce which model is being served, then build and launch the UI
    # (build_ui() calls demo.launch() itself).
    banner = f"✅ Starting Aerelyth with Transformers model: {MODEL_ID}"
    print(banner)
    build_ui()