# OhamLab-AI — app_t.py
# Source: Hugging Face Space by rahul7star (commit ef48f71, "Update app_t.py").
import spaces
import os
import textwrap
import traceback
import gradio as gr
from transformers import pipeline
# ---------------------------
# Configuration
# ---------------------------
MODEL_ID = "openai/gpt-oss-20b" # Hugging Face Transformers model
# ---------------------------
# Load pipeline
# ---------------------------
# device_map="auto" will use GPU if available, otherwise CPU
# NOTE: this is a module-level side effect — the model is downloaded and
# loaded at import time, before any UI is built.
pipe = pipeline("text-generation", model=MODEL_ID, device_map="auto")
# ---------------------------
# Research loader (project root)
# ---------------------------
ROOT_DIR = "."
ALLOWED_EXT = (".txt", ".md")


def load_research_from_root(max_total_chars: int = 12000, per_file_chars: int = 8000) -> str:
    """Concatenate the text of every .txt/.md file in ROOT_DIR into one string.

    Files are read in sorted name order; ``requirements.txt`` and this script
    itself are skipped. Each file's sample is capped at ``per_file_chars``
    characters and the combined result is capped at ``max_total_chars``.

    Args:
        max_total_chars: Upper bound on the combined context length.
        per_file_chars: Upper bound on the sample taken from a single file
            (generalized from the previously hard-coded 8000).

    Returns:
        The combined research context, or a placeholder message when no
        eligible files exist. Read errors are recorded inline, never raised.
    """
    self_name = os.path.basename(__file__)  # hoisted: loop-invariant
    files = [
        name
        for name in sorted(os.listdir(ROOT_DIR))
        if name.lower().endswith(ALLOWED_EXT)
        and name != "requirements.txt"
        and name != self_name
    ]
    if not files:
        return "No research files (.txt/.md) found in project root."
    combined_parts, total_len = [], 0
    for fname in files:
        try:
            with open(os.path.join(ROOT_DIR, fname), "r", encoding="utf-8", errors="ignore") as f:
                txt = f.read()
        except Exception as e:
            # Best-effort: record the failure inline rather than aborting the scan.
            txt = f"[Error reading {fname}: {e}]"
        if len(txt) > per_file_chars:
            sample = txt[:per_file_chars] + "\n\n[TRUNCATED]\n"
        else:
            sample = txt
        part = f"--- {fname} ---\n{sample.strip()}\n"
        combined_parts.append(part)
        total_len += len(part)
        if total_len >= max_total_chars:
            break
    combined = "\n\n".join(combined_parts)
    if len(combined) > max_total_chars:
        combined = combined[:max_total_chars] + "\n\n[TRUNCATED]"
    return combined
# ---------------------------
# System prompt templates
# ---------------------------
research_context = load_research_from_root(max_total_chars=12000)


def get_system_prompt(mode="chat"):
    """Return the system prompt for *mode*.

    "chat" selects the conversational persona; any other value selects the
    research/analytical persona. Both templates embed the module-level
    ``research_context`` captured at import time.
    """
    if mode == "chat":
        raw = f"""
OhamLab A Quantum Intelligence AI.
Mode: Conversational.
Guidelines:
- Answer clearly in natural paragraphs (3–6 sentences).
- Do NOT use tables, spreadsheets, or rigid formatting unless explicitly asked.
- Always address the user’s question directly before expanding.
- Be insightful, empathetic, and concise.
--- BEGIN RESEARCH CONTEXT (TRIMMED) ---
{research_context}
--- END RESEARCH CONTEXT ---
"""
    else:
        raw = f"""
You are OhamLab, a Quantum Dialectical Agentic Crosssphere Intelligence AI.
Mode: Research / Analytical.
Guidelines:
- Write structured, multi-sphere reasoning (science, philosophy, psychology, etc).
- Use sections, subpoints, and dialectical chains.
- Provide deep analysis, even if it looks like a research paper.
- Always reference the research context if relevant.
--- BEGIN RESEARCH CONTEXT (TRIMMED) ---
{research_context}
--- END RESEARCH CONTEXT ---
"""
    # Template content sits at column 0, so dedent is a deterministic no-op;
    # kept for parity with the original call chain.
    return textwrap.dedent(raw).strip()
# ---------------------------
# State
# ---------------------------
# Module-level mutable chat state shared by all handlers below.
# NOTE(review): single shared conversation — assumes a one-user Space; not
# thread-safe and not per-session. Confirm this is acceptable for deployment.
conversation_mode = "chat" # default
history_messages = [{"role": "system", "content": get_system_prompt("chat")}]
chat_history_for_ui = []
# ---------------------------
# Model call helper
# ---------------------------
def call_model_get_response(model_id: str, messages: list, max_tokens: int = 700) -> str:
    """Flatten chat *messages* into one prompt string and generate a reply.

    Args:
        model_id: Kept for interface compatibility; generation always uses
            the module-level ``pipe`` (built once from MODEL_ID).
        messages: List of {"role": ..., "content": ...} dicts. Roles other
            than system/user/assistant are silently skipped.
        max_tokens: Maximum number of new tokens to generate.

    Returns:
        The generated assistant text, or a formatted error string — this
        helper never raises.
    """
    role_tags = {"system": "[SYSTEM]", "user": "[USER]", "assistant": "[ASSISTANT]"}
    # Build with list + join instead of repeated string concatenation.
    parts = []
    for m in messages:
        tag = role_tags.get(m["role"])
        if tag is not None:
            parts.append(f"{tag}: {m['content']}\n")
    parts.append("[ASSISTANT]:")
    conversation_text = "".join(parts)
    try:
        output = pipe(
            conversation_text,
            max_new_tokens=max_tokens,
            do_sample=True,
            temperature=0.7,
            return_full_text=False,  # return only the newly generated continuation
        )
        return output[0]["generated_text"].strip()
    except Exception as e:
        # Fix: join the traceback tail into readable lines; the original
        # interpolated a Python list repr into the user-facing message.
        tb_tail = "\n".join(traceback.format_exc().splitlines()[-6:])
        return f"⚠️ **Error**: {str(e)}\n\nTraceback:\n{tb_tail}"
# ---------------------------
# Chat logic
# ---------------------------
@spaces.GPU()
def chat_with_model(user_message, chat_history):
    """Handle one chat turn: mode-switch commands, or a full model round-trip.

    Args:
        user_message: Raw text from the input textbox.
        chat_history: Current Chatbot value; only echoed back on empty input —
            the module-level state is the source of truth otherwise.

    Returns:
        ("", updated_history) — the empty string clears the input textbox.
    """
    global history_messages, chat_history_for_ui, conversation_mode
    if not user_message or str(user_message).strip() == "":
        return "", chat_history
    lowered = user_message.lower()
    # Mode switching commands reset the model-side transcript.
    # Fix: record the switch notice in chat_history_for_ui — previously it was
    # only appended to the returned value, so the next turn (which returns
    # chat_history_for_ui) silently dropped it from the UI.
    if "switch to research mode" in lowered:
        conversation_mode = "research"
        history_messages = [{"role": "system", "content": get_system_prompt("research")}]
        chat_history_for_ui.append(("🟢 Mode switched", "🔬 Research Mode activated."))
        return "", chat_history_for_ui
    elif "switch to chat mode" in lowered:
        conversation_mode = "chat"
        history_messages = [{"role": "system", "content": get_system_prompt("chat")}]
        chat_history_for_ui.append(("🟢 Mode switched", "💬 Chat Mode activated."))
        return "", chat_history_for_ui
    # Append user message, then generate.
    history_messages.append({"role": "user", "content": user_message})
    try:
        bot_text = call_model_get_response(MODEL_ID, history_messages, max_tokens=700)
    except Exception as e:
        # call_model_get_response already catches internally; last-resort guard.
        # Fix: join traceback lines instead of embedding a list repr.
        tb_tail = "\n".join(traceback.format_exc().splitlines()[-6:])
        bot_text = f"⚠️ **Error**: {str(e)}\n\nTraceback:\n{tb_tail}"
    # Append response to both the model transcript and the UI history.
    history_messages.append({"role": "assistant", "content": bot_text})
    chat_history_for_ui.append((user_message, bot_text))
    return "", chat_history_for_ui
def reset_chat():
    """Clear the model transcript and the UI history, keeping the current mode."""
    global history_messages, chat_history_for_ui
    system_msg = {"role": "system", "content": get_system_prompt(conversation_mode)}
    history_messages = [system_msg]
    chat_history_for_ui = []
    # Gradio uses the returned empty list to blank the Chatbot component.
    return []
# ---------------------------
# Gradio UI
# ---------------------------
def build_ui():
    """Assemble the chat interface, start the web server, and return the app.

    NOTE(review): ``demo.launch()`` is called inside this builder, so the
    function blocks until the server stops; the entrypoint relies on that.
    """
    custom_css = """
#chatbot {
    background-color: #f9f9fb;
    border-radius: 12px;
    padding: 10px;
    overflow-y: auto;
}
.user-bubble {
    background: #4a90e2;
    color: white;
    border-radius: 14px;
    padding: 8px 12px;
    margin: 6px;
    max-width: 75%;
    align-self: flex-end;
    font-size: 14px;
}
.bot-bubble {
    background: #e6e6e6;
    color: #333;
    border-radius: 14px;
    padding: 8px 12px;
    margin: 6px;
    max-width: 75%;
    align-self: flex-start;
    font-size: 14px;
}
#controls {
    display: flex;
    gap: 8px;
    align-items: center;
    margin-top: 6px;
}
#topbar {
    display: flex;
    justify-content: flex-end;
    gap: 8px;
    margin-bottom: 6px;
}
"""
    with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
        # Top bar with close + clear controls.
        with gr.Row(elem_id="topbar"):
            close_btn = gr.Button("❌", size="sm")  # NOTE(review): never wired to a handler — confirm intent
            clear_btn = gr.Button("🧹 Clear", size="sm")
        chatbot = gr.Chatbot(
            label="",
            height=350,  # reduced height so the input row stays visible
            elem_id="chatbot",
            type="tuples",
            bubble_full_width=False,
            avatar_images=("👤", "🤖"),
        )
        with gr.Row(elem_id="controls"):
            msg = gr.Textbox(
                placeholder="Type your message here...",
                lines=2,
                scale=8,
            )
            submit_btn = gr.Button("🚀 Send", variant="primary", scale=2)
        # Both the Send button and textbox Enter submit the message; the
        # handler returns ("", history) so the textbox is cleared each turn.
        submit_btn.click(chat_with_model, inputs=[msg, chatbot], outputs=[msg, chatbot])
        msg.submit(chat_with_model, inputs=[msg, chatbot], outputs=[msg, chatbot])
        clear_btn.click(reset_chat, inputs=None, outputs=chatbot)
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
    return demo
# ---------------------------
# Entrypoint
# ---------------------------
if __name__ == "__main__":
    # NOTE(review): banner says "Aerelyth" while the prompts say "OhamLab" —
    # confirm which product name is intended.
    print(f"✅ Starting Aerelyth with Transformers model: {MODEL_ID}")
    build_ui()