Spaces:
Running on CPU Upgrade
Running on CPU Upgrade
Claude committed on
Commit ·
cb5a2a4
1
Parent(s): 47586ab
feat: Implement OAuth-based auth flow with gr.LoginButton and request context
Browse files
app.py
CHANGED
|
@@ -112,7 +112,7 @@ def auto_select_perspectives(query: str, n: int = 4) -> List[str]:
|
|
| 112 |
return selected[:n]
|
| 113 |
|
| 114 |
|
| 115 |
-
def call_perspective(perspective_name: str, query: str) -> str:
|
| 116 |
"""Generate response from a single perspective using HF Inference API."""
|
| 117 |
p = get_perspective(perspective_name)
|
| 118 |
if not p:
|
|
@@ -121,12 +121,20 @@ def call_perspective(perspective_name: str, query: str) -> str:
|
|
| 121 |
if not HAS_LLM:
|
| 122 |
return f"[{perspective_name.upper()}] Demo response (LLM unavailable)"
|
| 123 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
try:
|
| 125 |
messages = [
|
| 126 |
{"role": "system", "content": p.system_prompt},
|
| 127 |
{"role": "user", "content": query}
|
| 128 |
]
|
| 129 |
-
response =
|
| 130 |
messages,
|
| 131 |
max_tokens=256,
|
| 132 |
temperature=0.7,
|
|
@@ -141,9 +149,17 @@ def call_perspective(perspective_name: str, query: str) -> str:
|
|
| 141 |
return f"[{perspective_name}] Error: {str(e)[:80]}"
|
| 142 |
|
| 143 |
|
| 144 |
-
def generate_synthesis(perspectives_responses: Dict[str, str], query: str) -> str:
|
| 145 |
"""Generate synthesis from all perspective responses."""
|
| 146 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
# Try LLM synthesis first if available
|
| 148 |
if HAS_LLM:
|
| 149 |
perspective_text = "\n\n".join(
|
|
@@ -162,7 +178,7 @@ Brief unified insight:"""
|
|
| 162 |
{"role": "system", "content": "Synthesize multi-perspective responses concisely."},
|
| 163 |
{"role": "user", "content": synthesis_prompt}
|
| 164 |
]
|
| 165 |
-
response =
|
| 166 |
messages,
|
| 167 |
max_tokens=256,
|
| 168 |
temperature=0.7,
|
|
@@ -680,7 +696,8 @@ def process_message(
|
|
| 680 |
chat_history: List,
|
| 681 |
state: Dict,
|
| 682 |
perspective_mode: str,
|
| 683 |
-
custom_perspectives: List[str]
|
|
|
|
| 684 |
) -> Tuple:
|
| 685 |
"""Main conversation handler with all visualizations."""
|
| 686 |
|
|
@@ -708,7 +725,7 @@ def process_message(
|
|
| 708 |
# ===== STEP 4-6: Generate & Evaluate =====
|
| 709 |
perspectives_responses = {}
|
| 710 |
for perspective_name in selected_perspectives:
|
| 711 |
-
response = call_perspective(perspective_name, user_msg)
|
| 712 |
perspectives_responses[perspective_name] = response
|
| 713 |
|
| 714 |
aegis = state.get("aegis") or AEGIS()
|
|
@@ -728,7 +745,7 @@ def process_message(
|
|
| 728 |
mean_tension = np.mean(list(tensions.values())) if tensions else 0.3
|
| 729 |
|
| 730 |
# ===== STEP 7: Synthesis =====
|
| 731 |
-
synthesis = generate_synthesis(perspectives_responses, user_msg)
|
| 732 |
chat_history.append({"role": "assistant", "content": synthesis})
|
| 733 |
|
| 734 |
# ===== STEP 8-9: Resonance & Memory =====
|
|
@@ -1129,40 +1146,20 @@ def create_interface():
|
|
| 1129 |
</div>
|
| 1130 |
""")
|
| 1131 |
|
| 1132 |
-
#
|
| 1133 |
with gr.Group():
|
| 1134 |
-
gr.Markdown("### 🔐
|
| 1135 |
-
gr.Markdown("
|
| 1136 |
with gr.Row():
|
| 1137 |
-
|
| 1138 |
-
|
| 1139 |
-
|
| 1140 |
-
placeholder="hf_...",
|
| 1141 |
-
scale=4,
|
| 1142 |
)
|
| 1143 |
-
|
| 1144 |
-
|
| 1145 |
-
token_status = gr.Markdown(
|
| 1146 |
-
"✓ Ready (demo mode — perspectives use built-in synthesis)",
|
| 1147 |
visible=True
|
| 1148 |
)
|
| 1149 |
|
| 1150 |
-
def apply_token(token_str):
|
| 1151 |
-
"""Apply HF token to enable inference."""
|
| 1152 |
-
if not token_str or token_str.strip() == "":
|
| 1153 |
-
return "❌ No token provided"
|
| 1154 |
-
try:
|
| 1155 |
-
login(token=token_str.strip())
|
| 1156 |
-
return "✅ Token applied! LLM synthesis enabled."
|
| 1157 |
-
except Exception as e:
|
| 1158 |
-
return f"⚠️ Token invalid: {str(e)[:60]}"
|
| 1159 |
-
|
| 1160 |
-
apply_token_btn.click(
|
| 1161 |
-
apply_token,
|
| 1162 |
-
token_input,
|
| 1163 |
-
token_status,
|
| 1164 |
-
)
|
| 1165 |
-
|
| 1166 |
with gr.Tabs():
|
| 1167 |
# =================== CHAT TAB ===================
|
| 1168 |
with gr.Tab("Explore", id="chat"):
|
|
@@ -1301,14 +1298,14 @@ Created by **Jonathan Harrison** to explore recursive reasoning, multi-perspecti
|
|
| 1301 |
""")
|
| 1302 |
|
| 1303 |
# Event handling
|
| 1304 |
-
def on_submit(msg, history, st, mode, custom):
|
| 1305 |
-
result = process_message(msg, history, st, mode, custom)
|
| 1306 |
return result
|
| 1307 |
|
| 1308 |
# Wire submit events
|
| 1309 |
send_btn.click(
|
| 1310 |
on_submit,
|
| 1311 |
-
[msg_input, chatbot, state, perspective_mode, custom_perspectives],
|
| 1312 |
[chatbot, state, aegis_display, coherence_display, nexus_display,
|
| 1313 |
psi_display, memory_display, coverage_display, memory_browser,
|
| 1314 |
spiderweb_plot, coherence_plot, tension_plot, aegis_plot, memory_plot, nexus_plot],
|
|
@@ -1317,7 +1314,7 @@ Created by **Jonathan Harrison** to explore recursive reasoning, multi-perspecti
|
|
| 1317 |
|
| 1318 |
msg_input.submit(
|
| 1319 |
on_submit,
|
| 1320 |
-
[msg_input, chatbot, state, perspective_mode, custom_perspectives],
|
| 1321 |
[chatbot, state, aegis_display, coherence_display, nexus_display,
|
| 1322 |
psi_display, memory_display, coverage_display, memory_browser,
|
| 1323 |
spiderweb_plot, coherence_plot, tension_plot, aegis_plot, memory_plot, nexus_plot],
|
|
|
|
| 112 |
return selected[:n]
|
| 113 |
|
| 114 |
|
| 115 |
+
def call_perspective(perspective_name: str, query: str, request: gr.Request = None) -> str:
|
| 116 |
"""Generate response from a single perspective using HF Inference API."""
|
| 117 |
p = get_perspective(perspective_name)
|
| 118 |
if not p:
|
|
|
|
| 121 |
if not HAS_LLM:
|
| 122 |
return f"[{perspective_name.upper()}] Demo response (LLM unavailable)"
|
| 123 |
|
| 124 |
+
# Extract auth token from request headers (set by Gradio OAuth)
|
| 125 |
+
auth_token = None
|
| 126 |
+
if request and hasattr(request, 'headers') and 'authorization' in request.headers:
|
| 127 |
+
auth_token = request.headers['authorization'].replace('Bearer ', '').strip()
|
| 128 |
+
|
| 129 |
+
# Create client with user's auth token if available
|
| 130 |
+
inference_client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct", token=auth_token) if auth_token else client
|
| 131 |
+
|
| 132 |
try:
|
| 133 |
messages = [
|
| 134 |
{"role": "system", "content": p.system_prompt},
|
| 135 |
{"role": "user", "content": query}
|
| 136 |
]
|
| 137 |
+
response = inference_client.chat_completion(
|
| 138 |
messages,
|
| 139 |
max_tokens=256,
|
| 140 |
temperature=0.7,
|
|
|
|
| 149 |
return f"[{perspective_name}] Error: {str(e)[:80]}"
|
| 150 |
|
| 151 |
|
| 152 |
+
def generate_synthesis(perspectives_responses: Dict[str, str], query: str, request: gr.Request = None) -> str:
|
| 153 |
"""Generate synthesis from all perspective responses."""
|
| 154 |
|
| 155 |
+
# Extract auth token from request headers
|
| 156 |
+
auth_token = None
|
| 157 |
+
if request and hasattr(request, 'headers') and 'authorization' in request.headers:
|
| 158 |
+
auth_token = request.headers['authorization'].replace('Bearer ', '').strip()
|
| 159 |
+
|
| 160 |
+
# Create client with user's auth token if available
|
| 161 |
+
inference_client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct", token=auth_token) if auth_token else client
|
| 162 |
+
|
| 163 |
# Try LLM synthesis first if available
|
| 164 |
if HAS_LLM:
|
| 165 |
perspective_text = "\n\n".join(
|
|
|
|
| 178 |
{"role": "system", "content": "Synthesize multi-perspective responses concisely."},
|
| 179 |
{"role": "user", "content": synthesis_prompt}
|
| 180 |
]
|
| 181 |
+
response = inference_client.chat_completion(
|
| 182 |
messages,
|
| 183 |
max_tokens=256,
|
| 184 |
temperature=0.7,
|
|
|
|
| 696 |
chat_history: List,
|
| 697 |
state: Dict,
|
| 698 |
perspective_mode: str,
|
| 699 |
+
custom_perspectives: List[str],
|
| 700 |
+
request: gr.Request = None
|
| 701 |
) -> Tuple:
|
| 702 |
"""Main conversation handler with all visualizations."""
|
| 703 |
|
|
|
|
| 725 |
# ===== STEP 4-6: Generate & Evaluate =====
|
| 726 |
perspectives_responses = {}
|
| 727 |
for perspective_name in selected_perspectives:
|
| 728 |
+
response = call_perspective(perspective_name, user_msg, request)
|
| 729 |
perspectives_responses[perspective_name] = response
|
| 730 |
|
| 731 |
aegis = state.get("aegis") or AEGIS()
|
|
|
|
| 745 |
mean_tension = np.mean(list(tensions.values())) if tensions else 0.3
|
| 746 |
|
| 747 |
# ===== STEP 7: Synthesis =====
|
| 748 |
+
synthesis = generate_synthesis(perspectives_responses, user_msg, request)
|
| 749 |
chat_history.append({"role": "assistant", "content": synthesis})
|
| 750 |
|
| 751 |
# ===== STEP 8-9: Resonance & Memory =====
|
|
|
|
| 1146 |
</div>
|
| 1147 |
""")
|
| 1148 |
|
| 1149 |
+
# OAuth Login with HuggingFace
|
| 1150 |
with gr.Group():
|
| 1151 |
+
gr.Markdown("### 🔐 Sign in with HuggingFace")
|
| 1152 |
+
gr.Markdown("_Login with your HF account to use your inference quota for full LLM synthesis._")
|
| 1153 |
with gr.Row():
|
| 1154 |
+
login_button = gr.LoginButton(
|
| 1155 |
+
scale=2,
|
| 1156 |
+
size="lg",
|
|
|
|
|
|
|
| 1157 |
)
|
| 1158 |
+
auth_note = gr.Markdown(
|
| 1159 |
+
"✓ Ready for analysis — LLM features unlocked when logged in",
|
|
|
|
|
|
|
| 1160 |
visible=True
|
| 1161 |
)
|
| 1162 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1163 |
with gr.Tabs():
|
| 1164 |
# =================== CHAT TAB ===================
|
| 1165 |
with gr.Tab("Explore", id="chat"):
|
|
|
|
| 1298 |
""")
|
| 1299 |
|
| 1300 |
# Event handling
|
| 1301 |
+
def on_submit(msg, history, st, mode, custom, request: gr.Request):
|
| 1302 |
+
result = process_message(msg, history, st, mode, custom, request)
|
| 1303 |
return result
|
| 1304 |
|
| 1305 |
# Wire submit events
|
| 1306 |
send_btn.click(
|
| 1307 |
on_submit,
|
| 1308 |
+
[msg_input, chatbot, state, perspective_mode, custom_perspectives, gr.Request()],
|
| 1309 |
[chatbot, state, aegis_display, coherence_display, nexus_display,
|
| 1310 |
psi_display, memory_display, coverage_display, memory_browser,
|
| 1311 |
spiderweb_plot, coherence_plot, tension_plot, aegis_plot, memory_plot, nexus_plot],
|
|
|
|
| 1314 |
|
| 1315 |
msg_input.submit(
|
| 1316 |
on_submit,
|
| 1317 |
+
[msg_input, chatbot, state, perspective_mode, custom_perspectives, gr.Request()],
|
| 1318 |
[chatbot, state, aegis_display, coherence_display, nexus_display,
|
| 1319 |
psi_display, memory_display, coverage_display, memory_browser,
|
| 1320 |
spiderweb_plot, coherence_plot, tension_plot, aegis_plot, memory_plot, nexus_plot],
|