Spaces:
Running
Running
update
Browse files- Dockerfile +19 -0
- backend/code.py +20 -22
- backend/command.py +15 -8
- backend/defaultnames.py +124 -0
- backend/main.py +281 -68
- backend/research.py +350 -338
- backend/research_tulu.py +0 -473
- backend/utils.py +6 -2
- frontend/index.html +140 -65
- frontend/research-ui.js +3 -3
- frontend/script.js +534 -26
- frontend/style.css +460 -28
Dockerfile
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.12-slim
|
| 2 |
+
|
| 3 |
+
WORKDIR /app
|
| 4 |
+
|
| 5 |
+
# Install dependencies
|
| 6 |
+
COPY pyproject.toml .
|
| 7 |
+
RUN pip install --no-cache-dir .
|
| 8 |
+
|
| 9 |
+
# Copy application code
|
| 10 |
+
COPY backend/ backend/
|
| 11 |
+
COPY frontend/ frontend/
|
| 12 |
+
|
| 13 |
+
# HF Spaces uses port 7860
|
| 14 |
+
EXPOSE 7860
|
| 15 |
+
|
| 16 |
+
# Run the server
|
| 17 |
+
# --no-browser since we're in a container
|
| 18 |
+
# --workspace-dir /app/workspace for session storage
|
| 19 |
+
CMD ["python", "-m", "backend.main", "--port", "7860", "--no-browser", "--workspace-dir", "/app/workspace"]
|
backend/code.py
CHANGED
|
@@ -2,11 +2,14 @@
|
|
| 2 |
Code notebook backend - handles code execution with E2B
|
| 3 |
"""
|
| 4 |
import json
|
|
|
|
| 5 |
import os
|
| 6 |
import re
|
| 7 |
from typing import List, Dict, Optional
|
| 8 |
from e2b_code_interpreter import Sandbox
|
| 9 |
|
|
|
|
|
|
|
| 10 |
|
| 11 |
TOOLS = [
|
| 12 |
{
|
|
@@ -268,7 +271,7 @@ def download_files_from_sandbox(sbx: Sandbox, files: List[Dict], files_root: str
|
|
| 268 |
return "\n".join(results)
|
| 269 |
|
| 270 |
|
| 271 |
-
def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox, files_root: str = None):
|
| 272 |
"""
|
| 273 |
Stream code execution results
|
| 274 |
|
|
@@ -281,6 +284,7 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
|
|
| 281 |
messages: Conversation messages
|
| 282 |
sbx: E2B sandbox instance
|
| 283 |
files_root: Root directory for file uploads (optional)
|
|
|
|
| 284 |
"""
|
| 285 |
turns = 0
|
| 286 |
done = False
|
|
@@ -297,12 +301,16 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
|
|
| 297 |
for attempt in range(MAX_RETRIES):
|
| 298 |
try:
|
| 299 |
# Call LLM with tools
|
| 300 |
-
|
| 301 |
-
messages
|
| 302 |
-
model
|
| 303 |
-
tools
|
| 304 |
-
tool_choice
|
| 305 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 306 |
break # Success, exit retry loop
|
| 307 |
except Exception as e:
|
| 308 |
last_error = e
|
|
@@ -340,7 +348,7 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
|
|
| 340 |
thinking_content = content
|
| 341 |
|
| 342 |
if result_match:
|
| 343 |
-
|
| 344 |
result_content = result_match.group(1).strip()
|
| 345 |
# Remove result tags from thinking display
|
| 346 |
thinking_content = re.sub(r'<result>.*?</result>', '', content, flags=re.DOTALL | re.IGNORECASE).strip()
|
|
@@ -363,7 +371,7 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
|
|
| 363 |
code = args["code"]
|
| 364 |
except json.JSONDecodeError as e:
|
| 365 |
error_msg = f"JSON parse error: {e}. Raw arguments: {tool_call.function.arguments[:500]}"
|
| 366 |
-
|
| 367 |
# Treat as tool error so LLM can recover
|
| 368 |
output = f"Error parsing code arguments: {e}"
|
| 369 |
messages.append({
|
|
@@ -387,7 +395,7 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
|
|
| 387 |
continue
|
| 388 |
except KeyError as e:
|
| 389 |
error_msg = f"Missing required key {e} in arguments: {tool_call.function.arguments[:500]}"
|
| 390 |
-
|
| 391 |
output = f"Error: Missing required 'code' parameter"
|
| 392 |
messages.append({
|
| 393 |
"role": "assistant",
|
|
@@ -418,16 +426,6 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
|
|
| 418 |
output = parse_execution_result(execution)
|
| 419 |
has_error = execution.error is not None
|
| 420 |
|
| 421 |
-
# Debug logging for E2B execution results
|
| 422 |
-
print(f"[E2B DEBUG] Number of results: {len(execution.results)}")
|
| 423 |
-
for i, result in enumerate(execution.results):
|
| 424 |
-
print(f"[E2B DEBUG] Result {i}:")
|
| 425 |
-
print(f" - text: {result.text[:100] if result.text else None}...")
|
| 426 |
-
print(f" - png: {len(result.png) if result.png else None} bytes")
|
| 427 |
-
print(f" - jpeg: {len(result.jpeg) if result.jpeg else None} bytes")
|
| 428 |
-
print(f" - svg: {len(result.svg) if result.svg else None} bytes")
|
| 429 |
-
print(f" - all attrs: {[a for a in dir(result) if not a.startswith('_')]}")
|
| 430 |
-
|
| 431 |
# Extract images and assign figure names
|
| 432 |
images = []
|
| 433 |
figure_names = []
|
|
@@ -494,7 +492,7 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
|
|
| 494 |
paths = args["paths"]
|
| 495 |
except (json.JSONDecodeError, KeyError) as e:
|
| 496 |
error_msg = f"Failed to parse upload_files arguments: {e}. Raw: {tool_call.function.arguments[:500]}"
|
| 497 |
-
|
| 498 |
output = f"Error parsing upload_files arguments: {e}"
|
| 499 |
messages.append({
|
| 500 |
"role": "assistant",
|
|
@@ -553,7 +551,7 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
|
|
| 553 |
files = args["files"]
|
| 554 |
except (json.JSONDecodeError, KeyError) as e:
|
| 555 |
error_msg = f"Failed to parse download_files arguments: {e}. Raw: {tool_call.function.arguments[:500]}"
|
| 556 |
-
|
| 557 |
output = f"Error parsing download_files arguments: {e}"
|
| 558 |
messages.append({
|
| 559 |
"role": "assistant",
|
|
|
|
| 2 |
Code notebook backend - handles code execution with E2B
|
| 3 |
"""
|
| 4 |
import json
|
| 5 |
+
import logging
|
| 6 |
import os
|
| 7 |
import re
|
| 8 |
from typing import List, Dict, Optional
|
| 9 |
from e2b_code_interpreter import Sandbox
|
| 10 |
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
|
| 14 |
TOOLS = [
|
| 15 |
{
|
|
|
|
| 271 |
return "\n".join(results)
|
| 272 |
|
| 273 |
|
| 274 |
+
def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox, files_root: str = None, extra_params: Optional[Dict] = None):
|
| 275 |
"""
|
| 276 |
Stream code execution results
|
| 277 |
|
|
|
|
| 284 |
messages: Conversation messages
|
| 285 |
sbx: E2B sandbox instance
|
| 286 |
files_root: Root directory for file uploads (optional)
|
| 287 |
+
extra_params: Extra parameters for API calls (optional)
|
| 288 |
"""
|
| 289 |
turns = 0
|
| 290 |
done = False
|
|
|
|
| 301 |
for attempt in range(MAX_RETRIES):
|
| 302 |
try:
|
| 303 |
# Call LLM with tools
|
| 304 |
+
call_params = {
|
| 305 |
+
"messages": messages,
|
| 306 |
+
"model": model,
|
| 307 |
+
"tools": TOOLS,
|
| 308 |
+
"tool_choice": "auto",
|
| 309 |
+
}
|
| 310 |
+
# Apply any extra params via extra_body (for OpenAI SDK compatibility)
|
| 311 |
+
if extra_params:
|
| 312 |
+
call_params["extra_body"] = extra_params
|
| 313 |
+
response = client.chat.completions.create(**call_params)
|
| 314 |
break # Success, exit retry loop
|
| 315 |
except Exception as e:
|
| 316 |
last_error = e
|
|
|
|
| 348 |
thinking_content = content
|
| 349 |
|
| 350 |
if result_match:
|
| 351 |
+
logger.debug(f"Result found: {content[:200]}...")
|
| 352 |
result_content = result_match.group(1).strip()
|
| 353 |
# Remove result tags from thinking display
|
| 354 |
thinking_content = re.sub(r'<result>.*?</result>', '', content, flags=re.DOTALL | re.IGNORECASE).strip()
|
|
|
|
| 371 |
code = args["code"]
|
| 372 |
except json.JSONDecodeError as e:
|
| 373 |
error_msg = f"JSON parse error: {e}. Raw arguments: {tool_call.function.arguments[:500]}"
|
| 374 |
+
logger.error(error_msg)
|
| 375 |
# Treat as tool error so LLM can recover
|
| 376 |
output = f"Error parsing code arguments: {e}"
|
| 377 |
messages.append({
|
|
|
|
| 395 |
continue
|
| 396 |
except KeyError as e:
|
| 397 |
error_msg = f"Missing required key {e} in arguments: {tool_call.function.arguments[:500]}"
|
| 398 |
+
logger.error(error_msg)
|
| 399 |
output = f"Error: Missing required 'code' parameter"
|
| 400 |
messages.append({
|
| 401 |
"role": "assistant",
|
|
|
|
| 426 |
output = parse_execution_result(execution)
|
| 427 |
has_error = execution.error is not None
|
| 428 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 429 |
# Extract images and assign figure names
|
| 430 |
images = []
|
| 431 |
figure_names = []
|
|
|
|
| 492 |
paths = args["paths"]
|
| 493 |
except (json.JSONDecodeError, KeyError) as e:
|
| 494 |
error_msg = f"Failed to parse upload_files arguments: {e}. Raw: {tool_call.function.arguments[:500]}"
|
| 495 |
+
logger.error(error_msg)
|
| 496 |
output = f"Error parsing upload_files arguments: {e}"
|
| 497 |
messages.append({
|
| 498 |
"role": "assistant",
|
|
|
|
| 551 |
files = args["files"]
|
| 552 |
except (json.JSONDecodeError, KeyError) as e:
|
| 553 |
error_msg = f"Failed to parse download_files arguments: {e}. Raw: {tool_call.function.arguments[:500]}"
|
| 554 |
+
logger.error(error_msg)
|
| 555 |
output = f"Error parsing download_files arguments: {e}"
|
| 556 |
messages.append({
|
| 557 |
"role": "assistant",
|
backend/command.py
CHANGED
|
@@ -2,10 +2,13 @@
|
|
| 2 |
Command center backend - handles tool-based notebook launching
|
| 3 |
"""
|
| 4 |
import json
|
|
|
|
| 5 |
import os
|
| 6 |
import re
|
| 7 |
from typing import List, Dict
|
| 8 |
|
|
|
|
|
|
|
| 9 |
# Tool definitions for launching notebooks
|
| 10 |
TOOLS = [
|
| 11 |
{
|
|
@@ -101,7 +104,8 @@ RETRY_DELAYS = [2, 5, 10] # Delay in seconds for each retry attempt
|
|
| 101 |
|
| 102 |
# Set FORCE_RETRY=1 to test retry logic with any error
|
| 103 |
FORCE_RETRY_FOR_TESTING = os.environ.get("FORCE_RETRY", "0") == "1"
|
| 104 |
-
|
|
|
|
| 105 |
|
| 106 |
|
| 107 |
def parse_llm_error(error: Exception) -> dict:
|
|
@@ -135,7 +139,7 @@ def parse_llm_error(error: Exception) -> dict:
|
|
| 135 |
}
|
| 136 |
|
| 137 |
|
| 138 |
-
def stream_command_center(client, model: str, messages: List[Dict]):
|
| 139 |
"""
|
| 140 |
Stream command center responses with notebook launching capabilities
|
| 141 |
|
|
@@ -159,12 +163,15 @@ def stream_command_center(client, model: str, messages: List[Dict]):
|
|
| 159 |
raise Exception("Error code: 429 - {'message': \"We're experiencing high traffic right now! Please try again soon.\", 'type': 'too_many_requests_error', 'param': 'queue', 'code': 'queue_exceeded'}")
|
| 160 |
|
| 161 |
# Call LLM with tools
|
| 162 |
-
|
| 163 |
-
messages
|
| 164 |
-
model
|
| 165 |
-
tools
|
| 166 |
-
tool_choice
|
| 167 |
-
|
|
|
|
|
|
|
|
|
|
| 168 |
break # Success, exit retry loop
|
| 169 |
except Exception as e:
|
| 170 |
last_error = e
|
|
|
|
| 2 |
Command center backend - handles tool-based notebook launching
|
| 3 |
"""
|
| 4 |
import json
|
| 5 |
+
import logging
|
| 6 |
import os
|
| 7 |
import re
|
| 8 |
from typing import List, Dict
|
| 9 |
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
# Tool definitions for launching notebooks
|
| 13 |
TOOLS = [
|
| 14 |
{
|
|
|
|
| 104 |
|
| 105 |
# Set FORCE_RETRY=1 to test retry logic with any error
|
| 106 |
FORCE_RETRY_FOR_TESTING = os.environ.get("FORCE_RETRY", "0") == "1"
|
| 107 |
+
if FORCE_RETRY_FOR_TESTING:
|
| 108 |
+
logger.debug("FORCE_RETRY_FOR_TESTING enabled")
|
| 109 |
|
| 110 |
|
| 111 |
def parse_llm_error(error: Exception) -> dict:
|
|
|
|
| 139 |
}
|
| 140 |
|
| 141 |
|
| 142 |
+
def stream_command_center(client, model: str, messages: List[Dict], extra_params: dict = None):
|
| 143 |
"""
|
| 144 |
Stream command center responses with notebook launching capabilities
|
| 145 |
|
|
|
|
| 163 |
raise Exception("Error code: 429 - {'message': \"We're experiencing high traffic right now! Please try again soon.\", 'type': 'too_many_requests_error', 'param': 'queue', 'code': 'queue_exceeded'}")
|
| 164 |
|
| 165 |
# Call LLM with tools
|
| 166 |
+
call_params = {
|
| 167 |
+
"messages": messages,
|
| 168 |
+
"model": model,
|
| 169 |
+
"tools": TOOLS,
|
| 170 |
+
"tool_choice": "auto",
|
| 171 |
+
}
|
| 172 |
+
if extra_params:
|
| 173 |
+
call_params["extra_body"] = extra_params
|
| 174 |
+
response = client.chat.completions.create(**call_params)
|
| 175 |
break # Success, exit retry loop
|
| 176 |
except Exception as e:
|
| 177 |
last_error = e
|
backend/defaultnames.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Auto-generated isotope data for session names
|
| 2 |
+
# Structure: element name -> list of mass numbers
|
| 3 |
+
# Two-stage sampling: 1) pick random element, 2) pick random isotope
|
| 4 |
+
|
| 5 |
+
ISOTOPES = {
|
| 6 |
+
"hydrogen": [1, 2, 3, 4, 5, 6],
|
| 7 |
+
"helium": [3, 4, 5, 6, 7, 8, 9, 10],
|
| 8 |
+
"lithium": [4, 5, 6, 7, 8, 9, 10, 11, 12],
|
| 9 |
+
"beryllium": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
|
| 10 |
+
"boron": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
|
| 11 |
+
"carbon": [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22],
|
| 12 |
+
"nitrogen": [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
|
| 13 |
+
"oxygen": [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26],
|
| 14 |
+
"fluorine": [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
|
| 15 |
+
"neon": [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
|
| 16 |
+
"sodium": [18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
|
| 17 |
+
"magnesium": [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37],
|
| 18 |
+
"aluminum": [21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
|
| 19 |
+
"silicon": [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42],
|
| 20 |
+
"phosphorus": [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46],
|
| 21 |
+
"sulfur": [26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
|
| 22 |
+
"chlorine": [28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51],
|
| 23 |
+
"argon": [30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53],
|
| 24 |
+
"potassium": [32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55],
|
| 25 |
+
"calcium": [34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57],
|
| 26 |
+
"scandium": [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
|
| 27 |
+
"titanium": [38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61],
|
| 28 |
+
"vanadium": [40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63],
|
| 29 |
+
"chromium": [42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65],
|
| 30 |
+
"manganese": [44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67],
|
| 31 |
+
"iron": [45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
|
| 32 |
+
"cobalt": [48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72],
|
| 33 |
+
"nickel": [50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78],
|
| 34 |
+
"copper": [52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80],
|
| 35 |
+
"zinc": [54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82],
|
| 36 |
+
"gallium": [56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84],
|
| 37 |
+
"germanium": [58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86],
|
| 38 |
+
"arsenic": [60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89],
|
| 39 |
+
"selenium": [65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92],
|
| 40 |
+
"bromine": [67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94],
|
| 41 |
+
"krypton": [69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97],
|
| 42 |
+
"rubidium": [71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102],
|
| 43 |
+
"strontium": [73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104],
|
| 44 |
+
"yttrium": [77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106],
|
| 45 |
+
"zirconium": [79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108],
|
| 46 |
+
"niobium": [81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110],
|
| 47 |
+
"molybdenum": [83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113],
|
| 48 |
+
"technetium": [85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115],
|
| 49 |
+
"ruthenium": [87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118],
|
| 50 |
+
"rhodium": [89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121],
|
| 51 |
+
"palladium": [91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123],
|
| 52 |
+
"silver": [94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127],
|
| 53 |
+
"cadmium": [96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130],
|
| 54 |
+
"indium": [98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134],
|
| 55 |
+
"tin": [100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137],
|
| 56 |
+
"antimony": [103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139],
|
| 57 |
+
"tellurium": [106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142],
|
| 58 |
+
"iodine": [108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144],
|
| 59 |
+
"xenon": [110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147],
|
| 60 |
+
"cesium": [112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151],
|
| 61 |
+
"barium": [114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153],
|
| 62 |
+
"lanthanum": [117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155],
|
| 63 |
+
"cerium": [119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157],
|
| 64 |
+
"praseodymium": [121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159],
|
| 65 |
+
"neodymium": [126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161],
|
| 66 |
+
"promethium": [128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163],
|
| 67 |
+
"samarium": [130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165],
|
| 68 |
+
"europium": [132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167],
|
| 69 |
+
"gadolinium": [136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169],
|
| 70 |
+
"terbium": [138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171],
|
| 71 |
+
"dysprosium": [140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173],
|
| 72 |
+
"holmium": [142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175],
|
| 73 |
+
"erbium": [144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177],
|
| 74 |
+
"thulium": [146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179],
|
| 75 |
+
"ytterbium": [148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181],
|
| 76 |
+
"lutetium": [150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184],
|
| 77 |
+
"hafnium": [154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186],
|
| 78 |
+
"tantalum": [156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188],
|
| 79 |
+
"tungsten": [158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190],
|
| 80 |
+
"rhenium": [160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192],
|
| 81 |
+
"osmium": [162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196],
|
| 82 |
+
"iridium": [165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199],
|
| 83 |
+
"platinum": [168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202],
|
| 84 |
+
"gold": [171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205],
|
| 85 |
+
"mercury": [175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208],
|
| 86 |
+
"thallium": [177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210],
|
| 87 |
+
"lead": [181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214],
|
| 88 |
+
"bismuth": [185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216],
|
| 89 |
+
"polonium": [190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218],
|
| 90 |
+
"astatine": [193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223],
|
| 91 |
+
"radon": [196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228],
|
| 92 |
+
"francium": [200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232],
|
| 93 |
+
"radium": [203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234],
|
| 94 |
+
"actinium": [207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236],
|
| 95 |
+
"thorium": [210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238],
|
| 96 |
+
"protactinium": [213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240],
|
| 97 |
+
"uranium": [218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242],
|
| 98 |
+
"neptunium": [225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244],
|
| 99 |
+
"plutonium": [228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247],
|
| 100 |
+
"americium": [231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249],
|
| 101 |
+
"curium": [233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252],
|
| 102 |
+
"berkelium": [235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254],
|
| 103 |
+
"californium": [237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256],
|
| 104 |
+
"einsteinium": [240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257],
|
| 105 |
+
"fermium": [242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259],
|
| 106 |
+
"mendelevium": [245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260],
|
| 107 |
+
"nobelium": [249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262],
|
| 108 |
+
"lawrencium": [251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263],
|
| 109 |
+
"rutherfordium": [253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264],
|
| 110 |
+
"dubnium": [255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265],
|
| 111 |
+
"seaborgium": [258, 259, 260, 261, 262, 263, 264, 265, 266],
|
| 112 |
+
"bohrium": [260, 261, 262, 263, 264, 265, 266, 267],
|
| 113 |
+
"hassium": [263, 264, 265, 266, 267, 268, 269, 277],
|
| 114 |
+
"meitnerium": [265, 266, 267, 268, 269, 270, 271],
|
| 115 |
+
"darmstadtium": [267, 268, 269, 270, 271, 272, 273, 281],
|
| 116 |
+
"roentgenium": [272],
|
| 117 |
+
"copernicium": [285],
|
| 118 |
+
"nihonium": [286],
|
| 119 |
+
"flerovium": [289],
|
| 120 |
+
"moscovium": [289],
|
| 121 |
+
"livermorium": [293],
|
| 122 |
+
"tennessine": [294],
|
| 123 |
+
"oganesson": [294],
|
| 124 |
+
}
|
backend/main.py
CHANGED
|
@@ -7,10 +7,31 @@ from typing import List, Optional, Dict
|
|
| 7 |
import json
|
| 8 |
import httpx
|
| 9 |
import os
|
|
|
|
| 10 |
from datetime import datetime
|
| 11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
app = FastAPI(title="Productive API")
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
# Get the project root directory (parent of backend/)
|
| 15 |
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
| 16 |
|
|
@@ -22,22 +43,15 @@ try:
|
|
| 22 |
E2B_AVAILABLE = True
|
| 23 |
except ImportError:
|
| 24 |
E2B_AVAILABLE = False
|
| 25 |
-
|
| 26 |
|
| 27 |
-
# For research
|
| 28 |
-
# Set RESEARCH_BACKEND=tulu to use DR-TULU model-driven research
|
| 29 |
-
RESEARCH_BACKEND = os.environ.get("RESEARCH_BACKEND", "original").lower()
|
| 30 |
try:
|
| 31 |
-
|
| 32 |
-
from research_tulu import stream_research
|
| 33 |
-
print(f"Research backend: DR-TULU (model-driven)")
|
| 34 |
-
else:
|
| 35 |
-
from research import stream_research
|
| 36 |
-
print(f"Research backend: Original (orchestrator-driven)")
|
| 37 |
RESEARCH_AVAILABLE = True
|
| 38 |
except ImportError as e:
|
| 39 |
RESEARCH_AVAILABLE = False
|
| 40 |
-
|
| 41 |
|
| 42 |
# For command center with tool-based launching
|
| 43 |
try:
|
|
@@ -45,7 +59,7 @@ try:
|
|
| 45 |
COMMAND_AVAILABLE = True
|
| 46 |
except ImportError:
|
| 47 |
COMMAND_AVAILABLE = False
|
| 48 |
-
|
| 49 |
|
| 50 |
# Session management for sandboxes
|
| 51 |
SANDBOXES: Dict[str, any] = {}
|
|
@@ -273,11 +287,13 @@ class ChatRequest(BaseModel):
|
|
| 273 |
endpoint: str # User's configured LLM endpoint
|
| 274 |
token: Optional[str] = None # Optional auth token
|
| 275 |
model: Optional[str] = "gpt-4" # Model name
|
|
|
|
| 276 |
e2b_key: Optional[str] = None # E2B API key for code execution
|
| 277 |
serper_key: Optional[str] = None # Serper API key for research
|
| 278 |
research_sub_agent_model: Optional[str] = None # Model for research sub-tasks
|
| 279 |
research_sub_agent_endpoint: Optional[str] = None # Endpoint for research sub-agent (may differ from main)
|
| 280 |
research_sub_agent_token: Optional[str] = None # Token for research sub-agent endpoint
|
|
|
|
| 281 |
research_parallel_workers: Optional[int] = None # Number of parallel workers for research
|
| 282 |
research_max_websites: Optional[int] = None # Max websites to analyze per research session
|
| 283 |
notebook_id: Optional[str] = None # Unique notebook/tab ID for session management
|
|
@@ -308,7 +324,8 @@ async def stream_code_notebook(
|
|
| 308 |
e2b_key: str,
|
| 309 |
session_id: str,
|
| 310 |
tab_id: str = "default",
|
| 311 |
-
frontend_context: Optional[Dict] = None
|
|
|
|
| 312 |
):
|
| 313 |
"""Handle code notebook with execution capabilities"""
|
| 314 |
|
|
@@ -341,14 +358,14 @@ async def stream_code_notebook(
|
|
| 341 |
record_api_call(tab_id, full_messages)
|
| 342 |
|
| 343 |
# Stream code execution
|
| 344 |
-
for update in stream_code_execution(client, model, full_messages, sbx, files_root=FILES_ROOT):
|
| 345 |
# Forward updates to frontend
|
| 346 |
yield f"data: {json.dumps(update)}\n\n"
|
| 347 |
|
| 348 |
except Exception as e:
|
| 349 |
import traceback
|
| 350 |
error_message = f"Code execution error: {str(e)}\n{traceback.format_exc()}"
|
| 351 |
-
|
| 352 |
|
| 353 |
# Check if this is a sandbox timeout error (502)
|
| 354 |
error_str = str(e)
|
|
@@ -373,7 +390,7 @@ async def stream_code_notebook(
|
|
| 373 |
yield f"data: {json.dumps({'type': 'info', 'content': 'New sandbox created. Retrying execution...'})}\n\n"
|
| 374 |
|
| 375 |
# Retry code execution with new sandbox
|
| 376 |
-
for update in stream_code_execution(client, model, full_messages, sbx, files_root=FILES_ROOT):
|
| 377 |
yield f"data: {json.dumps(update)}\n\n"
|
| 378 |
|
| 379 |
except Exception as retry_error:
|
|
@@ -393,7 +410,9 @@ async def stream_research_notebook(
|
|
| 393 |
max_websites: Optional[int] = None,
|
| 394 |
tab_id: str = "default",
|
| 395 |
sub_agent_endpoint: Optional[str] = None,
|
| 396 |
-
sub_agent_token: Optional[str] = None
|
|
|
|
|
|
|
| 397 |
):
|
| 398 |
"""Handle research notebook with web search"""
|
| 399 |
|
|
@@ -438,13 +457,13 @@ async def stream_research_notebook(
|
|
| 438 |
max_sites = max_websites if max_websites else 50
|
| 439 |
|
| 440 |
# Stream research
|
| 441 |
-
for update in stream_research(client, model, question, serper_key, max_websites=max_sites, system_prompt=system_prompt, sub_agent_model=analysis_model, parallel_workers=workers, sub_agent_client=sub_agent_client):
|
| 442 |
yield f"data: {json.dumps(update)}\n\n"
|
| 443 |
|
| 444 |
except Exception as e:
|
| 445 |
import traceback
|
| 446 |
error_message = f"Research error: {str(e)}\n{traceback.format_exc()}"
|
| 447 |
-
|
| 448 |
yield f"data: {json.dumps({'type': 'error', 'content': error_message})}\n\n"
|
| 449 |
|
| 450 |
|
|
@@ -453,13 +472,14 @@ async def stream_command_center_notebook(
|
|
| 453 |
endpoint: str,
|
| 454 |
token: Optional[str],
|
| 455 |
model: str,
|
| 456 |
-
tab_id: str = "0"
|
|
|
|
| 457 |
):
|
| 458 |
"""Handle command center with tool-based notebook launching"""
|
| 459 |
|
| 460 |
if not COMMAND_AVAILABLE:
|
| 461 |
# Fallback to regular chat if command tools not available
|
| 462 |
-
async for chunk in stream_chat_response(messages, endpoint, token, model, "command", tab_id):
|
| 463 |
yield chunk
|
| 464 |
return
|
| 465 |
|
|
@@ -477,26 +497,26 @@ async def stream_command_center_notebook(
|
|
| 477 |
system_prompt = get_system_prompt("command")
|
| 478 |
|
| 479 |
# Build full messages: system + stored history + new messages
|
| 480 |
-
|
| 481 |
-
|
| 482 |
|
| 483 |
# On first call, history is empty, so just use incoming messages
|
| 484 |
# On subsequent calls, append only new messages
|
| 485 |
if not CONVERSATION_HISTORY[tab_id]:
|
| 486 |
# First message - use all incoming messages
|
| 487 |
full_messages = [{"role": "system", "content": system_prompt}] + messages
|
| 488 |
-
|
| 489 |
else:
|
| 490 |
# Subsequent messages - use stored history + incoming messages
|
| 491 |
# The incoming messages should only be the new user message
|
| 492 |
full_messages = [{"role": "system", "content": system_prompt}] + CONVERSATION_HISTORY[tab_id] + messages
|
| 493 |
-
|
| 494 |
|
| 495 |
# Store for debugging
|
| 496 |
record_api_call(tab_id, full_messages)
|
| 497 |
|
| 498 |
# Stream command center execution
|
| 499 |
-
for update in stream_command_center(client, model, full_messages):
|
| 500 |
# Forward updates to frontend
|
| 501 |
yield f"data: {json.dumps(update)}\n\n"
|
| 502 |
|
|
@@ -511,7 +531,7 @@ async def stream_command_center_notebook(
|
|
| 511 |
except Exception as e:
|
| 512 |
import traceback
|
| 513 |
error_message = f"Command center error: {str(e)}\n{traceback.format_exc()}"
|
| 514 |
-
|
| 515 |
yield f"data: {json.dumps({'type': 'error', 'content': error_message})}\n\n"
|
| 516 |
|
| 517 |
|
|
@@ -521,16 +541,13 @@ async def stream_chat_response(
|
|
| 521 |
token: Optional[str],
|
| 522 |
model: str,
|
| 523 |
notebook_type: str,
|
| 524 |
-
tab_id: str = "default"
|
|
|
|
| 525 |
):
|
| 526 |
"""Proxy stream from user's configured LLM endpoint"""
|
| 527 |
|
| 528 |
try:
|
| 529 |
-
|
| 530 |
-
print(f"Endpoint: {endpoint}")
|
| 531 |
-
print(f"Model: {model}")
|
| 532 |
-
print(f"Messages: {len(messages)} messages")
|
| 533 |
-
print(f"Token provided: {bool(token)}")
|
| 534 |
|
| 535 |
# Prepare messages with appropriate system prompt based on notebook type (with file tree)
|
| 536 |
system_prompt = get_system_prompt(notebook_type)
|
|
@@ -545,9 +562,9 @@ async def stream_chat_response(
|
|
| 545 |
if not token and "huggingface.co" in endpoint:
|
| 546 |
token = os.getenv("HF_TOKEN")
|
| 547 |
if token:
|
| 548 |
-
|
| 549 |
else:
|
| 550 |
-
|
| 551 |
|
| 552 |
# Prepare headers
|
| 553 |
headers = {
|
|
@@ -563,8 +580,11 @@ async def stream_chat_response(
|
|
| 563 |
"stream": True,
|
| 564 |
"temperature": 0.7
|
| 565 |
}
|
|
|
|
|
|
|
|
|
|
| 566 |
|
| 567 |
-
|
| 568 |
|
| 569 |
# Make streaming request to user's endpoint
|
| 570 |
async with httpx.AsyncClient(timeout=60.0) as client:
|
|
@@ -578,7 +598,7 @@ async def stream_chat_response(
|
|
| 578 |
error_text = await response.aread()
|
| 579 |
error_detail = error_text.decode() if error_text else f"Status {response.status_code}"
|
| 580 |
error_message = f"LLM API error ({response.status_code}): {error_detail}"
|
| 581 |
-
|
| 582 |
yield f"data: {json.dumps({'type': 'error', 'content': error_message})}\n\n"
|
| 583 |
return
|
| 584 |
|
|
@@ -610,13 +630,12 @@ async def stream_chat_response(
|
|
| 610 |
|
| 611 |
except httpx.RequestError as e:
|
| 612 |
error_message = f"Connection error to LLM endpoint: {str(e)}"
|
| 613 |
-
|
| 614 |
yield f"data: {json.dumps({'type': 'error', 'content': error_message})}\n\n"
|
| 615 |
except Exception as e:
|
| 616 |
import traceback
|
| 617 |
error_message = f"Error: {str(e) or 'Unknown error occurred'}"
|
| 618 |
-
|
| 619 |
-
print(traceback.format_exc())
|
| 620 |
yield f"data: {json.dumps({'type': 'error', 'content': error_message})}\n\n"
|
| 621 |
|
| 622 |
|
|
@@ -683,7 +702,7 @@ async def generate_title(request: TitleRequest):
|
|
| 683 |
async def chat_stream(request: ChatRequest):
|
| 684 |
"""Proxy streaming chat to user's configured LLM endpoint"""
|
| 685 |
|
| 686 |
-
|
| 687 |
|
| 688 |
if not request.messages:
|
| 689 |
raise HTTPException(status_code=400, detail="Messages are required")
|
|
@@ -700,6 +719,11 @@ async def chat_stream(request: ChatRequest):
|
|
| 700 |
# Convert frontend_context to dict if provided
|
| 701 |
frontend_context = request.frontend_context.model_dump() if request.frontend_context else None
|
| 702 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 703 |
# Route to code execution handler for code notebooks
|
| 704 |
if request.notebook_type == "code":
|
| 705 |
# Use notebook_id as session key, fallback to "default" if not provided
|
|
@@ -709,12 +733,13 @@ async def chat_stream(request: ChatRequest):
|
|
| 709 |
stream_code_notebook(
|
| 710 |
messages,
|
| 711 |
request.endpoint,
|
| 712 |
-
|
| 713 |
request.model or "gpt-4",
|
| 714 |
-
|
| 715 |
session_id,
|
| 716 |
tab_id,
|
| 717 |
-
frontend_context
|
|
|
|
| 718 |
),
|
| 719 |
media_type="text/event-stream",
|
| 720 |
headers={
|
|
@@ -728,21 +753,23 @@ async def chat_stream(request: ChatRequest):
|
|
| 728 |
if request.notebook_type == "research":
|
| 729 |
# Use sub-agent endpoint/token if provided, otherwise fall back to main
|
| 730 |
sub_agent_endpoint = request.research_sub_agent_endpoint or request.endpoint
|
| 731 |
-
sub_agent_token = request.research_sub_agent_token if request.research_sub_agent_endpoint else
|
| 732 |
|
| 733 |
return StreamingResponse(
|
| 734 |
stream_research_notebook(
|
| 735 |
messages,
|
| 736 |
request.endpoint,
|
| 737 |
-
|
| 738 |
request.model or "gpt-4",
|
| 739 |
-
|
| 740 |
request.research_sub_agent_model,
|
| 741 |
request.research_parallel_workers,
|
| 742 |
None,
|
| 743 |
tab_id,
|
| 744 |
sub_agent_endpoint,
|
| 745 |
-
sub_agent_token
|
|
|
|
|
|
|
| 746 |
),
|
| 747 |
media_type="text/event-stream",
|
| 748 |
headers={
|
|
@@ -758,9 +785,10 @@ async def chat_stream(request: ChatRequest):
|
|
| 758 |
stream_command_center_notebook(
|
| 759 |
messages,
|
| 760 |
request.endpoint,
|
| 761 |
-
|
| 762 |
request.model or "gpt-4",
|
| 763 |
-
tab_id
|
|
|
|
| 764 |
),
|
| 765 |
media_type="text/event-stream",
|
| 766 |
headers={
|
|
@@ -775,10 +803,11 @@ async def chat_stream(request: ChatRequest):
|
|
| 775 |
stream_chat_response(
|
| 776 |
messages,
|
| 777 |
request.endpoint,
|
| 778 |
-
|
| 779 |
request.model or "gpt-4",
|
| 780 |
request.notebook_type,
|
| 781 |
-
tab_id
|
|
|
|
| 782 |
),
|
| 783 |
media_type="text/event-stream",
|
| 784 |
headers={
|
|
@@ -901,7 +930,9 @@ os.makedirs(CONFIG_DIR, exist_ok=True)
|
|
| 901 |
|
| 902 |
SETTINGS_FILE = os.path.join(CONFIG_DIR, "settings.json")
|
| 903 |
FILES_ROOT = os.getcwd() # Root directory for file tree (current working directory)
|
| 904 |
-
|
|
|
|
|
|
|
| 905 |
|
| 906 |
# Directories/patterns to exclude from file tree
|
| 907 |
FILES_EXCLUDE = {
|
|
@@ -964,6 +995,188 @@ async def save_settings(settings: dict):
|
|
| 964 |
raise HTTPException(status_code=500, detail=f"Failed to save settings: {str(e)}")
|
| 965 |
|
| 966 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 967 |
# ============================================
|
| 968 |
# Workspace State Persistence
|
| 969 |
# ============================================
|
|
@@ -994,6 +1207,8 @@ def get_default_workspace():
|
|
| 994 |
@app.get("/api/workspace")
|
| 995 |
async def get_workspace():
|
| 996 |
"""Load workspace state from workspace.json file"""
|
|
|
|
|
|
|
| 997 |
try:
|
| 998 |
if os.path.exists(WORKSPACE_FILE):
|
| 999 |
with open(WORKSPACE_FILE, "r") as f:
|
|
@@ -1009,6 +1224,8 @@ async def get_workspace():
|
|
| 1009 |
@app.post("/api/workspace")
|
| 1010 |
async def save_workspace(workspace: dict):
|
| 1011 |
"""Save workspace state to workspace.json file"""
|
|
|
|
|
|
|
| 1012 |
try:
|
| 1013 |
with open(WORKSPACE_FILE, "w") as f:
|
| 1014 |
json.dump(workspace, f, indent=2)
|
|
@@ -1020,6 +1237,8 @@ async def save_workspace(workspace: dict):
|
|
| 1020 |
@app.post("/api/workspace/clear")
|
| 1021 |
async def clear_workspace():
|
| 1022 |
"""Clear workspace and start fresh"""
|
|
|
|
|
|
|
| 1023 |
try:
|
| 1024 |
default_workspace = get_default_workspace()
|
| 1025 |
with open(WORKSPACE_FILE, "w") as f:
|
|
@@ -1190,28 +1409,22 @@ def start():
|
|
| 1190 |
# Set custom config directory if provided
|
| 1191 |
if args.config_dir:
|
| 1192 |
set_config_dir(args.config_dir)
|
| 1193 |
-
|
| 1194 |
|
| 1195 |
# Set custom workspace directory if provided
|
| 1196 |
if args.workspace_dir:
|
| 1197 |
-
global FILES_ROOT,
|
| 1198 |
FILES_ROOT = os.path.abspath(args.workspace_dir)
|
| 1199 |
-
os.
|
| 1200 |
-
WORKSPACE_FILE = os.path.join(FILES_ROOT, "workspace.json")
|
| 1201 |
|
| 1202 |
-
#
|
| 1203 |
-
|
| 1204 |
-
print("Clearing workspace...")
|
| 1205 |
-
default_workspace = get_default_workspace()
|
| 1206 |
-
with open(WORKSPACE_FILE, "w") as f:
|
| 1207 |
-
json.dump(default_workspace, f, indent=2)
|
| 1208 |
-
print("Workspace cleared.")
|
| 1209 |
|
| 1210 |
url = f"http://localhost:{args.port}"
|
| 1211 |
-
|
| 1212 |
-
|
| 1213 |
-
|
| 1214 |
-
|
| 1215 |
|
| 1216 |
# Open browser after a short delay to let the server start
|
| 1217 |
if not args.no_browser:
|
|
|
|
| 7 |
import json
|
| 8 |
import httpx
|
| 9 |
import os
|
| 10 |
+
import logging
|
| 11 |
from datetime import datetime
|
| 12 |
|
| 13 |
+
# Configure logging
|
| 14 |
+
logging.basicConfig(
|
| 15 |
+
level=logging.INFO,
|
| 16 |
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
| 17 |
+
)
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
app = FastAPI(title="Productive API")
|
| 21 |
|
| 22 |
+
|
| 23 |
+
# ============================================
|
| 24 |
+
# Environment Variable Fallbacks
|
| 25 |
+
# ============================================
|
| 26 |
+
# These allow API keys to be set via environment variables as fallback
|
| 27 |
+
# when not configured in settings. Useful for HF Spaces deployment.
|
| 28 |
+
|
| 29 |
+
def get_env_fallback(value: Optional[str], env_var: str) -> Optional[str]:
|
| 30 |
+
"""Return value if set, otherwise check environment variable."""
|
| 31 |
+
if value:
|
| 32 |
+
return value
|
| 33 |
+
return os.environ.get(env_var)
|
| 34 |
+
|
| 35 |
# Get the project root directory (parent of backend/)
|
| 36 |
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
| 37 |
|
|
|
|
| 43 |
E2B_AVAILABLE = True
|
| 44 |
except ImportError:
|
| 45 |
E2B_AVAILABLE = False
|
| 46 |
+
logger.warning("E2B not available. Code execution will be disabled.")
|
| 47 |
|
| 48 |
+
# For research
|
|
|
|
|
|
|
| 49 |
try:
|
| 50 |
+
from research import stream_research
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 51 |
RESEARCH_AVAILABLE = True
|
| 52 |
except ImportError as e:
|
| 53 |
RESEARCH_AVAILABLE = False
|
| 54 |
+
logger.warning(f"Research dependencies not available ({e}). Install with: pip install trafilatura requests")
|
| 55 |
|
| 56 |
# For command center with tool-based launching
|
| 57 |
try:
|
|
|
|
| 59 |
COMMAND_AVAILABLE = True
|
| 60 |
except ImportError:
|
| 61 |
COMMAND_AVAILABLE = False
|
| 62 |
+
logger.warning("Command center tool handling not available.")
|
| 63 |
|
| 64 |
# Session management for sandboxes
|
| 65 |
SANDBOXES: Dict[str, any] = {}
|
|
|
|
| 287 |
endpoint: str # User's configured LLM endpoint
|
| 288 |
token: Optional[str] = None # Optional auth token
|
| 289 |
model: Optional[str] = "gpt-4" # Model name
|
| 290 |
+
extra_params: Optional[Dict] = None # Extra parameters for API calls (e.g., enable_thinking)
|
| 291 |
e2b_key: Optional[str] = None # E2B API key for code execution
|
| 292 |
serper_key: Optional[str] = None # Serper API key for research
|
| 293 |
research_sub_agent_model: Optional[str] = None # Model for research sub-tasks
|
| 294 |
research_sub_agent_endpoint: Optional[str] = None # Endpoint for research sub-agent (may differ from main)
|
| 295 |
research_sub_agent_token: Optional[str] = None # Token for research sub-agent endpoint
|
| 296 |
+
research_sub_agent_extra_params: Optional[Dict] = None # Extra params for research sub-agent
|
| 297 |
research_parallel_workers: Optional[int] = None # Number of parallel workers for research
|
| 298 |
research_max_websites: Optional[int] = None # Max websites to analyze per research session
|
| 299 |
notebook_id: Optional[str] = None # Unique notebook/tab ID for session management
|
|
|
|
| 324 |
e2b_key: str,
|
| 325 |
session_id: str,
|
| 326 |
tab_id: str = "default",
|
| 327 |
+
frontend_context: Optional[Dict] = None,
|
| 328 |
+
extra_params: Optional[Dict] = None
|
| 329 |
):
|
| 330 |
"""Handle code notebook with execution capabilities"""
|
| 331 |
|
|
|
|
| 358 |
record_api_call(tab_id, full_messages)
|
| 359 |
|
| 360 |
# Stream code execution
|
| 361 |
+
for update in stream_code_execution(client, model, full_messages, sbx, files_root=FILES_ROOT, extra_params=extra_params):
|
| 362 |
# Forward updates to frontend
|
| 363 |
yield f"data: {json.dumps(update)}\n\n"
|
| 364 |
|
| 365 |
except Exception as e:
|
| 366 |
import traceback
|
| 367 |
error_message = f"Code execution error: {str(e)}\n{traceback.format_exc()}"
|
| 368 |
+
logger.error(error_message)
|
| 369 |
|
| 370 |
# Check if this is a sandbox timeout error (502)
|
| 371 |
error_str = str(e)
|
|
|
|
| 390 |
yield f"data: {json.dumps({'type': 'info', 'content': 'New sandbox created. Retrying execution...'})}\n\n"
|
| 391 |
|
| 392 |
# Retry code execution with new sandbox
|
| 393 |
+
for update in stream_code_execution(client, model, full_messages, sbx, files_root=FILES_ROOT, extra_params=extra_params):
|
| 394 |
yield f"data: {json.dumps(update)}\n\n"
|
| 395 |
|
| 396 |
except Exception as retry_error:
|
|
|
|
| 410 |
max_websites: Optional[int] = None,
|
| 411 |
tab_id: str = "default",
|
| 412 |
sub_agent_endpoint: Optional[str] = None,
|
| 413 |
+
sub_agent_token: Optional[str] = None,
|
| 414 |
+
extra_params: Optional[Dict] = None,
|
| 415 |
+
sub_agent_extra_params: Optional[Dict] = None
|
| 416 |
):
|
| 417 |
"""Handle research notebook with web search"""
|
| 418 |
|
|
|
|
| 457 |
max_sites = max_websites if max_websites else 50
|
| 458 |
|
| 459 |
# Stream research
|
| 460 |
+
for update in stream_research(client, model, question, serper_key, max_websites=max_sites, system_prompt=system_prompt, sub_agent_model=analysis_model, parallel_workers=workers, sub_agent_client=sub_agent_client, extra_params=extra_params, sub_agent_extra_params=sub_agent_extra_params):
|
| 461 |
yield f"data: {json.dumps(update)}\n\n"
|
| 462 |
|
| 463 |
except Exception as e:
|
| 464 |
import traceback
|
| 465 |
error_message = f"Research error: {str(e)}\n{traceback.format_exc()}"
|
| 466 |
+
logger.error(error_message)
|
| 467 |
yield f"data: {json.dumps({'type': 'error', 'content': error_message})}\n\n"
|
| 468 |
|
| 469 |
|
|
|
|
| 472 |
endpoint: str,
|
| 473 |
token: Optional[str],
|
| 474 |
model: str,
|
| 475 |
+
tab_id: str = "0",
|
| 476 |
+
extra_params: Optional[Dict] = None
|
| 477 |
):
|
| 478 |
"""Handle command center with tool-based notebook launching"""
|
| 479 |
|
| 480 |
if not COMMAND_AVAILABLE:
|
| 481 |
# Fallback to regular chat if command tools not available
|
| 482 |
+
async for chunk in stream_chat_response(messages, endpoint, token, model, "command", tab_id, extra_params):
|
| 483 |
yield chunk
|
| 484 |
return
|
| 485 |
|
|
|
|
| 497 |
system_prompt = get_system_prompt("command")
|
| 498 |
|
| 499 |
# Build full messages: system + stored history + new messages
|
| 500 |
+
logger.debug(f"tab_id={tab_id}, incoming messages={messages}")
|
| 501 |
+
logger.debug(f"stored history length={len(CONVERSATION_HISTORY[tab_id])}")
|
| 502 |
|
| 503 |
# On first call, history is empty, so just use incoming messages
|
| 504 |
# On subsequent calls, append only new messages
|
| 505 |
if not CONVERSATION_HISTORY[tab_id]:
|
| 506 |
# First message - use all incoming messages
|
| 507 |
full_messages = [{"role": "system", "content": system_prompt}] + messages
|
| 508 |
+
logger.debug(f"First call, full_messages length={len(full_messages)}")
|
| 509 |
else:
|
| 510 |
# Subsequent messages - use stored history + incoming messages
|
| 511 |
# The incoming messages should only be the new user message
|
| 512 |
full_messages = [{"role": "system", "content": system_prompt}] + CONVERSATION_HISTORY[tab_id] + messages
|
| 513 |
+
logger.debug(f"Subsequent call, full_messages length={len(full_messages)}")
|
| 514 |
|
| 515 |
# Store for debugging
|
| 516 |
record_api_call(tab_id, full_messages)
|
| 517 |
|
| 518 |
# Stream command center execution
|
| 519 |
+
for update in stream_command_center(client, model, full_messages, extra_params=extra_params):
|
| 520 |
# Forward updates to frontend
|
| 521 |
yield f"data: {json.dumps(update)}\n\n"
|
| 522 |
|
|
|
|
| 531 |
except Exception as e:
|
| 532 |
import traceback
|
| 533 |
error_message = f"Command center error: {str(e)}\n{traceback.format_exc()}"
|
| 534 |
+
logger.error(error_message)
|
| 535 |
yield f"data: {json.dumps({'type': 'error', 'content': error_message})}\n\n"
|
| 536 |
|
| 537 |
|
|
|
|
| 541 |
token: Optional[str],
|
| 542 |
model: str,
|
| 543 |
notebook_type: str,
|
| 544 |
+
tab_id: str = "default",
|
| 545 |
+
extra_params: Optional[Dict] = None
|
| 546 |
):
|
| 547 |
"""Proxy stream from user's configured LLM endpoint"""
|
| 548 |
|
| 549 |
try:
|
| 550 |
+
logger.info(f"Stream request: endpoint={endpoint}, model={model}, messages={len(messages)}, token={'yes' if token else 'no'}")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 551 |
|
| 552 |
# Prepare messages with appropriate system prompt based on notebook type (with file tree)
|
| 553 |
system_prompt = get_system_prompt(notebook_type)
|
|
|
|
| 562 |
if not token and "huggingface.co" in endpoint:
|
| 563 |
token = os.getenv("HF_TOKEN")
|
| 564 |
if token:
|
| 565 |
+
logger.debug("Using HF_TOKEN from environment for Hugging Face endpoint")
|
| 566 |
else:
|
| 567 |
+
logger.warning("No token provided and HF_TOKEN not found in environment!")
|
| 568 |
|
| 569 |
# Prepare headers
|
| 570 |
headers = {
|
|
|
|
| 580 |
"stream": True,
|
| 581 |
"temperature": 0.7
|
| 582 |
}
|
| 583 |
+
# Apply any extra params (e.g., enable_thinking)
|
| 584 |
+
if extra_params:
|
| 585 |
+
request_body.update(extra_params)
|
| 586 |
|
| 587 |
+
logger.debug(f"Sending request to: {endpoint}/chat/completions")
|
| 588 |
|
| 589 |
# Make streaming request to user's endpoint
|
| 590 |
async with httpx.AsyncClient(timeout=60.0) as client:
|
|
|
|
| 598 |
error_text = await response.aread()
|
| 599 |
error_detail = error_text.decode() if error_text else f"Status {response.status_code}"
|
| 600 |
error_message = f"LLM API error ({response.status_code}): {error_detail}"
|
| 601 |
+
logger.error(f"LLM API error: {error_message}")
|
| 602 |
yield f"data: {json.dumps({'type': 'error', 'content': error_message})}\n\n"
|
| 603 |
return
|
| 604 |
|
|
|
|
| 630 |
|
| 631 |
except httpx.RequestError as e:
|
| 632 |
error_message = f"Connection error to LLM endpoint: {str(e)}"
|
| 633 |
+
logger.error(f"HTTP Request Error: {e}")
|
| 634 |
yield f"data: {json.dumps({'type': 'error', 'content': error_message})}\n\n"
|
| 635 |
except Exception as e:
|
| 636 |
import traceback
|
| 637 |
error_message = f"Error: {str(e) or 'Unknown error occurred'}"
|
| 638 |
+
logger.error(f"Exception in stream_chat_response: {e}\n{traceback.format_exc()}")
|
|
|
|
| 639 |
yield f"data: {json.dumps({'type': 'error', 'content': error_message})}\n\n"
|
| 640 |
|
| 641 |
|
|
|
|
| 702 |
async def chat_stream(request: ChatRequest):
|
| 703 |
"""Proxy streaming chat to user's configured LLM endpoint"""
|
| 704 |
|
| 705 |
+
logger.debug(f"Chat stream request: notebook_type={request.notebook_type}")
|
| 706 |
|
| 707 |
if not request.messages:
|
| 708 |
raise HTTPException(status_code=400, detail="Messages are required")
|
|
|
|
| 719 |
# Convert frontend_context to dict if provided
|
| 720 |
frontend_context = request.frontend_context.model_dump() if request.frontend_context else None
|
| 721 |
|
| 722 |
+
# Apply environment variable fallbacks for API keys
|
| 723 |
+
e2b_key = get_env_fallback(request.e2b_key, "E2B_API_KEY")
|
| 724 |
+
serper_key = get_env_fallback(request.serper_key, "SERPER_API_KEY")
|
| 725 |
+
token = get_env_fallback(request.token, "LLM_API_KEY")
|
| 726 |
+
|
| 727 |
# Route to code execution handler for code notebooks
|
| 728 |
if request.notebook_type == "code":
|
| 729 |
# Use notebook_id as session key, fallback to "default" if not provided
|
|
|
|
| 733 |
stream_code_notebook(
|
| 734 |
messages,
|
| 735 |
request.endpoint,
|
| 736 |
+
token,
|
| 737 |
request.model or "gpt-4",
|
| 738 |
+
e2b_key or "",
|
| 739 |
session_id,
|
| 740 |
tab_id,
|
| 741 |
+
frontend_context,
|
| 742 |
+
request.extra_params
|
| 743 |
),
|
| 744 |
media_type="text/event-stream",
|
| 745 |
headers={
|
|
|
|
| 753 |
if request.notebook_type == "research":
|
| 754 |
# Use sub-agent endpoint/token if provided, otherwise fall back to main
|
| 755 |
sub_agent_endpoint = request.research_sub_agent_endpoint or request.endpoint
|
| 756 |
+
sub_agent_token = request.research_sub_agent_token if request.research_sub_agent_endpoint else token
|
| 757 |
|
| 758 |
return StreamingResponse(
|
| 759 |
stream_research_notebook(
|
| 760 |
messages,
|
| 761 |
request.endpoint,
|
| 762 |
+
token,
|
| 763 |
request.model or "gpt-4",
|
| 764 |
+
serper_key or "",
|
| 765 |
request.research_sub_agent_model,
|
| 766 |
request.research_parallel_workers,
|
| 767 |
None,
|
| 768 |
tab_id,
|
| 769 |
sub_agent_endpoint,
|
| 770 |
+
sub_agent_token,
|
| 771 |
+
request.extra_params,
|
| 772 |
+
request.research_sub_agent_extra_params
|
| 773 |
),
|
| 774 |
media_type="text/event-stream",
|
| 775 |
headers={
|
|
|
|
| 785 |
stream_command_center_notebook(
|
| 786 |
messages,
|
| 787 |
request.endpoint,
|
| 788 |
+
token,
|
| 789 |
request.model or "gpt-4",
|
| 790 |
+
tab_id,
|
| 791 |
+
request.extra_params
|
| 792 |
),
|
| 793 |
media_type="text/event-stream",
|
| 794 |
headers={
|
|
|
|
| 803 |
stream_chat_response(
|
| 804 |
messages,
|
| 805 |
request.endpoint,
|
| 806 |
+
token,
|
| 807 |
request.model or "gpt-4",
|
| 808 |
request.notebook_type,
|
| 809 |
+
tab_id,
|
| 810 |
+
request.extra_params
|
| 811 |
),
|
| 812 |
media_type="text/event-stream",
|
| 813 |
headers={
|
|
|
|
| 930 |
|
| 931 |
SETTINGS_FILE = os.path.join(CONFIG_DIR, "settings.json")
|
| 932 |
FILES_ROOT = os.getcwd() # Root directory for file tree (current working directory)
|
| 933 |
+
SESSIONS_ROOT = os.path.join(FILES_ROOT, "sessions") # Sessions are stored in sessions/ subfolder
|
| 934 |
+
CURRENT_SESSION = None # Name of the current session (None = no session selected)
|
| 935 |
+
WORKSPACE_FILE = None # Set when session is selected
|
| 936 |
|
| 937 |
# Directories/patterns to exclude from file tree
|
| 938 |
FILES_EXCLUDE = {
|
|
|
|
| 995 |
raise HTTPException(status_code=500, detail=f"Failed to save settings: {str(e)}")
|
| 996 |
|
| 997 |
|
| 998 |
+
# ============================================
|
| 999 |
+
# Session Management
|
| 1000 |
+
# ============================================
|
| 1001 |
+
|
| 1002 |
+
def get_session_path(session_name: str) -> str:
|
| 1003 |
+
"""Get the full path for a session folder"""
|
| 1004 |
+
return os.path.join(SESSIONS_ROOT, session_name)
|
| 1005 |
+
|
| 1006 |
+
|
| 1007 |
+
def list_sessions() -> list:
|
| 1008 |
+
"""List all available sessions"""
|
| 1009 |
+
sessions = []
|
| 1010 |
+
if os.path.exists(SESSIONS_ROOT):
|
| 1011 |
+
for name in os.listdir(SESSIONS_ROOT):
|
| 1012 |
+
session_path = os.path.join(SESSIONS_ROOT, name)
|
| 1013 |
+
workspace_file = os.path.join(session_path, "workspace.json")
|
| 1014 |
+
if os.path.isdir(session_path) and os.path.exists(workspace_file):
|
| 1015 |
+
# Get modification time
|
| 1016 |
+
mtime = os.path.getmtime(workspace_file)
|
| 1017 |
+
sessions.append({
|
| 1018 |
+
"name": name,
|
| 1019 |
+
"modified": mtime
|
| 1020 |
+
})
|
| 1021 |
+
# Sort by modification time (most recent first)
|
| 1022 |
+
sessions.sort(key=lambda x: x["modified"], reverse=True)
|
| 1023 |
+
return sessions
|
| 1024 |
+
|
| 1025 |
+
|
| 1026 |
+
def select_session(session_name: str) -> bool:
|
| 1027 |
+
"""Select a session and update global paths"""
|
| 1028 |
+
global CURRENT_SESSION, WORKSPACE_FILE, FILES_ROOT, CONVERSATION_HISTORY, MESSAGE_HISTORY
|
| 1029 |
+
|
| 1030 |
+
session_path = get_session_path(session_name)
|
| 1031 |
+
workspace_file = os.path.join(session_path, "workspace.json")
|
| 1032 |
+
|
| 1033 |
+
if not os.path.exists(session_path):
|
| 1034 |
+
return False
|
| 1035 |
+
|
| 1036 |
+
CURRENT_SESSION = session_name
|
| 1037 |
+
WORKSPACE_FILE = workspace_file
|
| 1038 |
+
# FILES_ROOT stays at the original working directory (not session-specific)
|
| 1039 |
+
|
| 1040 |
+
# Clear backend state when switching sessions
|
| 1041 |
+
CONVERSATION_HISTORY.clear()
|
| 1042 |
+
MESSAGE_HISTORY.clear()
|
| 1043 |
+
|
| 1044 |
+
return True
|
| 1045 |
+
|
| 1046 |
+
|
| 1047 |
+
def create_session(session_name: str) -> bool:
|
| 1048 |
+
"""Create a new session folder with default workspace"""
|
| 1049 |
+
session_path = get_session_path(session_name)
|
| 1050 |
+
|
| 1051 |
+
if os.path.exists(session_path):
|
| 1052 |
+
return False # Session already exists
|
| 1053 |
+
|
| 1054 |
+
os.makedirs(session_path, exist_ok=True)
|
| 1055 |
+
|
| 1056 |
+
# Create default workspace.json
|
| 1057 |
+
workspace_file = os.path.join(session_path, "workspace.json")
|
| 1058 |
+
with open(workspace_file, "w") as f:
|
| 1059 |
+
json.dump(get_default_workspace(), f, indent=2)
|
| 1060 |
+
|
| 1061 |
+
return True
|
| 1062 |
+
|
| 1063 |
+
|
| 1064 |
+
@app.get("/api/sessions/random-name")
|
| 1065 |
+
async def api_random_session_name():
|
| 1066 |
+
"""Get a random isotope name for session naming.
|
| 1067 |
+
Uses two-stage sampling: 1) pick random element, 2) pick random isotope.
|
| 1068 |
+
This gives equal weight to all elements regardless of isotope count.
|
| 1069 |
+
"""
|
| 1070 |
+
import random
|
| 1071 |
+
from defaultnames import ISOTOPES
|
| 1072 |
+
# Two-stage sampling: first pick element, then pick isotope
|
| 1073 |
+
element = random.choice(list(ISOTOPES.keys()))
|
| 1074 |
+
mass_number = random.choice(ISOTOPES[element])
|
| 1075 |
+
return {"name": f"{element}-{mass_number}"}
|
| 1076 |
+
|
| 1077 |
+
|
| 1078 |
+
@app.get("/api/sessions")
|
| 1079 |
+
async def api_list_sessions():
|
| 1080 |
+
"""List all available sessions"""
|
| 1081 |
+
return {
|
| 1082 |
+
"sessions": list_sessions(),
|
| 1083 |
+
"current": CURRENT_SESSION,
|
| 1084 |
+
"sessionsRoot": SESSIONS_ROOT
|
| 1085 |
+
}
|
| 1086 |
+
|
| 1087 |
+
|
| 1088 |
+
@app.post("/api/sessions")
|
| 1089 |
+
async def api_create_session(data: dict):
|
| 1090 |
+
"""Create a new session"""
|
| 1091 |
+
name = data.get("name", "").strip()
|
| 1092 |
+
if not name:
|
| 1093 |
+
raise HTTPException(status_code=400, detail="Session name is required")
|
| 1094 |
+
|
| 1095 |
+
# Sanitize name for filesystem
|
| 1096 |
+
safe_name = "".join(c for c in name if c.isalnum() or c in "- _").strip()
|
| 1097 |
+
if not safe_name:
|
| 1098 |
+
raise HTTPException(status_code=400, detail="Invalid session name")
|
| 1099 |
+
|
| 1100 |
+
if not create_session(safe_name):
|
| 1101 |
+
raise HTTPException(status_code=409, detail="Session already exists")
|
| 1102 |
+
|
| 1103 |
+
# Auto-select the new session
|
| 1104 |
+
select_session(safe_name)
|
| 1105 |
+
|
| 1106 |
+
return {"success": True, "name": safe_name}
|
| 1107 |
+
|
| 1108 |
+
|
| 1109 |
+
@app.post("/api/sessions/select")
|
| 1110 |
+
async def api_select_session(data: dict):
|
| 1111 |
+
"""Select an existing session"""
|
| 1112 |
+
name = data.get("name", "").strip()
|
| 1113 |
+
if not name:
|
| 1114 |
+
raise HTTPException(status_code=400, detail="Session name is required")
|
| 1115 |
+
|
| 1116 |
+
if not select_session(name):
|
| 1117 |
+
raise HTTPException(status_code=404, detail="Session not found")
|
| 1118 |
+
|
| 1119 |
+
return {"success": True, "name": name}
|
| 1120 |
+
|
| 1121 |
+
|
| 1122 |
+
@app.post("/api/sessions/rename")
|
| 1123 |
+
async def api_rename_session(data: dict):
|
| 1124 |
+
"""Rename a session"""
|
| 1125 |
+
old_name = data.get("oldName", "").strip()
|
| 1126 |
+
new_name = data.get("newName", "").strip()
|
| 1127 |
+
|
| 1128 |
+
if not old_name or not new_name:
|
| 1129 |
+
raise HTTPException(status_code=400, detail="Both oldName and newName are required")
|
| 1130 |
+
|
| 1131 |
+
# Sanitize new name
|
| 1132 |
+
safe_new_name = "".join(c for c in new_name if c.isalnum() or c in "- _").strip()
|
| 1133 |
+
if not safe_new_name:
|
| 1134 |
+
raise HTTPException(status_code=400, detail="Invalid new session name")
|
| 1135 |
+
|
| 1136 |
+
old_path = get_session_path(old_name)
|
| 1137 |
+
new_path = get_session_path(safe_new_name)
|
| 1138 |
+
|
| 1139 |
+
if not os.path.exists(old_path):
|
| 1140 |
+
raise HTTPException(status_code=404, detail="Session not found")
|
| 1141 |
+
|
| 1142 |
+
if os.path.exists(new_path):
|
| 1143 |
+
raise HTTPException(status_code=409, detail="A session with that name already exists")
|
| 1144 |
+
|
| 1145 |
+
os.rename(old_path, new_path)
|
| 1146 |
+
|
| 1147 |
+
# Update current session if it was renamed
|
| 1148 |
+
global CURRENT_SESSION, WORKSPACE_FILE
|
| 1149 |
+
if CURRENT_SESSION == old_name:
|
| 1150 |
+
CURRENT_SESSION = safe_new_name
|
| 1151 |
+
WORKSPACE_FILE = os.path.join(new_path, "workspace.json")
|
| 1152 |
+
|
| 1153 |
+
return {"success": True, "name": safe_new_name}
|
| 1154 |
+
|
| 1155 |
+
|
| 1156 |
+
@app.delete("/api/sessions/{session_name}")
|
| 1157 |
+
async def api_delete_session(session_name: str):
|
| 1158 |
+
"""Delete a session"""
|
| 1159 |
+
import shutil
|
| 1160 |
+
|
| 1161 |
+
if not session_name:
|
| 1162 |
+
raise HTTPException(status_code=400, detail="Session name is required")
|
| 1163 |
+
|
| 1164 |
+
session_path = get_session_path(session_name)
|
| 1165 |
+
|
| 1166 |
+
if not os.path.exists(session_path):
|
| 1167 |
+
raise HTTPException(status_code=404, detail="Session not found")
|
| 1168 |
+
|
| 1169 |
+
# Don't allow deleting the current session
|
| 1170 |
+
global CURRENT_SESSION
|
| 1171 |
+
if CURRENT_SESSION == session_name:
|
| 1172 |
+
raise HTTPException(status_code=400, detail="Cannot delete the currently active session")
|
| 1173 |
+
|
| 1174 |
+
# Delete the session folder
|
| 1175 |
+
shutil.rmtree(session_path)
|
| 1176 |
+
|
| 1177 |
+
return {"success": True}
|
| 1178 |
+
|
| 1179 |
+
|
| 1180 |
# ============================================
|
| 1181 |
# Workspace State Persistence
|
| 1182 |
# ============================================
|
|
|
|
| 1207 |
@app.get("/api/workspace")
|
| 1208 |
async def get_workspace():
|
| 1209 |
"""Load workspace state from workspace.json file"""
|
| 1210 |
+
if WORKSPACE_FILE is None:
|
| 1211 |
+
raise HTTPException(status_code=400, detail="No session selected")
|
| 1212 |
try:
|
| 1213 |
if os.path.exists(WORKSPACE_FILE):
|
| 1214 |
with open(WORKSPACE_FILE, "r") as f:
|
|
|
|
| 1224 |
@app.post("/api/workspace")
|
| 1225 |
async def save_workspace(workspace: dict):
|
| 1226 |
"""Save workspace state to workspace.json file"""
|
| 1227 |
+
if WORKSPACE_FILE is None:
|
| 1228 |
+
raise HTTPException(status_code=400, detail="No session selected")
|
| 1229 |
try:
|
| 1230 |
with open(WORKSPACE_FILE, "w") as f:
|
| 1231 |
json.dump(workspace, f, indent=2)
|
|
|
|
| 1237 |
@app.post("/api/workspace/clear")
|
| 1238 |
async def clear_workspace():
|
| 1239 |
"""Clear workspace and start fresh"""
|
| 1240 |
+
if WORKSPACE_FILE is None:
|
| 1241 |
+
raise HTTPException(status_code=400, detail="No session selected")
|
| 1242 |
try:
|
| 1243 |
default_workspace = get_default_workspace()
|
| 1244 |
with open(WORKSPACE_FILE, "w") as f:
|
|
|
|
| 1409 |
# Set custom config directory if provided
|
| 1410 |
if args.config_dir:
|
| 1411 |
set_config_dir(args.config_dir)
|
| 1412 |
+
logger.info(f"Using config directory: {args.config_dir}")
|
| 1413 |
|
| 1414 |
# Set custom workspace directory if provided
|
| 1415 |
if args.workspace_dir:
|
| 1416 |
+
global FILES_ROOT, SESSIONS_ROOT
|
| 1417 |
FILES_ROOT = os.path.abspath(args.workspace_dir)
|
| 1418 |
+
SESSIONS_ROOT = os.path.join(FILES_ROOT, "sessions")
|
|
|
|
| 1419 |
|
| 1420 |
+
# Ensure sessions directory exists
|
| 1421 |
+
os.makedirs(SESSIONS_ROOT, exist_ok=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1422 |
|
| 1423 |
url = f"http://localhost:{args.port}"
|
| 1424 |
+
logger.info(f"Starting Productive server...")
|
| 1425 |
+
logger.info(f"Config directory: {CONFIG_DIR}")
|
| 1426 |
+
logger.info(f"Sessions directory: {SESSIONS_ROOT}")
|
| 1427 |
+
logger.info(f"Opening {url} in your browser...")
|
| 1428 |
|
| 1429 |
# Open browser after a short delay to let the server start
|
| 1430 |
if not args.no_browser:
|
backend/research.py
CHANGED
|
@@ -1,12 +1,20 @@
|
|
| 1 |
"""
|
| 2 |
-
Research notebook backend -
|
|
|
|
|
|
|
|
|
|
| 3 |
"""
|
| 4 |
import json
|
|
|
|
| 5 |
import os
|
| 6 |
-
|
|
|
|
|
|
|
| 7 |
import requests
|
| 8 |
import trafilatura
|
| 9 |
|
|
|
|
|
|
|
| 10 |
|
| 11 |
def search_web(query: str, api_key: str, num_results: int = 10) -> List[Dict[str, str]]:
|
| 12 |
"""Search the web using Serper API"""
|
|
@@ -33,7 +41,7 @@ def search_web(query: str, api_key: str, num_results: int = 10) -> List[Dict[str
|
|
| 33 |
})
|
| 34 |
return results
|
| 35 |
except Exception as e:
|
| 36 |
-
|
| 37 |
return []
|
| 38 |
|
| 39 |
|
|
@@ -51,148 +59,167 @@ def extract_content(url: str) -> Optional[str]:
|
|
| 51 |
)
|
| 52 |
return text
|
| 53 |
except Exception as e:
|
| 54 |
-
|
| 55 |
return None
|
| 56 |
|
| 57 |
|
| 58 |
-
def
|
| 59 |
-
"""Generate
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
{f"Current knowledge gathered: {existing_knowledge}" if existing_knowledge else ""}
|
| 63 |
|
| 64 |
-
Generate 3-5 specific, targeted search queries that would help answer this question.
|
| 65 |
-
Focus on aspects not yet covered or that need more depth.
|
| 66 |
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
messages = [{"role": "user", "content": prompt}]
|
| 71 |
-
|
| 72 |
-
try:
|
| 73 |
-
response = client.chat.completions.create(
|
| 74 |
-
model=model,
|
| 75 |
-
messages=messages,
|
| 76 |
-
max_tokens=500,
|
| 77 |
-
temperature=0.7,
|
| 78 |
-
)
|
| 79 |
|
| 80 |
-
content = response.choices[0].message.content
|
| 81 |
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
except Exception as e:
|
| 90 |
-
print(f"Query generation error: {e}")
|
| 91 |
|
| 92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
|
| 95 |
-
|
| 96 |
-
"""Analyze content for relevance to research question"""
|
| 97 |
-
prompt = f"""Research Question: "{user_question}"
|
| 98 |
|
| 99 |
-
Source: {source_url}
|
| 100 |
|
| 101 |
-
|
| 102 |
-
|
|
|
|
|
|
|
| 103 |
|
| 104 |
-
Extract ONLY the information relevant to answering the research question.
|
| 105 |
-
Be concise and factual. Include specific claims, data, or insights.
|
| 106 |
-
If nothing is relevant, return "No relevant information found."
|
| 107 |
|
| 108 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 109 |
|
| 110 |
-
messages = [{"role": "user", "content": prompt}]
|
| 111 |
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
)
|
| 119 |
-
return response.choices[0].message.content
|
| 120 |
-
except Exception as e:
|
| 121 |
-
print(f"Analysis error: {e}")
|
| 122 |
-
return "Error analyzing content"
|
| 123 |
-
|
| 124 |
|
| 125 |
-
|
| 126 |
-
"""Assess if we have enough information"""
|
| 127 |
-
findings_text = "\n\n".join([f"Source: {f['source']}\n{f['analysis']}" for f in findings])
|
| 128 |
|
| 129 |
-
prompt = f"""Research Question: "{user_question}"
|
| 130 |
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 162 |
|
| 163 |
-
|
|
|
|
| 164 |
|
| 165 |
|
| 166 |
-
def
|
| 167 |
-
"""
|
| 168 |
-
|
| 169 |
-
f"Source {i+1}: {f['source']}\n{f['analysis']}"
|
| 170 |
-
for i, f in enumerate(findings)
|
| 171 |
-
])
|
| 172 |
|
| 173 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
-
|
| 176 |
|
| 177 |
-
|
|
|
|
|
|
|
|
|
|
| 178 |
|
| 179 |
-
|
|
|
|
|
|
|
|
|
|
| 180 |
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
|
|
|
| 184 |
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
temperature=0.7,
|
| 191 |
-
)
|
| 192 |
-
return response.choices[0].message.content
|
| 193 |
-
except Exception as e:
|
| 194 |
-
print(f"Report generation error: {e}")
|
| 195 |
-
return "Error generating report"
|
| 196 |
|
| 197 |
|
| 198 |
def stream_research(
|
|
@@ -205,260 +232,245 @@ def stream_research(
|
|
| 205 |
system_prompt: str = "",
|
| 206 |
sub_agent_model: Optional[str] = None,
|
| 207 |
parallel_workers: int = 8,
|
| 208 |
-
|
|
|
|
| 209 |
):
|
| 210 |
"""
|
| 211 |
-
Stream deep research results
|
| 212 |
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
parallel_workers: Number of parallel workers for extract+analyze operations
|
| 216 |
-
sub_agent_client: Optional separate OpenAI client for sub-agent (if using different endpoint)
|
| 217 |
|
| 218 |
-
Yields
|
| 219 |
-
dict: Updates with type 'progress', 'source', 'query_stats', 'report', 'result', 'result_preview', 'done', or 'error'
|
| 220 |
"""
|
| 221 |
-
import concurrent.futures
|
| 222 |
-
import re
|
| 223 |
-
from collections import defaultdict
|
| 224 |
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
|
|
|
| 228 |
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
|
|
|
| 232 |
|
| 233 |
-
yield {
|
| 234 |
-
"type": "status",
|
| 235 |
-
"message": f"Starting research: {question}"
|
| 236 |
-
}
|
| 237 |
|
| 238 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 239 |
iteration += 1
|
| 240 |
|
| 241 |
-
#
|
| 242 |
-
yield {
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
"
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 262 |
}
|
| 263 |
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
try:
|
| 267 |
-
results = future.result()
|
| 268 |
-
query_results[query_idx] = results
|
| 269 |
-
except Exception as e:
|
| 270 |
-
print(f"Search failed for query {query_idx}: {e}")
|
| 271 |
-
query_results[query_idx] = []
|
| 272 |
-
|
| 273 |
-
# Interleave results from all queries
|
| 274 |
-
interleaved_urls = []
|
| 275 |
-
max_results = max((len(results) for results in query_results.values()), default=0)
|
| 276 |
-
|
| 277 |
-
for result_idx in range(max_results):
|
| 278 |
-
for query_idx in range(len(queries)):
|
| 279 |
-
if query_idx in query_results and result_idx < len(query_results[query_idx]):
|
| 280 |
-
result = query_results[query_idx][result_idx]
|
| 281 |
-
interleaved_urls.append({
|
| 282 |
-
'query_index': query_idx,
|
| 283 |
-
'query_text': queries[query_idx],
|
| 284 |
-
'url': result['url'],
|
| 285 |
-
'title': result['title']
|
| 286 |
-
})
|
| 287 |
|
| 288 |
-
#
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
# Process URLs in parallel with interleaved order
|
| 292 |
-
def process_url(url_data):
|
| 293 |
-
"""Extract content and analyze for a single URL"""
|
| 294 |
-
query_idx = url_data['query_index']
|
| 295 |
-
query_text = url_data['query_text']
|
| 296 |
-
url = url_data['url']
|
| 297 |
-
title = url_data['title']
|
| 298 |
-
|
| 299 |
-
try:
|
| 300 |
-
# Extract content
|
| 301 |
-
content = extract_content(url)
|
| 302 |
-
|
| 303 |
-
if not content or len(content) < 100:
|
| 304 |
-
return {
|
| 305 |
-
'query_index': query_idx,
|
| 306 |
-
'query_text': query_text,
|
| 307 |
-
'title': title,
|
| 308 |
-
'url': url,
|
| 309 |
-
'analysis': "Could not extract content from this page.",
|
| 310 |
-
'is_relevant': False,
|
| 311 |
-
'is_error': True,
|
| 312 |
-
'error_message': "Content extraction failed"
|
| 313 |
-
}
|
| 314 |
|
| 315 |
-
|
| 316 |
-
|
| 317 |
|
| 318 |
-
|
|
|
|
|
|
|
| 319 |
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
'
|
| 323 |
-
'title': title,
|
| 324 |
-
'url': url,
|
| 325 |
-
'analysis': analysis,
|
| 326 |
-
'is_relevant': is_relevant,
|
| 327 |
-
'is_error': False,
|
| 328 |
-
'error_message': ""
|
| 329 |
}
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
'
|
| 339 |
-
'error_message': str(e)
|
| 340 |
}
|
| 341 |
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
'title': result['title'],
|
| 363 |
-
'analysis': result['analysis']
|
| 364 |
-
})
|
| 365 |
-
else:
|
| 366 |
-
query_stats[result['query_index']]['irrelevant'] += 1
|
| 367 |
-
|
| 368 |
-
# Send source event
|
| 369 |
yield {
|
| 370 |
"type": "source",
|
| 371 |
-
"query_index":
|
| 372 |
-
"query_text":
|
| 373 |
-
"title": result
|
| 374 |
-
"url": result
|
| 375 |
-
"analysis": result
|
| 376 |
"finding_count": len(findings),
|
| 377 |
-
"is_relevant":
|
| 378 |
-
"is_error":
|
| 379 |
-
"error_message":
|
| 380 |
}
|
| 381 |
|
| 382 |
-
|
| 383 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 384 |
yield {
|
| 385 |
-
"type": "
|
| 386 |
-
"query_index":
|
| 387 |
-
"
|
| 388 |
-
"
|
| 389 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 390 |
}
|
| 391 |
|
| 392 |
-
|
| 393 |
-
|
| 394 |
|
| 395 |
-
#
|
| 396 |
-
|
| 397 |
-
yield {
|
| 398 |
-
"type": "status",
|
| 399 |
-
"message": "Evaluating information gathered..."
|
| 400 |
-
}
|
| 401 |
|
| 402 |
-
|
|
|
|
|
|
|
| 403 |
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
"sufficient": assessment.get('sufficient', False),
|
| 407 |
-
"missing_aspects": assessment.get('missing_aspects', []),
|
| 408 |
-
"findings_count": len(findings),
|
| 409 |
-
"reasoning": assessment.get('reasoning', '')
|
| 410 |
-
}
|
| 411 |
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
}
|
| 417 |
-
break
|
| 418 |
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
|
| 424 |
-
|
| 425 |
-
|
| 426 |
-
|
| 427 |
-
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
|
| 432 |
-
|
| 433 |
-
if result_match:
|
| 434 |
-
result_content = result_match.group(1).strip()
|
| 435 |
-
print("\n" + "="*80)
|
| 436 |
-
print("EXTRACTED RESULT CONTENT:")
|
| 437 |
-
print("="*80)
|
| 438 |
-
print(result_content)
|
| 439 |
-
print("="*80 + "\n")
|
| 440 |
-
else:
|
| 441 |
-
# If no result tags, use the full report
|
| 442 |
-
result_content = report
|
| 443 |
-
print(f"Warning: No <result> tags found in report, using full content")
|
| 444 |
-
|
| 445 |
-
# Send result preview to research notebook
|
| 446 |
-
yield {
|
| 447 |
-
"type": "result_preview",
|
| 448 |
-
"content": result_content,
|
| 449 |
-
"figures": {} # Research doesn't generate figures
|
| 450 |
-
}
|
| 451 |
-
|
| 452 |
-
# Send result to command center
|
| 453 |
-
yield {
|
| 454 |
-
"type": "result",
|
| 455 |
-
"content": result_content,
|
| 456 |
-
"figures": {}
|
| 457 |
-
}
|
| 458 |
-
else:
|
| 459 |
-
yield {
|
| 460 |
-
"type": "error",
|
| 461 |
-
"content": "No relevant information found. Please try a different question."
|
| 462 |
-
}
|
| 463 |
|
| 464 |
yield {"type": "done"}
|
|
|
|
| 1 |
"""
|
| 2 |
+
Research notebook backend using DR-TULU model - model-driven deep research
|
| 3 |
+
|
| 4 |
+
DR-TULU drives the research loop - it decides when to search, what to search for,
|
| 5 |
+
and when it has enough information to answer.
|
| 6 |
"""
|
| 7 |
import json
|
| 8 |
+
import logging
|
| 9 |
import os
|
| 10 |
+
import re
|
| 11 |
+
import uuid
|
| 12 |
+
from typing import List, Dict, Optional, Tuple
|
| 13 |
import requests
|
| 14 |
import trafilatura
|
| 15 |
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
|
| 19 |
def search_web(query: str, api_key: str, num_results: int = 10) -> List[Dict[str, str]]:
|
| 20 |
"""Search the web using Serper API"""
|
|
|
|
| 41 |
})
|
| 42 |
return results
|
| 43 |
except Exception as e:
|
| 44 |
+
logger.error(f"Search error: {e}")
|
| 45 |
return []
|
| 46 |
|
| 47 |
|
|
|
|
| 59 |
)
|
| 60 |
return text
|
| 61 |
except Exception as e:
|
| 62 |
+
logger.error(f"Content extraction error for {url}: {e}")
|
| 63 |
return None
|
| 64 |
|
| 65 |
|
| 66 |
+
def generate_snippet_id() -> str:
|
| 67 |
+
"""Generate unique snippet ID"""
|
| 68 |
+
return f"S_{uuid.uuid4().hex[:8]}"
|
|
|
|
|
|
|
| 69 |
|
|
|
|
|
|
|
| 70 |
|
| 71 |
+
def generate_webpage_id() -> str:
|
| 72 |
+
"""Generate unique webpage ID"""
|
| 73 |
+
return f"W_{uuid.uuid4().hex[:8]}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 74 |
|
|
|
|
| 75 |
|
| 76 |
+
def parse_tool_calls(text: str) -> List[Dict]:
|
| 77 |
+
"""
|
| 78 |
+
Parse <call_tool name="...">query</call_tool> from model output.
|
| 79 |
+
Returns list of {"name": str, "query": str, "params": dict}
|
| 80 |
+
"""
|
| 81 |
+
pattern = r'<call_tool\s+name="([^"]+)"([^>]*)>([^<]+)</call_tool>'
|
| 82 |
+
matches = re.findall(pattern, text)
|
|
|
|
|
|
|
| 83 |
|
| 84 |
+
tool_calls = []
|
| 85 |
+
for name, params_str, query in matches:
|
| 86 |
+
# Parse optional params like limit="8" year="2021-2025"
|
| 87 |
+
params = {}
|
| 88 |
+
param_pattern = r'(\w+)="([^"]+)"'
|
| 89 |
+
for param_name, param_value in re.findall(param_pattern, params_str):
|
| 90 |
+
params[param_name] = param_value
|
| 91 |
|
| 92 |
+
tool_calls.append({
|
| 93 |
+
"name": name.strip(),
|
| 94 |
+
"query": query.strip(),
|
| 95 |
+
"params": params
|
| 96 |
+
})
|
| 97 |
|
| 98 |
+
return tool_calls
|
|
|
|
|
|
|
| 99 |
|
|
|
|
| 100 |
|
| 101 |
+
def parse_think_blocks(text: str) -> List[str]:
|
| 102 |
+
"""Extract <think>...</think> content"""
|
| 103 |
+
pattern = r'<think>(.*?)</think>'
|
| 104 |
+
return re.findall(pattern, text, re.DOTALL)
|
| 105 |
|
|
|
|
|
|
|
|
|
|
| 106 |
|
| 107 |
+
def parse_answer(text: str) -> Optional[str]:
|
| 108 |
+
"""Extract <answer>...</answer> content"""
|
| 109 |
+
pattern = r'<answer>(.*?)</answer>'
|
| 110 |
+
match = re.search(pattern, text, re.DOTALL)
|
| 111 |
+
return match.group(1).strip() if match else None
|
| 112 |
|
|
|
|
| 113 |
|
| 114 |
+
def format_search_results(results: List[Dict], query: str) -> str:
|
| 115 |
+
"""
|
| 116 |
+
Format search results as DR-TULU tool output.
|
| 117 |
+
"""
|
| 118 |
+
if not results:
|
| 119 |
+
return "<tool_output>No results found.</tool_output>"
|
| 120 |
+
|
| 121 |
+
snippets = []
|
| 122 |
+
for r in results:
|
| 123 |
+
snippet_id = generate_snippet_id()
|
| 124 |
+
# Escape XML special chars in content
|
| 125 |
+
title = r.get("title", "").replace("&", "&").replace("<", "<").replace(">", ">")
|
| 126 |
+
snippet_text = r.get("snippet", "").replace("&", "&").replace("<", "<").replace(">", ">")
|
| 127 |
+
url = r.get("url", "")
|
| 128 |
+
|
| 129 |
+
snippets.append(
|
| 130 |
+
f'<snippet id="{snippet_id}" url="{url}" title="{title}">\n'
|
| 131 |
+
f'{snippet_text}\n'
|
| 132 |
+
f'</snippet>'
|
| 133 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 134 |
|
| 135 |
+
return f"<tool_output>\n" + "\n".join(snippets) + "\n</tool_output>"
|
|
|
|
|
|
|
| 136 |
|
|
|
|
| 137 |
|
| 138 |
+
def format_webpage_content(url: str, title: str, content: str) -> str:
|
| 139 |
+
"""
|
| 140 |
+
Format extracted webpage as DR-TULU tool output.
|
| 141 |
+
"""
|
| 142 |
+
if not content:
|
| 143 |
+
return f"<tool_output>Could not extract content from {url}</tool_output>"
|
| 144 |
+
|
| 145 |
+
webpage_id = generate_webpage_id()
|
| 146 |
+
# Truncate very long content
|
| 147 |
+
if len(content) > 8000:
|
| 148 |
+
content = content[:8000] + "\n[Content truncated...]"
|
| 149 |
+
|
| 150 |
+
# Escape XML special chars
|
| 151 |
+
content = content.replace("&", "&").replace("<", "<").replace(">", ">")
|
| 152 |
+
title = title.replace("&", "&").replace("<", "<").replace(">", ">")
|
| 153 |
+
|
| 154 |
+
return (
|
| 155 |
+
f"<tool_output>\n"
|
| 156 |
+
f'<webpage id="{webpage_id}" url="{url}" title="{title}">\n'
|
| 157 |
+
f'{content}\n'
|
| 158 |
+
f'</webpage>\n'
|
| 159 |
+
f"</tool_output>"
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def execute_tool(
|
| 164 |
+
tool_name: str,
|
| 165 |
+
query: str,
|
| 166 |
+
params: dict,
|
| 167 |
+
serper_key: str
|
| 168 |
+
) -> Tuple[str, List[Dict]]:
|
| 169 |
+
"""
|
| 170 |
+
Execute a tool and return (formatted_output, raw_results).
|
| 171 |
+
"""
|
| 172 |
+
if tool_name == "google_search":
|
| 173 |
+
num_results = int(params.get("limit", 10))
|
| 174 |
+
results = search_web(query, serper_key, num_results=num_results)
|
| 175 |
+
formatted = format_search_results(results, query)
|
| 176 |
+
return formatted, results
|
| 177 |
+
|
| 178 |
+
elif tool_name == "browse_webpage":
|
| 179 |
+
# query is the URL for browse_webpage
|
| 180 |
+
url = query
|
| 181 |
+
content = extract_content(url)
|
| 182 |
+
title = url # Could extract from content if needed
|
| 183 |
+
formatted = format_webpage_content(url, title, content or "")
|
| 184 |
+
return formatted, [{"url": url, "content": content, "title": title}]
|
| 185 |
|
| 186 |
+
else:
|
| 187 |
+
return f"<tool_output>Unknown tool: {tool_name}</tool_output>", []
|
| 188 |
|
| 189 |
|
| 190 |
+
def get_dr_tulu_system_prompt() -> str:
|
| 191 |
+
"""Return the DR-TULU system prompt"""
|
| 192 |
+
return '''You are a research assistant who answers questions through iterative reasoning and research.
|
|
|
|
|
|
|
|
|
|
| 193 |
|
| 194 |
+
## Process
|
| 195 |
+
- Use <think></think> tags to show your reasoning at any point.
|
| 196 |
+
- Use <call_tool name="...">query</call_tool> when you need information (see tools below).
|
| 197 |
+
- You can alternate between thinking and searching multiple times.
|
| 198 |
+
- Only provide <answer></answer> tags when you have enough information for a complete response.
|
| 199 |
+
- Support every non-trivial claim with retrieved evidence. Wrap the exact claim span in <cite id="ID1,ID2">...</cite>, where id are snippet IDs from searched results.
|
| 200 |
|
| 201 |
+
## Calling Tools (<call_tool name="...">query</call_tool>)
|
| 202 |
|
| 203 |
+
1. google_search
|
| 204 |
+
- Purpose: general web search.
|
| 205 |
+
- Input via: <call_tool name="google_search">your query</call_tool>
|
| 206 |
+
- Output: web search snippets.
|
| 207 |
|
| 208 |
+
2. browse_webpage
|
| 209 |
+
- Purpose: open a specific URL and extract readable page text.
|
| 210 |
+
- Input via: <call_tool name="browse_webpage">https://example.com/article</call_tool>
|
| 211 |
+
- Output: webpage content.
|
| 212 |
|
| 213 |
+
## Tool Output
|
| 214 |
+
- After you issue a tool call, we will execute it and return results wrapped in <tool_output> tags.
|
| 215 |
+
- For web search: <tool_output><snippet id=UNIQUE_ID url="..." title="...">content</snippet>...</tool_output>
|
| 216 |
+
- For web browsing: <tool_output><webpage id=UNIQUE_ID url="..." title="...">content</webpage></tool_output>
|
| 217 |
|
| 218 |
+
## Answer and Citation Format
|
| 219 |
+
- Once you collect all necessary information, generate the final answer with <answer></answer> tags.
|
| 220 |
+
- In your answer, wrap supported text in <cite id="SNIPPET_ID">...</cite> using exact IDs from returned snippets.
|
| 221 |
+
- Write comprehensive, well-structured answers with clear sections when appropriate.
|
| 222 |
+
'''
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 223 |
|
| 224 |
|
| 225 |
def stream_research(
|
|
|
|
| 232 |
system_prompt: str = "",
|
| 233 |
sub_agent_model: Optional[str] = None,
|
| 234 |
parallel_workers: int = 8,
|
| 235 |
+
max_tool_calls: int = 20,
|
| 236 |
+
**kwargs
|
| 237 |
):
|
| 238 |
"""
|
| 239 |
+
Stream deep research results using DR-TULU.
|
| 240 |
|
| 241 |
+
The model drives the research loop - it decides when to search,
|
| 242 |
+
what to search for, and when it has enough information to answer.
|
|
|
|
|
|
|
| 243 |
|
| 244 |
+
Yields same event types as the original research.py for API compatibility.
|
|
|
|
| 245 |
"""
|
|
|
|
|
|
|
|
|
|
| 246 |
|
| 247 |
+
# Build system prompt
|
| 248 |
+
dr_tulu_system = get_dr_tulu_system_prompt()
|
| 249 |
+
if system_prompt:
|
| 250 |
+
dr_tulu_system += f"\n\n{system_prompt}"
|
| 251 |
|
| 252 |
+
messages = [
|
| 253 |
+
{"role": "system", "content": dr_tulu_system},
|
| 254 |
+
{"role": "user", "content": question}
|
| 255 |
+
]
|
| 256 |
|
| 257 |
+
yield {"type": "status", "message": f"Starting DR-TULU research: {question}"}
|
|
|
|
|
|
|
|
|
|
| 258 |
|
| 259 |
+
tool_call_count = 0
|
| 260 |
+
findings = [] # Track sources for compatibility
|
| 261 |
+
all_queries = [] # Track queries for compatibility
|
| 262 |
+
iteration = 0
|
| 263 |
+
max_iterations_without_progress = 3
|
| 264 |
+
iterations_without_tool_calls = 0
|
| 265 |
+
|
| 266 |
+
while tool_call_count < max_tool_calls:
|
| 267 |
iteration += 1
|
| 268 |
|
| 269 |
+
# Call DR-TULU
|
| 270 |
+
yield {"type": "status", "message": "Thinking..."}
|
| 271 |
+
|
| 272 |
+
try:
|
| 273 |
+
response = client.chat.completions.create(
|
| 274 |
+
model=model,
|
| 275 |
+
messages=messages,
|
| 276 |
+
max_tokens=4096,
|
| 277 |
+
temperature=0.7,
|
| 278 |
+
)
|
| 279 |
+
assistant_message = response.choices[0].message.content
|
| 280 |
+
except Exception as e:
|
| 281 |
+
yield {"type": "error", "content": f"Model error: {str(e)}"}
|
| 282 |
+
yield {"type": "done"}
|
| 283 |
+
return
|
| 284 |
+
|
| 285 |
+
# Parse thinking blocks and yield as status
|
| 286 |
+
think_blocks = parse_think_blocks(assistant_message)
|
| 287 |
+
for thought in think_blocks:
|
| 288 |
+
# Truncate long thoughts for status display
|
| 289 |
+
thought_preview = thought[:300].strip()
|
| 290 |
+
if len(thought) > 300:
|
| 291 |
+
thought_preview += "..."
|
| 292 |
+
yield {"type": "status", "message": f"Reasoning: {thought_preview}"}
|
| 293 |
+
|
| 294 |
+
# Check for final answer
|
| 295 |
+
answer = parse_answer(assistant_message)
|
| 296 |
+
if answer:
|
| 297 |
+
yield {"type": "status", "message": "Research complete! Generating report..."}
|
| 298 |
+
|
| 299 |
+
# Wrap in <result> tags for compatibility with command center
|
| 300 |
+
result_content = answer
|
| 301 |
+
if "<result>" not in answer:
|
| 302 |
+
result_content = f"<result>\n{answer}\n</result>"
|
| 303 |
+
|
| 304 |
+
yield {"type": "result_preview", "content": answer, "figures": {}}
|
| 305 |
+
yield {"type": "result", "content": answer, "figures": {}}
|
| 306 |
+
yield {"type": "done"}
|
| 307 |
+
return
|
| 308 |
+
|
| 309 |
+
# Parse and execute tool calls
|
| 310 |
+
tool_calls = parse_tool_calls(assistant_message)
|
| 311 |
+
|
| 312 |
+
if not tool_calls:
|
| 313 |
+
# No tool calls and no answer - model might be stuck
|
| 314 |
+
iterations_without_tool_calls += 1
|
| 315 |
+
|
| 316 |
+
if iterations_without_tool_calls >= max_iterations_without_progress:
|
| 317 |
+
# Force final answer
|
| 318 |
+
yield {"type": "status", "message": "No more searches needed, generating final answer..."}
|
| 319 |
+
messages.append({"role": "assistant", "content": assistant_message})
|
| 320 |
+
messages.append({"role": "user", "content": "Please provide your final answer now using <answer></answer> tags."})
|
| 321 |
+
continue
|
| 322 |
+
|
| 323 |
+
# Append message and continue to prompt for more
|
| 324 |
+
messages.append({"role": "assistant", "content": assistant_message})
|
| 325 |
+
messages.append({"role": "user", "content": "Please continue your research or provide your answer using <answer></answer> tags."})
|
| 326 |
+
continue
|
| 327 |
+
|
| 328 |
+
# Reset counter since we have tool calls
|
| 329 |
+
iterations_without_tool_calls = 0
|
| 330 |
+
|
| 331 |
+
# Track queries for compatibility - build map of query -> global index
|
| 332 |
+
new_queries = [tc["query"] for tc in tool_calls if tc["name"] == "google_search"]
|
| 333 |
+
query_start_idx = len(all_queries) # Starting global index for this batch
|
| 334 |
+
|
| 335 |
+
if new_queries:
|
| 336 |
+
all_queries.extend(new_queries)
|
| 337 |
+
yield {
|
| 338 |
+
"type": "queries",
|
| 339 |
+
"queries": new_queries,
|
| 340 |
+
"iteration": iteration
|
| 341 |
}
|
| 342 |
|
| 343 |
+
# Track stats per query for this batch
|
| 344 |
+
query_stats = {} # local index -> {relevant, irrelevant, error}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 345 |
|
| 346 |
+
# Execute tools and collect results
|
| 347 |
+
tool_outputs = []
|
| 348 |
+
search_idx = 0 # Track which search query we're on within this batch
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 349 |
|
| 350 |
+
for i, tc in enumerate(tool_calls):
|
| 351 |
+
tool_call_count += 1
|
| 352 |
|
| 353 |
+
if tc["name"] == "google_search":
|
| 354 |
+
# Calculate global query index
|
| 355 |
+
global_query_idx = query_start_idx + search_idx
|
| 356 |
|
| 357 |
+
yield {
|
| 358 |
+
"type": "status",
|
| 359 |
+
"message": f"Searching: {tc['query'][:50]}..."
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 360 |
}
|
| 361 |
+
|
| 362 |
+
# Initialize stats for this query
|
| 363 |
+
if global_query_idx not in query_stats:
|
| 364 |
+
query_stats[global_query_idx] = {"relevant": 0, "irrelevant": 0, "error": 0}
|
| 365 |
+
else:
|
| 366 |
+
global_query_idx = None # browse_webpage doesn't have a query index
|
| 367 |
+
yield {
|
| 368 |
+
"type": "status",
|
| 369 |
+
"message": f"Browsing: {tc['query'][:50]}..."
|
|
|
|
| 370 |
}
|
| 371 |
|
| 372 |
+
formatted_output, raw_results = execute_tool(
|
| 373 |
+
tc["name"],
|
| 374 |
+
tc["query"],
|
| 375 |
+
tc["params"],
|
| 376 |
+
serper_key
|
| 377 |
+
)
|
| 378 |
+
tool_outputs.append(formatted_output)
|
| 379 |
+
|
| 380 |
+
# Yield source events for compatibility
|
| 381 |
+
if tc["name"] == "google_search":
|
| 382 |
+
for j, result in enumerate(raw_results):
|
| 383 |
+
findings.append({
|
| 384 |
+
"source": result.get("url", ""),
|
| 385 |
+
"title": result.get("title", ""),
|
| 386 |
+
"analysis": result.get("snippet", "")
|
| 387 |
+
})
|
| 388 |
+
|
| 389 |
+
# All search results are considered relevant (DR-TULU decides what to use)
|
| 390 |
+
query_stats[global_query_idx]["relevant"] += 1
|
| 391 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 392 |
yield {
|
| 393 |
"type": "source",
|
| 394 |
+
"query_index": global_query_idx,
|
| 395 |
+
"query_text": tc["query"],
|
| 396 |
+
"title": result.get("title", ""),
|
| 397 |
+
"url": result.get("url", ""),
|
| 398 |
+
"analysis": result.get("snippet", ""),
|
| 399 |
"finding_count": len(findings),
|
| 400 |
+
"is_relevant": True, # DR-TULU decides relevance
|
| 401 |
+
"is_error": False,
|
| 402 |
+
"error_message": ""
|
| 403 |
}
|
| 404 |
|
| 405 |
+
# Emit query_stats after processing all results for this query
|
| 406 |
+
yield {
|
| 407 |
+
"type": "query_stats",
|
| 408 |
+
"query_index": global_query_idx,
|
| 409 |
+
"relevant_count": query_stats[global_query_idx]["relevant"],
|
| 410 |
+
"irrelevant_count": query_stats[global_query_idx]["irrelevant"],
|
| 411 |
+
"error_count": query_stats[global_query_idx]["error"]
|
| 412 |
+
}
|
| 413 |
+
|
| 414 |
+
search_idx += 1 # Move to next search query
|
| 415 |
+
|
| 416 |
+
elif tc["name"] == "browse_webpage":
|
| 417 |
+
# browse_webpage results don't belong to a search query
|
| 418 |
+
# We can associate them with the last search query or skip
|
| 419 |
+
for result in raw_results:
|
| 420 |
+
content = result.get("content", "")
|
| 421 |
+
is_error = not content
|
| 422 |
+
findings.append({
|
| 423 |
+
"source": tc["query"],
|
| 424 |
+
"title": result.get("title", tc["query"]),
|
| 425 |
+
"analysis": content[:500] if content else "Failed to extract"
|
| 426 |
+
})
|
| 427 |
+
|
| 428 |
+
# For browse_webpage, use a pseudo-index or skip query association
|
| 429 |
+
# Since it's browsing a specific URL, we'll emit it without query grouping
|
| 430 |
yield {
|
| 431 |
+
"type": "source",
|
| 432 |
+
"query_index": -1, # Special index for browse results
|
| 433 |
+
"query_text": f"Browse: {tc['query'][:50]}",
|
| 434 |
+
"title": result.get("title", tc["query"]),
|
| 435 |
+
"url": tc["query"],
|
| 436 |
+
"analysis": content[:500] if content else "Failed to extract content",
|
| 437 |
+
"finding_count": len(findings),
|
| 438 |
+
"is_relevant": not is_error,
|
| 439 |
+
"is_error": is_error,
|
| 440 |
+
"error_message": "Content extraction failed" if is_error else ""
|
| 441 |
}
|
| 442 |
|
| 443 |
+
if tool_call_count >= max_tool_calls:
|
| 444 |
+
break
|
| 445 |
|
| 446 |
+
# Append assistant message and tool results to conversation
|
| 447 |
+
messages.append({"role": "assistant", "content": assistant_message})
|
|
|
|
|
|
|
|
|
|
|
|
|
| 448 |
|
| 449 |
+
# Combine all tool outputs into one user message
|
| 450 |
+
combined_output = "\n\n".join(tool_outputs)
|
| 451 |
+
messages.append({"role": "user", "content": combined_output})
|
| 452 |
|
| 453 |
+
# Max tool calls reached - ask for final answer
|
| 454 |
+
yield {"type": "status", "message": "Maximum searches reached, generating final answer..."}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 455 |
|
| 456 |
+
messages.append({
|
| 457 |
+
"role": "user",
|
| 458 |
+
"content": "You have reached the maximum number of tool calls. Please provide your final answer now using <answer></answer> tags based on the information gathered."
|
| 459 |
+
})
|
|
|
|
|
|
|
| 460 |
|
| 461 |
+
try:
|
| 462 |
+
response = client.chat.completions.create(
|
| 463 |
+
model=model,
|
| 464 |
+
messages=messages,
|
| 465 |
+
max_tokens=4096,
|
| 466 |
+
temperature=0.7,
|
| 467 |
+
)
|
| 468 |
+
final_message = response.choices[0].message.content
|
| 469 |
+
answer = parse_answer(final_message) or final_message
|
| 470 |
+
|
| 471 |
+
yield {"type": "result_preview", "content": answer, "figures": {}}
|
| 472 |
+
yield {"type": "result", "content": answer, "figures": {}}
|
| 473 |
+
except Exception as e:
|
| 474 |
+
yield {"type": "error", "content": f"Failed to generate final answer: {str(e)}"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 475 |
|
| 476 |
yield {"type": "done"}
|
backend/research_tulu.py
DELETED
|
@@ -1,473 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Research notebook backend using DR-TULU model - model-driven deep research
|
| 3 |
-
|
| 4 |
-
DR-TULU drives the research loop - it decides when to search, what to search for,
|
| 5 |
-
and when it has enough information to answer.
|
| 6 |
-
"""
|
| 7 |
-
import json
|
| 8 |
-
import os
|
| 9 |
-
import re
|
| 10 |
-
import uuid
|
| 11 |
-
from typing import List, Dict, Optional, Tuple
|
| 12 |
-
import requests
|
| 13 |
-
import trafilatura
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
def search_web(query: str, api_key: str, num_results: int = 10) -> List[Dict[str, str]]:
|
| 17 |
-
"""Search the web using Serper API"""
|
| 18 |
-
url = "https://google.serper.dev/search"
|
| 19 |
-
payload = json.dumps({"q": query, "num": num_results})
|
| 20 |
-
headers = {
|
| 21 |
-
'X-API-KEY': api_key,
|
| 22 |
-
'Content-Type': 'application/json'
|
| 23 |
-
}
|
| 24 |
-
|
| 25 |
-
try:
|
| 26 |
-
response = requests.post(url, headers=headers, data=payload, timeout=10)
|
| 27 |
-
|
| 28 |
-
if response.status_code != 200:
|
| 29 |
-
return []
|
| 30 |
-
|
| 31 |
-
data = response.json()
|
| 32 |
-
results = []
|
| 33 |
-
for item in data.get('organic', []):
|
| 34 |
-
results.append({
|
| 35 |
-
'title': item.get('title', ''),
|
| 36 |
-
'url': item.get('link', ''),
|
| 37 |
-
'snippet': item.get('snippet', '')
|
| 38 |
-
})
|
| 39 |
-
return results
|
| 40 |
-
except Exception as e:
|
| 41 |
-
print(f"Search error: {e}")
|
| 42 |
-
return []
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
def extract_content(url: str) -> Optional[str]:
|
| 46 |
-
"""Extract main content from a URL"""
|
| 47 |
-
try:
|
| 48 |
-
downloaded = trafilatura.fetch_url(url)
|
| 49 |
-
if downloaded is None:
|
| 50 |
-
return None
|
| 51 |
-
text = trafilatura.extract(
|
| 52 |
-
downloaded,
|
| 53 |
-
include_comments=False,
|
| 54 |
-
include_tables=False,
|
| 55 |
-
no_fallback=False
|
| 56 |
-
)
|
| 57 |
-
return text
|
| 58 |
-
except Exception as e:
|
| 59 |
-
print(f"Content extraction error for {url}: {e}")
|
| 60 |
-
return None
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
def generate_snippet_id() -> str:
|
| 64 |
-
"""Generate unique snippet ID"""
|
| 65 |
-
return f"S_{uuid.uuid4().hex[:8]}"
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
def generate_webpage_id() -> str:
|
| 69 |
-
"""Generate unique webpage ID"""
|
| 70 |
-
return f"W_{uuid.uuid4().hex[:8]}"
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
def parse_tool_calls(text: str) -> List[Dict]:
|
| 74 |
-
"""
|
| 75 |
-
Parse <call_tool name="...">query</call_tool> from model output.
|
| 76 |
-
Returns list of {"name": str, "query": str, "params": dict}
|
| 77 |
-
"""
|
| 78 |
-
pattern = r'<call_tool\s+name="([^"]+)"([^>]*)>([^<]+)</call_tool>'
|
| 79 |
-
matches = re.findall(pattern, text)
|
| 80 |
-
|
| 81 |
-
tool_calls = []
|
| 82 |
-
for name, params_str, query in matches:
|
| 83 |
-
# Parse optional params like limit="8" year="2021-2025"
|
| 84 |
-
params = {}
|
| 85 |
-
param_pattern = r'(\w+)="([^"]+)"'
|
| 86 |
-
for param_name, param_value in re.findall(param_pattern, params_str):
|
| 87 |
-
params[param_name] = param_value
|
| 88 |
-
|
| 89 |
-
tool_calls.append({
|
| 90 |
-
"name": name.strip(),
|
| 91 |
-
"query": query.strip(),
|
| 92 |
-
"params": params
|
| 93 |
-
})
|
| 94 |
-
|
| 95 |
-
return tool_calls
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
def parse_think_blocks(text: str) -> List[str]:
|
| 99 |
-
"""Extract <think>...</think> content"""
|
| 100 |
-
pattern = r'<think>(.*?)</think>'
|
| 101 |
-
return re.findall(pattern, text, re.DOTALL)
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
def parse_answer(text: str) -> Optional[str]:
|
| 105 |
-
"""Extract <answer>...</answer> content"""
|
| 106 |
-
pattern = r'<answer>(.*?)</answer>'
|
| 107 |
-
match = re.search(pattern, text, re.DOTALL)
|
| 108 |
-
return match.group(1).strip() if match else None
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
def format_search_results(results: List[Dict], query: str) -> str:
|
| 112 |
-
"""
|
| 113 |
-
Format search results as DR-TULU tool output.
|
| 114 |
-
"""
|
| 115 |
-
if not results:
|
| 116 |
-
return "<tool_output>No results found.</tool_output>"
|
| 117 |
-
|
| 118 |
-
snippets = []
|
| 119 |
-
for r in results:
|
| 120 |
-
snippet_id = generate_snippet_id()
|
| 121 |
-
# Escape XML special chars in content
|
| 122 |
-
title = r.get("title", "").replace("&", "&").replace("<", "<").replace(">", ">")
|
| 123 |
-
snippet_text = r.get("snippet", "").replace("&", "&").replace("<", "<").replace(">", ">")
|
| 124 |
-
url = r.get("url", "")
|
| 125 |
-
|
| 126 |
-
snippets.append(
|
| 127 |
-
f'<snippet id="{snippet_id}" url="{url}" title="{title}">\n'
|
| 128 |
-
f'{snippet_text}\n'
|
| 129 |
-
f'</snippet>'
|
| 130 |
-
)
|
| 131 |
-
|
| 132 |
-
return f"<tool_output>\n" + "\n".join(snippets) + "\n</tool_output>"
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
def format_webpage_content(url: str, title: str, content: str) -> str:
|
| 136 |
-
"""
|
| 137 |
-
Format extracted webpage as DR-TULU tool output.
|
| 138 |
-
"""
|
| 139 |
-
if not content:
|
| 140 |
-
return f"<tool_output>Could not extract content from {url}</tool_output>"
|
| 141 |
-
|
| 142 |
-
webpage_id = generate_webpage_id()
|
| 143 |
-
# Truncate very long content
|
| 144 |
-
if len(content) > 8000:
|
| 145 |
-
content = content[:8000] + "\n[Content truncated...]"
|
| 146 |
-
|
| 147 |
-
# Escape XML special chars
|
| 148 |
-
content = content.replace("&", "&").replace("<", "<").replace(">", ">")
|
| 149 |
-
title = title.replace("&", "&").replace("<", "<").replace(">", ">")
|
| 150 |
-
|
| 151 |
-
return (
|
| 152 |
-
f"<tool_output>\n"
|
| 153 |
-
f'<webpage id="{webpage_id}" url="{url}" title="{title}">\n'
|
| 154 |
-
f'{content}\n'
|
| 155 |
-
f'</webpage>\n'
|
| 156 |
-
f"</tool_output>"
|
| 157 |
-
)
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
def execute_tool(
|
| 161 |
-
tool_name: str,
|
| 162 |
-
query: str,
|
| 163 |
-
params: dict,
|
| 164 |
-
serper_key: str
|
| 165 |
-
) -> Tuple[str, List[Dict]]:
|
| 166 |
-
"""
|
| 167 |
-
Execute a tool and return (formatted_output, raw_results).
|
| 168 |
-
"""
|
| 169 |
-
if tool_name == "google_search":
|
| 170 |
-
num_results = int(params.get("limit", 10))
|
| 171 |
-
results = search_web(query, serper_key, num_results=num_results)
|
| 172 |
-
formatted = format_search_results(results, query)
|
| 173 |
-
return formatted, results
|
| 174 |
-
|
| 175 |
-
elif tool_name == "browse_webpage":
|
| 176 |
-
# query is the URL for browse_webpage
|
| 177 |
-
url = query
|
| 178 |
-
content = extract_content(url)
|
| 179 |
-
title = url # Could extract from content if needed
|
| 180 |
-
formatted = format_webpage_content(url, title, content or "")
|
| 181 |
-
return formatted, [{"url": url, "content": content, "title": title}]
|
| 182 |
-
|
| 183 |
-
else:
|
| 184 |
-
return f"<tool_output>Unknown tool: {tool_name}</tool_output>", []
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
def get_dr_tulu_system_prompt() -> str:
|
| 188 |
-
"""Return the DR-TULU system prompt"""
|
| 189 |
-
return '''You are a research assistant who answers questions through iterative reasoning and research.
|
| 190 |
-
|
| 191 |
-
## Process
|
| 192 |
-
- Use <think></think> tags to show your reasoning at any point.
|
| 193 |
-
- Use <call_tool name="...">query</call_tool> when you need information (see tools below).
|
| 194 |
-
- You can alternate between thinking and searching multiple times.
|
| 195 |
-
- Only provide <answer></answer> tags when you have enough information for a complete response.
|
| 196 |
-
- Support every non-trivial claim with retrieved evidence. Wrap the exact claim span in <cite id="ID1,ID2">...</cite>, where id are snippet IDs from searched results.
|
| 197 |
-
|
| 198 |
-
## Calling Tools (<call_tool name="...">query</call_tool>)
|
| 199 |
-
|
| 200 |
-
1. google_search
|
| 201 |
-
- Purpose: general web search.
|
| 202 |
-
- Input via: <call_tool name="google_search">your query</call_tool>
|
| 203 |
-
- Output: web search snippets.
|
| 204 |
-
|
| 205 |
-
2. browse_webpage
|
| 206 |
-
- Purpose: open a specific URL and extract readable page text.
|
| 207 |
-
- Input via: <call_tool name="browse_webpage">https://example.com/article</call_tool>
|
| 208 |
-
- Output: webpage content.
|
| 209 |
-
|
| 210 |
-
## Tool Output
|
| 211 |
-
- After you issue a tool call, we will execute it and return results wrapped in <tool_output> tags.
|
| 212 |
-
- For web search: <tool_output><snippet id=UNIQUE_ID url="..." title="...">content</snippet>...</tool_output>
|
| 213 |
-
- For web browsing: <tool_output><webpage id=UNIQUE_ID url="..." title="...">content</webpage></tool_output>
|
| 214 |
-
|
| 215 |
-
## Answer and Citation Format
|
| 216 |
-
- Once you collect all necessary information, generate the final answer with <answer></answer> tags.
|
| 217 |
-
- In your answer, wrap supported text in <cite id="SNIPPET_ID">...</cite> using exact IDs from returned snippets.
|
| 218 |
-
- Write comprehensive, well-structured answers with clear sections when appropriate.
|
| 219 |
-
'''
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
def stream_research(
|
| 223 |
-
client,
|
| 224 |
-
model: str,
|
| 225 |
-
question: str,
|
| 226 |
-
serper_key: str,
|
| 227 |
-
max_iterations: int = 5,
|
| 228 |
-
max_websites: int = 50,
|
| 229 |
-
system_prompt: str = "",
|
| 230 |
-
sub_agent_model: Optional[str] = None,
|
| 231 |
-
parallel_workers: int = 8,
|
| 232 |
-
max_tool_calls: int = 20,
|
| 233 |
-
**kwargs
|
| 234 |
-
):
|
| 235 |
-
"""
|
| 236 |
-
Stream deep research results using DR-TULU.
|
| 237 |
-
|
| 238 |
-
The model drives the research loop - it decides when to search,
|
| 239 |
-
what to search for, and when it has enough information to answer.
|
| 240 |
-
|
| 241 |
-
Yields same event types as the original research.py for API compatibility.
|
| 242 |
-
"""
|
| 243 |
-
|
| 244 |
-
# Build system prompt
|
| 245 |
-
dr_tulu_system = get_dr_tulu_system_prompt()
|
| 246 |
-
if system_prompt:
|
| 247 |
-
dr_tulu_system += f"\n\n{system_prompt}"
|
| 248 |
-
|
| 249 |
-
messages = [
|
| 250 |
-
{"role": "system", "content": dr_tulu_system},
|
| 251 |
-
{"role": "user", "content": question}
|
| 252 |
-
]
|
| 253 |
-
|
| 254 |
-
yield {"type": "status", "message": f"Starting DR-TULU research: {question}"}
|
| 255 |
-
|
| 256 |
-
tool_call_count = 0
|
| 257 |
-
findings = [] # Track sources for compatibility
|
| 258 |
-
all_queries = [] # Track queries for compatibility
|
| 259 |
-
iteration = 0
|
| 260 |
-
max_iterations_without_progress = 3
|
| 261 |
-
iterations_without_tool_calls = 0
|
| 262 |
-
|
| 263 |
-
while tool_call_count < max_tool_calls:
|
| 264 |
-
iteration += 1
|
| 265 |
-
|
| 266 |
-
# Call DR-TULU
|
| 267 |
-
yield {"type": "status", "message": "Thinking..."}
|
| 268 |
-
|
| 269 |
-
try:
|
| 270 |
-
response = client.chat.completions.create(
|
| 271 |
-
model=model,
|
| 272 |
-
messages=messages,
|
| 273 |
-
max_tokens=4096,
|
| 274 |
-
temperature=0.7,
|
| 275 |
-
)
|
| 276 |
-
assistant_message = response.choices[0].message.content
|
| 277 |
-
except Exception as e:
|
| 278 |
-
yield {"type": "error", "content": f"Model error: {str(e)}"}
|
| 279 |
-
yield {"type": "done"}
|
| 280 |
-
return
|
| 281 |
-
|
| 282 |
-
# Parse thinking blocks and yield as status
|
| 283 |
-
think_blocks = parse_think_blocks(assistant_message)
|
| 284 |
-
for thought in think_blocks:
|
| 285 |
-
# Truncate long thoughts for status display
|
| 286 |
-
thought_preview = thought[:300].strip()
|
| 287 |
-
if len(thought) > 300:
|
| 288 |
-
thought_preview += "..."
|
| 289 |
-
yield {"type": "status", "message": f"Reasoning: {thought_preview}"}
|
| 290 |
-
|
| 291 |
-
# Check for final answer
|
| 292 |
-
answer = parse_answer(assistant_message)
|
| 293 |
-
if answer:
|
| 294 |
-
yield {"type": "status", "message": "Research complete! Generating report..."}
|
| 295 |
-
|
| 296 |
-
# Wrap in <result> tags for compatibility with command center
|
| 297 |
-
result_content = answer
|
| 298 |
-
if "<result>" not in answer:
|
| 299 |
-
result_content = f"<result>\n{answer}\n</result>"
|
| 300 |
-
|
| 301 |
-
yield {"type": "result_preview", "content": answer, "figures": {}}
|
| 302 |
-
yield {"type": "result", "content": answer, "figures": {}}
|
| 303 |
-
yield {"type": "done"}
|
| 304 |
-
return
|
| 305 |
-
|
| 306 |
-
# Parse and execute tool calls
|
| 307 |
-
tool_calls = parse_tool_calls(assistant_message)
|
| 308 |
-
|
| 309 |
-
if not tool_calls:
|
| 310 |
-
# No tool calls and no answer - model might be stuck
|
| 311 |
-
iterations_without_tool_calls += 1
|
| 312 |
-
|
| 313 |
-
if iterations_without_tool_calls >= max_iterations_without_progress:
|
| 314 |
-
# Force final answer
|
| 315 |
-
yield {"type": "status", "message": "No more searches needed, generating final answer..."}
|
| 316 |
-
messages.append({"role": "assistant", "content": assistant_message})
|
| 317 |
-
messages.append({"role": "user", "content": "Please provide your final answer now using <answer></answer> tags."})
|
| 318 |
-
continue
|
| 319 |
-
|
| 320 |
-
# Append message and continue to prompt for more
|
| 321 |
-
messages.append({"role": "assistant", "content": assistant_message})
|
| 322 |
-
messages.append({"role": "user", "content": "Please continue your research or provide your answer using <answer></answer> tags."})
|
| 323 |
-
continue
|
| 324 |
-
|
| 325 |
-
# Reset counter since we have tool calls
|
| 326 |
-
iterations_without_tool_calls = 0
|
| 327 |
-
|
| 328 |
-
# Track queries for compatibility - build map of query -> global index
|
| 329 |
-
new_queries = [tc["query"] for tc in tool_calls if tc["name"] == "google_search"]
|
| 330 |
-
query_start_idx = len(all_queries) # Starting global index for this batch
|
| 331 |
-
|
| 332 |
-
if new_queries:
|
| 333 |
-
all_queries.extend(new_queries)
|
| 334 |
-
yield {
|
| 335 |
-
"type": "queries",
|
| 336 |
-
"queries": new_queries,
|
| 337 |
-
"iteration": iteration
|
| 338 |
-
}
|
| 339 |
-
|
| 340 |
-
# Track stats per query for this batch
|
| 341 |
-
query_stats = {} # local index -> {relevant, irrelevant, error}
|
| 342 |
-
|
| 343 |
-
# Execute tools and collect results
|
| 344 |
-
tool_outputs = []
|
| 345 |
-
search_idx = 0 # Track which search query we're on within this batch
|
| 346 |
-
|
| 347 |
-
for i, tc in enumerate(tool_calls):
|
| 348 |
-
tool_call_count += 1
|
| 349 |
-
|
| 350 |
-
if tc["name"] == "google_search":
|
| 351 |
-
# Calculate global query index
|
| 352 |
-
global_query_idx = query_start_idx + search_idx
|
| 353 |
-
|
| 354 |
-
yield {
|
| 355 |
-
"type": "status",
|
| 356 |
-
"message": f"Searching: {tc['query'][:50]}..."
|
| 357 |
-
}
|
| 358 |
-
|
| 359 |
-
# Initialize stats for this query
|
| 360 |
-
if global_query_idx not in query_stats:
|
| 361 |
-
query_stats[global_query_idx] = {"relevant": 0, "irrelevant": 0, "error": 0}
|
| 362 |
-
else:
|
| 363 |
-
global_query_idx = None # browse_webpage doesn't have a query index
|
| 364 |
-
yield {
|
| 365 |
-
"type": "status",
|
| 366 |
-
"message": f"Browsing: {tc['query'][:50]}..."
|
| 367 |
-
}
|
| 368 |
-
|
| 369 |
-
formatted_output, raw_results = execute_tool(
|
| 370 |
-
tc["name"],
|
| 371 |
-
tc["query"],
|
| 372 |
-
tc["params"],
|
| 373 |
-
serper_key
|
| 374 |
-
)
|
| 375 |
-
tool_outputs.append(formatted_output)
|
| 376 |
-
|
| 377 |
-
# Yield source events for compatibility
|
| 378 |
-
if tc["name"] == "google_search":
|
| 379 |
-
for j, result in enumerate(raw_results):
|
| 380 |
-
findings.append({
|
| 381 |
-
"source": result.get("url", ""),
|
| 382 |
-
"title": result.get("title", ""),
|
| 383 |
-
"analysis": result.get("snippet", "")
|
| 384 |
-
})
|
| 385 |
-
|
| 386 |
-
# All search results are considered relevant (DR-TULU decides what to use)
|
| 387 |
-
query_stats[global_query_idx]["relevant"] += 1
|
| 388 |
-
|
| 389 |
-
yield {
|
| 390 |
-
"type": "source",
|
| 391 |
-
"query_index": global_query_idx,
|
| 392 |
-
"query_text": tc["query"],
|
| 393 |
-
"title": result.get("title", ""),
|
| 394 |
-
"url": result.get("url", ""),
|
| 395 |
-
"analysis": result.get("snippet", ""),
|
| 396 |
-
"finding_count": len(findings),
|
| 397 |
-
"is_relevant": True, # DR-TULU decides relevance
|
| 398 |
-
"is_error": False,
|
| 399 |
-
"error_message": ""
|
| 400 |
-
}
|
| 401 |
-
|
| 402 |
-
# Emit query_stats after processing all results for this query
|
| 403 |
-
yield {
|
| 404 |
-
"type": "query_stats",
|
| 405 |
-
"query_index": global_query_idx,
|
| 406 |
-
"relevant_count": query_stats[global_query_idx]["relevant"],
|
| 407 |
-
"irrelevant_count": query_stats[global_query_idx]["irrelevant"],
|
| 408 |
-
"error_count": query_stats[global_query_idx]["error"]
|
| 409 |
-
}
|
| 410 |
-
|
| 411 |
-
search_idx += 1 # Move to next search query
|
| 412 |
-
|
| 413 |
-
elif tc["name"] == "browse_webpage":
|
| 414 |
-
# browse_webpage results don't belong to a search query
|
| 415 |
-
# We can associate them with the last search query or skip
|
| 416 |
-
for result in raw_results:
|
| 417 |
-
content = result.get("content", "")
|
| 418 |
-
is_error = not content
|
| 419 |
-
findings.append({
|
| 420 |
-
"source": tc["query"],
|
| 421 |
-
"title": result.get("title", tc["query"]),
|
| 422 |
-
"analysis": content[:500] if content else "Failed to extract"
|
| 423 |
-
})
|
| 424 |
-
|
| 425 |
-
# For browse_webpage, use a pseudo-index or skip query association
|
| 426 |
-
# Since it's browsing a specific URL, we'll emit it without query grouping
|
| 427 |
-
yield {
|
| 428 |
-
"type": "source",
|
| 429 |
-
"query_index": -1, # Special index for browse results
|
| 430 |
-
"query_text": f"Browse: {tc['query'][:50]}",
|
| 431 |
-
"title": result.get("title", tc["query"]),
|
| 432 |
-
"url": tc["query"],
|
| 433 |
-
"analysis": content[:500] if content else "Failed to extract content",
|
| 434 |
-
"finding_count": len(findings),
|
| 435 |
-
"is_relevant": not is_error,
|
| 436 |
-
"is_error": is_error,
|
| 437 |
-
"error_message": "Content extraction failed" if is_error else ""
|
| 438 |
-
}
|
| 439 |
-
|
| 440 |
-
if tool_call_count >= max_tool_calls:
|
| 441 |
-
break
|
| 442 |
-
|
| 443 |
-
# Append assistant message and tool results to conversation
|
| 444 |
-
messages.append({"role": "assistant", "content": assistant_message})
|
| 445 |
-
|
| 446 |
-
# Combine all tool outputs into one user message
|
| 447 |
-
combined_output = "\n\n".join(tool_outputs)
|
| 448 |
-
messages.append({"role": "user", "content": combined_output})
|
| 449 |
-
|
| 450 |
-
# Max tool calls reached - ask for final answer
|
| 451 |
-
yield {"type": "status", "message": "Maximum searches reached, generating final answer..."}
|
| 452 |
-
|
| 453 |
-
messages.append({
|
| 454 |
-
"role": "user",
|
| 455 |
-
"content": "You have reached the maximum number of tool calls. Please provide your final answer now using <answer></answer> tags based on the information gathered."
|
| 456 |
-
})
|
| 457 |
-
|
| 458 |
-
try:
|
| 459 |
-
response = client.chat.completions.create(
|
| 460 |
-
model=model,
|
| 461 |
-
messages=messages,
|
| 462 |
-
max_tokens=4096,
|
| 463 |
-
temperature=0.7,
|
| 464 |
-
)
|
| 465 |
-
final_message = response.choices[0].message.content
|
| 466 |
-
answer = parse_answer(final_message) or final_message
|
| 467 |
-
|
| 468 |
-
yield {"type": "result_preview", "content": answer, "figures": {}}
|
| 469 |
-
yield {"type": "result", "content": answer, "figures": {}}
|
| 470 |
-
except Exception as e:
|
| 471 |
-
yield {"type": "error", "content": f"Failed to generate final answer: {str(e)}"}
|
| 472 |
-
|
| 473 |
-
yield {"type": "done"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/utils.py
CHANGED
|
@@ -1,3 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import nbformat
|
| 2 |
from nbformat.v4 import new_notebook, new_markdown_cell, new_code_cell
|
| 3 |
from nbconvert import HTMLExporter
|
|
@@ -6,7 +9,8 @@ from e2b_code_interpreter import Sandbox
|
|
| 6 |
from transformers import AutoTokenizer
|
| 7 |
from traitlets.config import Config
|
| 8 |
from jupyter_handler import JupyterNotebook
|
| 9 |
-
|
|
|
|
| 10 |
|
| 11 |
|
| 12 |
TOOLS = [
|
|
@@ -33,7 +37,7 @@ MAX_TURNS = 40
|
|
| 33 |
|
| 34 |
|
| 35 |
def execute_code(sbx, code):
|
| 36 |
-
execution = sbx.run_code(code, on_stdout=lambda data:
|
| 37 |
output = ""
|
| 38 |
if len(execution.logs.stdout) > 0:
|
| 39 |
output += "\n".join(execution.logs.stdout)
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
import nbformat
|
| 5 |
from nbformat.v4 import new_notebook, new_markdown_cell, new_code_cell
|
| 6 |
from nbconvert import HTMLExporter
|
|
|
|
| 9 |
from transformers import AutoTokenizer
|
| 10 |
from traitlets.config import Config
|
| 11 |
from jupyter_handler import JupyterNotebook
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
|
| 15 |
|
| 16 |
TOOLS = [
|
|
|
|
| 37 |
|
| 38 |
|
| 39 |
def execute_code(sbx, code):
|
| 40 |
+
execution = sbx.run_code(code, on_stdout=lambda data: logger.debug('stdout: %s', data))
|
| 41 |
output = ""
|
| 42 |
if len(execution.logs.stdout) > 0:
|
| 43 |
output += "\n".join(execution.logs.stdout)
|
frontend/index.html
CHANGED
|
@@ -7,7 +7,7 @@
|
|
| 7 |
<link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&display=swap" rel="stylesheet">
|
| 8 |
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/themes/prism.min.css">
|
| 9 |
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.9/dist/katex.min.css">
|
| 10 |
-
<link rel="stylesheet" href="style.css?v=
|
| 11 |
</head>
|
| 12 |
<body>
|
| 13 |
<div class="app-container">
|
|
@@ -26,6 +26,7 @@
|
|
| 26 |
</div>
|
| 27 |
</div>
|
| 28 |
<div class="tab-bar-spacer"></div>
|
|
|
|
| 29 |
<button class="files-btn" id="filesBtn">FILES</button>
|
| 30 |
<button class="debug-btn" id="debugBtn">DEBUG</button>
|
| 31 |
<button class="settings-btn" id="settingsBtn">SETTINGS</button>
|
|
@@ -52,74 +53,99 @@
|
|
| 52 |
|
| 53 |
<div class="notebook-body">
|
| 54 |
<div class="chat-container" id="messages-command">
|
| 55 |
-
<div class="welcome-message">
|
| 56 |
<p>Welcome to Productive — an AI interface with specialized notebooks.</p>
|
| 57 |
-
|
| 58 |
-
<
|
| 59 |
-
<
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
<
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
<
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
<
|
| 84 |
-
<
|
| 85 |
-
<
|
| 86 |
-
<
|
| 87 |
-
<
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
<
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 116 |
</div>
|
| 117 |
-
<p>When a notebook is opened, you'll see a widget you can click to jump to it. A pulsing dot on the tab indicates active generation.</p>
|
| 118 |
</div>
|
| 119 |
</div>
|
| 120 |
</div>
|
| 121 |
|
| 122 |
-
<div class="input-area">
|
| 123 |
<div class="input-container">
|
| 124 |
<textarea placeholder="Enter message..." id="input-command" rows="1"></textarea>
|
| 125 |
<button id="sendCommand">SEND</button>
|
|
@@ -305,6 +331,50 @@
|
|
| 305 |
</div>
|
| 306 |
</div>
|
| 307 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 308 |
<!-- Provider Dialog -->
|
| 309 |
<div class="settings-dialog-overlay" id="provider-dialog">
|
| 310 |
<div class="settings-dialog">
|
|
@@ -356,6 +426,11 @@
|
|
| 356 |
<input type="text" id="model-model-id" class="settings-input" placeholder="e.g., gpt-4o, claude-3-5-sonnet-20241022">
|
| 357 |
<span class="dialog-hint">The exact model identifier used in API calls</span>
|
| 358 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 359 |
</div>
|
| 360 |
<div class="settings-dialog-actions">
|
| 361 |
<button class="settings-save-btn" onclick="saveModelFromDialog()">SAVE</button>
|
|
@@ -368,7 +443,7 @@
|
|
| 368 |
<script src="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/components/prism-python.min.js"></script>
|
| 369 |
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
|
| 370 |
<script src="https://cdn.jsdelivr.net/npm/katex@0.16.9/dist/katex.min.js"></script>
|
| 371 |
-
<script src="research-ui.js?v=
|
| 372 |
-
<script src="script.js?v=
|
| 373 |
</body>
|
| 374 |
</html>
|
|
|
|
| 7 |
<link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&display=swap" rel="stylesheet">
|
| 8 |
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/themes/prism.min.css">
|
| 9 |
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.9/dist/katex.min.css">
|
| 10 |
+
<link rel="stylesheet" href="style.css?v=35">
|
| 11 |
</head>
|
| 12 |
<body>
|
| 13 |
<div class="app-container">
|
|
|
|
| 26 |
</div>
|
| 27 |
</div>
|
| 28 |
<div class="tab-bar-spacer"></div>
|
| 29 |
+
<button class="sessions-btn" id="sessionsBtn">SESSIONS</button>
|
| 30 |
<button class="files-btn" id="filesBtn">FILES</button>
|
| 31 |
<button class="debug-btn" id="debugBtn">DEBUG</button>
|
| 32 |
<button class="settings-btn" id="settingsBtn">SETTINGS</button>
|
|
|
|
| 53 |
|
| 54 |
<div class="notebook-body">
|
| 55 |
<div class="chat-container" id="messages-command">
|
| 56 |
+
<div class="welcome-message" id="welcomeMessage">
|
| 57 |
<p>Welcome to Productive — an AI interface with specialized notebooks.</p>
|
| 58 |
+
|
| 59 |
+
<div class="session-selector" id="sessionSelector">
|
| 60 |
+
<div class="session-selector-form">
|
| 61 |
+
<div class="session-new">
|
| 62 |
+
<label>New session:</label>
|
| 63 |
+
<div class="session-new-input">
|
| 64 |
+
<div class="session-input-wrapper">
|
| 65 |
+
<input type="text" id="newSessionName" placeholder="Session name..." />
|
| 66 |
+
<button type="button" class="session-regenerate-btn" id="regenerateNameBtn" title="Generate new name">↻</button>
|
| 67 |
+
</div>
|
| 68 |
+
<button id="createSessionBtn">START</button>
|
| 69 |
+
</div>
|
| 70 |
+
</div>
|
| 71 |
+
|
| 72 |
+
<div class="session-existing">
|
| 73 |
+
<label>Or continue:</label>
|
| 74 |
+
<select id="existingSessionSelect">
|
| 75 |
+
<option value="">-- Select session --</option>
|
| 76 |
+
</select>
|
| 77 |
+
</div>
|
| 78 |
+
</div>
|
| 79 |
+
</div>
|
| 80 |
+
|
| 81 |
+
<div class="welcome-explanation">
|
| 82 |
+
<p>The assistant can automatically open specialized notebooks for different tasks:</p>
|
| 83 |
+
<ul>
|
| 84 |
+
<li><strong style="color: var(--theme-accent)">BASE</strong> — Basic tasks with search</li>
|
| 85 |
+
<li><strong style="color: var(--theme-accent)">CODE</strong> — Programming and data analysis</li>
|
| 86 |
+
<li><strong style="color: var(--theme-accent)">RESEARCH</strong> — Deep research and information gathering</li>
|
| 87 |
+
<li><strong style="color: var(--theme-accent)">COMPUTER</strong> <em>(coming soon)</em> — Computer control and automation</li>
|
| 88 |
+
<li><strong style="color: var(--theme-accent)">IMAGE</strong> <em>(coming soon)</em> — Image generation and editing</li>
|
| 89 |
+
</ul>
|
| 90 |
+
<div class="workflow-diagram">
|
| 91 |
+
<svg class="diagram-svg" viewBox="0 0 500 180" preserveAspectRatio="xMidYMid meet">
|
| 92 |
+
<defs>
|
| 93 |
+
<marker id="arrowhead" markerWidth="8" markerHeight="6" refX="7" refY="3" orient="auto">
|
| 94 |
+
<polygon points="0 0, 8 3, 0 6" fill="var(--theme-accent)" />
|
| 95 |
+
</marker>
|
| 96 |
+
</defs>
|
| 97 |
+
|
| 98 |
+
<!-- Task Center Box -->
|
| 99 |
+
<rect x="20" y="10" width="180" height="160" rx="4" fill="#fafafa" stroke="#e0e0e0" stroke-width="1"/>
|
| 100 |
+
<text x="110" y="28" text-anchor="middle" font-size="11" font-weight="600" fill="var(--theme-accent)">TASK CENTER</text>
|
| 101 |
+
|
| 102 |
+
<!-- User Message -->
|
| 103 |
+
<rect x="35" y="40" width="150" height="24" rx="3" fill="white" stroke="var(--theme-accent)" stroke-width="1"/>
|
| 104 |
+
<text x="110" y="56" text-anchor="middle" font-size="9" fill="#333">User input</text>
|
| 105 |
+
|
| 106 |
+
<!-- Task Widget (combined task + report) -->
|
| 107 |
+
<rect x="35" y="74" width="150" height="86" rx="3" fill="white" stroke="#ccc" stroke-width="1"/>
|
| 108 |
+
<rect x="35" y="74" width="150" height="20" rx="3" fill="var(--theme-accent)"/>
|
| 109 |
+
<text x="110" y="88" text-anchor="middle" font-size="9" font-weight="500" fill="white">TASK</text>
|
| 110 |
+
<text x="110" y="115" text-anchor="middle" font-size="9" fill="#666">processing...</text>
|
| 111 |
+
<line x1="45" y1="130" x2="175" y2="130" stroke="#eee" stroke-width="1"/>
|
| 112 |
+
<text x="110" y="150" text-anchor="middle" font-size="9" fill="#333">Report summary</text>
|
| 113 |
+
|
| 114 |
+
<!-- Notebook Box -->
|
| 115 |
+
<rect x="300" y="10" width="180" height="160" rx="4" fill="#fafafa" stroke="#e0e0e0" stroke-width="1"/>
|
| 116 |
+
<text x="390" y="28" text-anchor="middle" font-size="11" font-weight="600" fill="var(--theme-accent)">NOTEBOOK</text>
|
| 117 |
+
|
| 118 |
+
<!-- Query (top of notebook) -->
|
| 119 |
+
<rect x="315" y="40" width="150" height="24" rx="3" fill="white" stroke="var(--theme-accent)" stroke-width="1"/>
|
| 120 |
+
<text x="390" y="56" text-anchor="middle" font-size="9" fill="#333">Query</text>
|
| 121 |
+
|
| 122 |
+
<!-- Intermediate steps (faded) -->
|
| 123 |
+
<rect x="315" y="72" width="150" height="14" rx="2" fill="#f5f5f5" stroke="#e8e8e8" stroke-width="1"/>
|
| 124 |
+
<text x="390" y="82" text-anchor="middle" font-size="8" fill="#aaa">step 1</text>
|
| 125 |
+
<rect x="315" y="90" width="150" height="14" rx="2" fill="#f8f8f8" stroke="#eee" stroke-width="1"/>
|
| 126 |
+
<text x="390" y="100" text-anchor="middle" font-size="8" fill="#bbb">step 2</text>
|
| 127 |
+
<rect x="315" y="108" width="150" height="14" rx="2" fill="#fbfbfb" stroke="#f0f0f0" stroke-width="1"/>
|
| 128 |
+
<text x="390" y="118" text-anchor="middle" font-size="8" fill="#ccc">...</text>
|
| 129 |
+
|
| 130 |
+
<!-- Report (bottom of notebook) - aligned with Task report area at y=145 -->
|
| 131 |
+
<rect x="315" y="130" width="150" height="30" rx="3" fill="white" stroke="var(--theme-accent)" stroke-width="1"/>
|
| 132 |
+
<text x="390" y="150" text-anchor="middle" font-size="9" fill="#333">Report</text>
|
| 133 |
+
|
| 134 |
+
<!-- Arrows (drawn last to be on top) -->
|
| 135 |
+
<!-- Arrow from Task to Query (straight with corners) -->
|
| 136 |
+
<polyline points="185,84 250,84 250,52 307,52" fill="none" stroke="var(--theme-accent)" stroke-width="1.5" marker-end="url(#arrowhead)" stroke-dasharray="4,2"/>
|
| 137 |
+
|
| 138 |
+
<!-- Arrow from Report back to Task (straight horizontal) -->
|
| 139 |
+
<line x1="315" y1="150" x2="193" y2="150" stroke="var(--theme-accent)" stroke-width="1.5" marker-end="url(#arrowhead)"/>
|
| 140 |
+
</svg>
|
| 141 |
+
</div>
|
| 142 |
+
<p>When a notebook is opened, you'll see a widget you can click to jump to it. A pulsing dot on the tab indicates active generation.</p>
|
| 143 |
</div>
|
|
|
|
| 144 |
</div>
|
| 145 |
</div>
|
| 146 |
</div>
|
| 147 |
|
| 148 |
+
<div class="input-area" id="commandInputArea" style="display: none;">
|
| 149 |
<div class="input-container">
|
| 150 |
<textarea placeholder="Enter message..." id="input-command" rows="1"></textarea>
|
| 151 |
<button id="sendCommand">SEND</button>
|
|
|
|
| 331 |
</div>
|
| 332 |
</div>
|
| 333 |
|
| 334 |
+
<!-- Sessions Panel -->
|
| 335 |
+
<div class="sessions-panel" id="sessionsPanel">
|
| 336 |
+
<div class="sessions-panel-header">
|
| 337 |
+
<h3>SESSIONS</h3>
|
| 338 |
+
<button class="sessions-panel-close" id="sessionsPanelClose">×</button>
|
| 339 |
+
</div>
|
| 340 |
+
<div class="sessions-panel-body">
|
| 341 |
+
<div class="sessions-current">
|
| 342 |
+
<label>Current session:</label>
|
| 343 |
+
<div class="sessions-current-name">
|
| 344 |
+
<input type="text" id="currentSessionRename" placeholder="Session name" />
|
| 345 |
+
<button id="renameSessionBtn" title="Rename">✓</button>
|
| 346 |
+
</div>
|
| 347 |
+
</div>
|
| 348 |
+
|
| 349 |
+
<div class="sessions-divider"></div>
|
| 350 |
+
|
| 351 |
+
<div class="sessions-new">
|
| 352 |
+
<label>New session:</label>
|
| 353 |
+
<div class="sessions-new-input">
|
| 354 |
+
<div class="session-input-wrapper">
|
| 355 |
+
<input type="text" id="panelNewSessionName" placeholder="Name..." />
|
| 356 |
+
<button type="button" class="session-regenerate-btn" id="panelRegenerateNameBtn" title="Generate new name">↻</button>
|
| 357 |
+
</div>
|
| 358 |
+
<button id="panelCreateSessionBtn">CREATE</button>
|
| 359 |
+
</div>
|
| 360 |
+
</div>
|
| 361 |
+
|
| 362 |
+
<div class="sessions-divider"></div>
|
| 363 |
+
|
| 364 |
+
<div class="sessions-list-section">
|
| 365 |
+
<label>Switch to:</label>
|
| 366 |
+
<div class="sessions-list" id="sessionsList">
|
| 367 |
+
<div class="sessions-loading">Loading...</div>
|
| 368 |
+
</div>
|
| 369 |
+
</div>
|
| 370 |
+
</div>
|
| 371 |
+
</div>
|
| 372 |
+
|
| 373 |
+
<!-- Session Indicator (top left) -->
|
| 374 |
+
<div class="session-indicator" id="sessionIndicator" style="display: none;">
|
| 375 |
+
session: <a id="currentSessionName" onclick="openSessionsPanel()"></a>
|
| 376 |
+
</div>
|
| 377 |
+
|
| 378 |
<!-- Provider Dialog -->
|
| 379 |
<div class="settings-dialog-overlay" id="provider-dialog">
|
| 380 |
<div class="settings-dialog">
|
|
|
|
| 426 |
<input type="text" id="model-model-id" class="settings-input" placeholder="e.g., gpt-4o, claude-3-5-sonnet-20241022">
|
| 427 |
<span class="dialog-hint">The exact model identifier used in API calls</span>
|
| 428 |
</div>
|
| 429 |
+
<div class="dialog-field">
|
| 430 |
+
<label class="dialog-label">EXTRA PARAMETERS</label>
|
| 431 |
+
<textarea id="model-extra-params" class="settings-textarea" placeholder='{"enable_thinking": false}' rows="3"></textarea>
|
| 432 |
+
<span class="dialog-hint">Optional JSON object with extra parameters for API calls (e.g., enable_thinking, temperature)</span>
|
| 433 |
+
</div>
|
| 434 |
</div>
|
| 435 |
<div class="settings-dialog-actions">
|
| 436 |
<button class="settings-save-btn" onclick="saveModelFromDialog()">SAVE</button>
|
|
|
|
| 443 |
<script src="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/components/prism-python.min.js"></script>
|
| 444 |
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
|
| 445 |
<script src="https://cdn.jsdelivr.net/npm/katex@0.16.9/dist/katex.min.js"></script>
|
| 446 |
+
<script src="research-ui.js?v=23"></script>
|
| 447 |
+
<script src="script.js?v=32"></script>
|
| 448 |
</body>
|
| 449 |
</html>
|
frontend/research-ui.js
CHANGED
|
@@ -86,7 +86,7 @@ function createQueriesMessage(chatContainer, queries, iteration) {
|
|
| 86 |
html += `
|
| 87 |
<div class="query-tree ${isLast ? 'last' : ''}" id="query-group-${idx}">
|
| 88 |
<div class="query-node">
|
| 89 |
-
<span class="query-text">${
|
| 90 |
<span class="query-stats" id="query-stats-${idx}">${stats.relevant} relevant / ${stats.irrelevant} not relevant / ${stats.error} failed</span>
|
| 91 |
</div>
|
| 92 |
<div class="query-sources-tree" id="query-sources-${idx}"></div>
|
|
@@ -162,7 +162,7 @@ function renderFullQueryTree(treeContainer) {
|
|
| 162 |
html += `
|
| 163 |
<div class="query-tree ${isLast ? 'last' : ''}" id="query-group-${idx}">
|
| 164 |
<div class="query-node">
|
| 165 |
-
<span class="query-text">${
|
| 166 |
<span class="query-stats" id="query-stats-${idx}">${stats.relevant} relevant / ${stats.irrelevant} not relevant / ${stats.error} failed</span>
|
| 167 |
</div>
|
| 168 |
<div class="query-sources-tree" id="query-sources-${idx}"></div>
|
|
@@ -305,7 +305,7 @@ function createAssessmentMessage(chatContainer, sufficient, missingAspects, find
|
|
| 305 |
reasoningText = `Continuing research - ${findingsCount} relevant sources found. Missing: ${missingAspects.join(', ')}`;
|
| 306 |
}
|
| 307 |
|
| 308 |
-
reasoningSection.innerHTML = `<div class="research-reasoning">${
|
| 309 |
}
|
| 310 |
|
| 311 |
function createReportMessage(chatContainer, content, sourcesCount, websitesVisited) {
|
|
|
|
| 86 |
html += `
|
| 87 |
<div class="query-tree ${isLast ? 'last' : ''}" id="query-group-${idx}">
|
| 88 |
<div class="query-node">
|
| 89 |
+
<span class="query-text">${parseMarkdown(query)}</span>
|
| 90 |
<span class="query-stats" id="query-stats-${idx}">${stats.relevant} relevant / ${stats.irrelevant} not relevant / ${stats.error} failed</span>
|
| 91 |
</div>
|
| 92 |
<div class="query-sources-tree" id="query-sources-${idx}"></div>
|
|
|
|
| 162 |
html += `
|
| 163 |
<div class="query-tree ${isLast ? 'last' : ''}" id="query-group-${idx}">
|
| 164 |
<div class="query-node">
|
| 165 |
+
<span class="query-text">${parseMarkdown(query)}</span>
|
| 166 |
<span class="query-stats" id="query-stats-${idx}">${stats.relevant} relevant / ${stats.irrelevant} not relevant / ${stats.error} failed</span>
|
| 167 |
</div>
|
| 168 |
<div class="query-sources-tree" id="query-sources-${idx}"></div>
|
|
|
|
| 305 |
reasoningText = `Continuing research - ${findingsCount} relevant sources found. Missing: ${missingAspects.join(', ')}`;
|
| 306 |
}
|
| 307 |
|
| 308 |
+
reasoningSection.innerHTML = `<div class="research-reasoning">${parseMarkdown(reasoningText)}</div>`;
|
| 309 |
}
|
| 310 |
|
| 311 |
function createReportMessage(chatContainer, content, sourcesCount, websitesVisited) {
|
frontend/script.js
CHANGED
|
@@ -1,6 +1,20 @@
|
|
| 1 |
// State management
|
| 2 |
let tabCounter = 1;
|
| 3 |
let activeTabId = 0;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
let settings = {
|
| 5 |
// New provider/model structure
|
| 6 |
providers: {}, // providerId -> {name, endpoint, token}
|
|
@@ -57,8 +71,19 @@ let saveWorkspaceTimer = null;
|
|
| 57 |
document.addEventListener('DOMContentLoaded', async () => {
|
| 58 |
await loadSettings();
|
| 59 |
applyTheme(settings.themeColor || 'forest');
|
| 60 |
-
await loadWorkspace();
|
| 61 |
initializeEventListeners();
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
});
|
| 63 |
|
| 64 |
function initializeEventListeners() {
|
|
@@ -182,6 +207,364 @@ function initializeEventListeners() {
|
|
| 182 |
}
|
| 183 |
}
|
| 184 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 185 |
function createNotebookTab(type, initialMessage = null, autoSwitch = true, taskId = null) {
|
| 186 |
const tabId = tabCounter++;
|
| 187 |
|
|
@@ -380,6 +763,40 @@ function closeTab(tabId) {
|
|
| 380 |
}
|
| 381 |
}
|
| 382 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 383 |
async function sendMessage(tabId) {
|
| 384 |
const content = document.querySelector(`[data-content-id="${tabId}"]`);
|
| 385 |
if (!content) return;
|
|
@@ -406,6 +823,15 @@ async function sendMessage(tabId) {
|
|
| 406 |
userMsg.innerHTML = `<div class="message-content">${parseMarkdown(message.trim())}</div>`;
|
| 407 |
chatContainer.appendChild(userMsg);
|
| 408 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 409 |
// Generate a title for the notebook if this is the first message and not command center
|
| 410 |
if (isFirstMessage && tabId !== 0) {
|
| 411 |
generateNotebookTitle(tabId, message);
|
|
@@ -599,11 +1025,13 @@ async function streamChatResponse(messages, chatContainer, notebookType, tabId)
|
|
| 599 |
endpoint: modelConfig.endpoint,
|
| 600 |
token: modelConfig.token || null,
|
| 601 |
model: modelConfig.model,
|
|
|
|
| 602 |
e2b_key: currentSettings.e2bKey || null,
|
| 603 |
serper_key: currentSettings.serperKey || null,
|
| 604 |
research_sub_agent_model: researchSubAgentConfig?.model || null,
|
| 605 |
research_sub_agent_endpoint: researchSubAgentConfig?.endpoint || null,
|
| 606 |
research_sub_agent_token: researchSubAgentConfig?.token || null,
|
|
|
|
| 607 |
research_parallel_workers: currentSettings.researchParallelWorkers || null,
|
| 608 |
research_max_websites: currentSettings.researchMaxWebsites || null,
|
| 609 |
notebook_id: tabId.toString(), // Send unique tab ID for sandbox sessions
|
|
@@ -620,6 +1048,7 @@ async function streamChatResponse(messages, chatContainer, notebookType, tabId)
|
|
| 620 |
let buffer = '';
|
| 621 |
let fullResponse = '';
|
| 622 |
let currentMessageEl = null;
|
|
|
|
| 623 |
|
| 624 |
while (true) {
|
| 625 |
const { done, value } = await reader.read();
|
|
@@ -633,6 +1062,12 @@ async function streamChatResponse(messages, chatContainer, notebookType, tabId)
|
|
| 633 |
if (line.startsWith('data: ')) {
|
| 634 |
const data = JSON.parse(line.slice(6));
|
| 635 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 636 |
// Handle different message types from backend
|
| 637 |
if (data.type === 'thinking') {
|
| 638 |
// Assistant thinking - create message if not exists
|
|
@@ -640,31 +1075,31 @@ async function streamChatResponse(messages, chatContainer, notebookType, tabId)
|
|
| 640 |
currentMessageEl = createAssistantMessage(chatContainer);
|
| 641 |
}
|
| 642 |
appendToMessage(currentMessageEl, parseMarkdown(data.content));
|
| 643 |
-
chatContainer
|
| 644 |
|
| 645 |
} else if (data.type === 'code') {
|
| 646 |
// Code execution result - update the last code cell
|
| 647 |
updateLastCodeCell(chatContainer, data.output, data.error, data.images);
|
| 648 |
currentMessageEl = null; // Reset for next thinking
|
| 649 |
-
chatContainer
|
| 650 |
|
| 651 |
} else if (data.type === 'code_start') {
|
| 652 |
// Code cell starting execution - show with spinner
|
| 653 |
createCodeCell(chatContainer, data.code, null, false, true);
|
| 654 |
currentMessageEl = null;
|
| 655 |
-
chatContainer
|
| 656 |
|
| 657 |
} else if (data.type === 'upload') {
|
| 658 |
// File upload notification
|
| 659 |
createUploadMessage(chatContainer, data.paths, data.output);
|
| 660 |
currentMessageEl = null;
|
| 661 |
-
chatContainer
|
| 662 |
|
| 663 |
} else if (data.type === 'download') {
|
| 664 |
// File download notification
|
| 665 |
createDownloadMessage(chatContainer, data.paths, data.output);
|
| 666 |
currentMessageEl = null;
|
| 667 |
-
chatContainer
|
| 668 |
|
| 669 |
} else if (data.type === 'generating') {
|
| 670 |
// Still generating - no action needed
|
|
@@ -720,27 +1155,27 @@ async function streamChatResponse(messages, chatContainer, notebookType, tabId)
|
|
| 720 |
<div class="report-content">${html}</div>
|
| 721 |
`;
|
| 722 |
chatContainer.appendChild(resultDiv);
|
| 723 |
-
chatContainer
|
| 724 |
|
| 725 |
} else if (data.type === 'status') {
|
| 726 |
// Research status update
|
| 727 |
createStatusMessage(chatContainer, data.message, data.iteration, data.total_iterations);
|
| 728 |
-
chatContainer
|
| 729 |
|
| 730 |
} else if (data.type === 'queries') {
|
| 731 |
// Research queries generated
|
| 732 |
createQueriesMessage(chatContainer, data.queries, data.iteration);
|
| 733 |
-
chatContainer
|
| 734 |
|
| 735 |
} else if (data.type === 'progress') {
|
| 736 |
// Research progress
|
| 737 |
updateProgress(chatContainer, data.message, data.websites_visited, data.max_websites);
|
| 738 |
-
chatContainer
|
| 739 |
|
| 740 |
} else if (data.type === 'source') {
|
| 741 |
// Research source found - now includes query grouping
|
| 742 |
createSourceMessage(chatContainer, data);
|
| 743 |
-
chatContainer
|
| 744 |
|
| 745 |
} else if (data.type === 'query_stats') {
|
| 746 |
// Update query statistics
|
|
@@ -753,12 +1188,12 @@ async function streamChatResponse(messages, chatContainer, notebookType, tabId)
|
|
| 753 |
} else if (data.type === 'assessment') {
|
| 754 |
// Research completeness assessment
|
| 755 |
createAssessmentMessage(chatContainer, data.sufficient, data.missing_aspects, data.findings_count, data.reasoning);
|
| 756 |
-
chatContainer
|
| 757 |
|
| 758 |
} else if (data.type === 'report') {
|
| 759 |
// Final research report
|
| 760 |
createReportMessage(chatContainer, data.content, data.sources_count, data.websites_visited);
|
| 761 |
-
chatContainer
|
| 762 |
|
| 763 |
} else if (data.type === 'content') {
|
| 764 |
// Regular streaming content (non-code notebooks)
|
|
@@ -767,7 +1202,7 @@ async function streamChatResponse(messages, chatContainer, notebookType, tabId)
|
|
| 767 |
}
|
| 768 |
fullResponse += data.content;
|
| 769 |
appendToMessage(currentMessageEl, parseMarkdown(fullResponse));
|
| 770 |
-
chatContainer
|
| 771 |
|
| 772 |
} else if (data.type === 'launch') {
|
| 773 |
// Tool-based notebook launch from command center
|
|
@@ -829,7 +1264,7 @@ async function streamChatResponse(messages, chatContainer, notebookType, tabId)
|
|
| 829 |
infoDiv.innerHTML = `<em>${escapeHtml(data.content)}</em>`;
|
| 830 |
infoDiv.style.color = 'var(--theme-accent)';
|
| 831 |
chatContainer.appendChild(infoDiv);
|
| 832 |
-
chatContainer
|
| 833 |
|
| 834 |
} else if (data.type === 'retry') {
|
| 835 |
// Show retry indicator
|
|
@@ -842,12 +1277,13 @@ async function streamChatResponse(messages, chatContainer, notebookType, tabId)
|
|
| 842 |
errorDiv.className = 'message assistant';
|
| 843 |
errorDiv.innerHTML = `<div class="message-content" style="color: #c62828;">Error: ${escapeHtml(data.content)}</div>`;
|
| 844 |
chatContainer.appendChild(errorDiv);
|
| 845 |
-
chatContainer
|
| 846 |
}
|
| 847 |
}
|
| 848 |
}
|
| 849 |
}
|
| 850 |
} catch (error) {
|
|
|
|
| 851 |
const errorDiv = document.createElement('div');
|
| 852 |
errorDiv.className = 'message assistant';
|
| 853 |
errorDiv.innerHTML = `<div class="message-content" style="color: #c62828;">Connection error: ${escapeHtml(error.message)}</div>`;
|
|
@@ -1023,7 +1459,7 @@ function showActionWidget(chatContainer, action, message, targetTabId, taskId =
|
|
| 1023 |
</div>
|
| 1024 |
<div class="action-widget-body">
|
| 1025 |
<div class="section-label">QUERY</div>
|
| 1026 |
-
<div class="section-content">${
|
| 1027 |
</div>
|
| 1028 |
`;
|
| 1029 |
|
|
@@ -1037,7 +1473,7 @@ function showActionWidget(chatContainer, action, message, targetTabId, taskId =
|
|
| 1037 |
clickableArea.addEventListener('click', clickHandler);
|
| 1038 |
|
| 1039 |
chatContainer.appendChild(widget);
|
| 1040 |
-
chatContainer
|
| 1041 |
|
| 1042 |
// Store widget for later updates
|
| 1043 |
actionWidgets[targetTabId] = widget;
|
|
@@ -1209,7 +1645,7 @@ function showRetryIndicator(chatContainer, data) {
|
|
| 1209 |
</div>
|
| 1210 |
`;
|
| 1211 |
chatContainer.appendChild(retryDiv);
|
| 1212 |
-
chatContainer
|
| 1213 |
|
| 1214 |
// Start countdown
|
| 1215 |
let remaining = data.delay;
|
|
@@ -1524,7 +1960,7 @@ function renderActionWidget(data) {
|
|
| 1524 |
|
| 1525 |
let bodyHtml = `
|
| 1526 |
<div class="section-label">QUERY</div>
|
| 1527 |
-
<div class="section-content">${
|
| 1528 |
`;
|
| 1529 |
|
| 1530 |
// Add result section if present
|
|
@@ -2170,17 +2606,19 @@ function deleteProvider(providerId) {
|
|
| 2170 |
// Show add/edit model dialog
|
| 2171 |
function showModelDialog(modelId = null) {
|
| 2172 |
const isEdit = !!modelId;
|
| 2173 |
-
const model = isEdit ? settings.models[modelId] : { name: '', providerId: '', modelId: '' };
|
| 2174 |
|
| 2175 |
const dialog = document.getElementById('model-dialog');
|
| 2176 |
const title = document.getElementById('model-dialog-title');
|
| 2177 |
const nameInput = document.getElementById('model-name');
|
| 2178 |
const providerSelect = document.getElementById('model-provider');
|
| 2179 |
const modelIdInput = document.getElementById('model-model-id');
|
|
|
|
| 2180 |
|
| 2181 |
title.textContent = isEdit ? 'Edit Model' : 'Add Model';
|
| 2182 |
nameInput.value = model.name;
|
| 2183 |
modelIdInput.value = model.modelId;
|
|
|
|
| 2184 |
|
| 2185 |
// Populate provider dropdown
|
| 2186 |
providerSelect.innerHTML = '<option value="">-- Select Provider --</option>';
|
|
@@ -2209,13 +2647,25 @@ function saveModelFromDialog() {
|
|
| 2209 |
const name = document.getElementById('model-name').value.trim();
|
| 2210 |
const providerId = document.getElementById('model-provider').value;
|
| 2211 |
const apiModelId = document.getElementById('model-model-id').value.trim();
|
|
|
|
| 2212 |
|
| 2213 |
if (!name || !providerId || !apiModelId) {
|
| 2214 |
-
alert('
|
| 2215 |
return;
|
| 2216 |
}
|
| 2217 |
|
| 2218 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2219 |
hideModelDialog();
|
| 2220 |
renderModelsList();
|
| 2221 |
populateModelDropdowns();
|
|
@@ -2439,7 +2889,7 @@ function getSettings() {
|
|
| 2439 |
}
|
| 2440 |
|
| 2441 |
// Resolve model configuration for a notebook type
|
| 2442 |
-
// Returns { endpoint, token, model } or null if not configured
|
| 2443 |
function resolveModelConfig(notebookType) {
|
| 2444 |
const modelId = settings.notebooks?.[notebookType];
|
| 2445 |
if (!modelId) return null;
|
|
@@ -2453,7 +2903,8 @@ function resolveModelConfig(notebookType) {
|
|
| 2453 |
return {
|
| 2454 |
endpoint: provider.endpoint,
|
| 2455 |
token: provider.token,
|
| 2456 |
-
model: model.modelId
|
|
|
|
| 2457 |
};
|
| 2458 |
}
|
| 2459 |
|
|
@@ -2470,7 +2921,8 @@ function getDefaultModelConfig() {
|
|
| 2470 |
return {
|
| 2471 |
endpoint: provider.endpoint,
|
| 2472 |
token: provider.token,
|
| 2473 |
-
model: model.modelId
|
|
|
|
| 2474 |
};
|
| 2475 |
}
|
| 2476 |
|
|
@@ -2647,6 +3099,11 @@ if (debugBtn) {
|
|
| 2647 |
if (filesPanel) filesPanel.classList.remove('active');
|
| 2648 |
if (filesBtn) filesBtn.classList.remove('active');
|
| 2649 |
appContainer.classList.remove('files-panel-open');
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2650 |
|
| 2651 |
// Toggle debug panel
|
| 2652 |
debugPanel.classList.toggle('active');
|
|
@@ -2743,6 +3200,11 @@ if (settingsBtn) {
|
|
| 2743 |
if (filesPanel) filesPanel.classList.remove('active');
|
| 2744 |
if (filesBtn) filesBtn.classList.remove('active');
|
| 2745 |
appContainer.classList.remove('files-panel-open');
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2746 |
|
| 2747 |
openSettings(); // Populate form fields with current values
|
| 2748 |
settingsPanel.classList.add('active');
|
|
@@ -2934,6 +3396,11 @@ if (filesBtn) {
|
|
| 2934 |
debugPanel.classList.remove('active');
|
| 2935 |
debugBtn.classList.remove('active');
|
| 2936 |
appContainer.classList.remove('panel-open');
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2937 |
|
| 2938 |
// Toggle files panel
|
| 2939 |
filesPanel.classList.toggle('active');
|
|
@@ -2972,3 +3439,44 @@ if (showHiddenFiles) {
|
|
| 2972 |
});
|
| 2973 |
}
|
| 2974 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
// State management
|
| 2 |
let tabCounter = 1;
|
| 3 |
let activeTabId = 0;
|
| 4 |
+
let currentSession = null; // Name of the current session
|
| 5 |
+
|
| 6 |
+
// Fetch random isotope name from backend
|
| 7 |
+
async function generateSessionName() {
|
| 8 |
+
try {
|
| 9 |
+
const response = await fetch('/api/sessions/random-name');
|
| 10 |
+
const data = await response.json();
|
| 11 |
+
return data.name;
|
| 12 |
+
} catch (error) {
|
| 13 |
+
// Fallback to timestamp if API fails
|
| 14 |
+
return `session-${Date.now()}`;
|
| 15 |
+
}
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
let settings = {
|
| 19 |
// New provider/model structure
|
| 20 |
providers: {}, // providerId -> {name, endpoint, token}
|
|
|
|
| 71 |
document.addEventListener('DOMContentLoaded', async () => {
|
| 72 |
await loadSettings();
|
| 73 |
applyTheme(settings.themeColor || 'forest');
|
|
|
|
| 74 |
initializeEventListeners();
|
| 75 |
+
initializeSessionListeners();
|
| 76 |
+
|
| 77 |
+
// Check if we have a session selected
|
| 78 |
+
const sessionsData = await fetchSessions();
|
| 79 |
+
if (sessionsData.current) {
|
| 80 |
+
// Session already selected (from backend state)
|
| 81 |
+
currentSession = sessionsData.current;
|
| 82 |
+
await onSessionSelected(currentSession);
|
| 83 |
+
} else {
|
| 84 |
+
// Show session selector
|
| 85 |
+
showSessionSelector(sessionsData.sessions);
|
| 86 |
+
}
|
| 87 |
});
|
| 88 |
|
| 89 |
function initializeEventListeners() {
|
|
|
|
| 207 |
}
|
| 208 |
}
|
| 209 |
|
| 210 |
+
// ============================================
|
| 211 |
+
// Session Management
|
| 212 |
+
// ============================================
|
| 213 |
+
|
| 214 |
+
async function fetchSessions() {
|
| 215 |
+
try {
|
| 216 |
+
const response = await fetch('/api/sessions');
|
| 217 |
+
if (response.ok) {
|
| 218 |
+
return await response.json();
|
| 219 |
+
}
|
| 220 |
+
} catch (e) {
|
| 221 |
+
console.error('Failed to fetch sessions:', e);
|
| 222 |
+
}
|
| 223 |
+
return { sessions: [], current: null };
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
/**
 * Switch the welcome screen into "pick a session" mode: show the welcome
 * message and session selector, hide the current-session indicator and the
 * command input, and rebuild the existing-sessions dropdown.
 *
 * @param {Array<{name: string, modified: number}>} sessions - sessions to list
 */
function showSessionSelector(sessions) {
    // Target display state per element id for this mode.
    const displayById = {
        welcomeMessage: 'block',
        sessionSelector: 'block',
        sessionIndicator: 'none',
        commandInputArea: 'none',
    };
    for (const [id, display] of Object.entries(displayById)) {
        const el = document.getElementById(id);
        if (el) el.style.display = display;
    }

    // Rebuild the "existing sessions" dropdown from scratch.
    const select = document.getElementById('existingSessionSelect');
    if (!select) return;
    select.innerHTML = '<option value="">-- Select session --</option>';
    for (const session of sessions) {
        const option = document.createElement('option');
        option.value = session.name;
        option.textContent = `${session.name} (${formatDate(session.modified)})`;
        select.appendChild(option);
    }
}
|
| 250 |
+
|
| 251 |
+
/**
 * Leave "pick a session" mode: hide the selector while keeping the welcome
 * message visible, and reveal the session indicator and command input.
 */
function hideSessionSelector() {
    // Target display state per element id for the "session chosen" mode.
    const displayById = {
        sessionSelector: 'none',
        welcomeMessage: 'block',
        sessionIndicator: 'block',
        commandInputArea: 'block',
    };
    for (const [id, display] of Object.entries(displayById)) {
        const el = document.getElementById(id);
        if (el) el.style.display = display;
    }
}
|
| 263 |
+
|
| 264 |
+
/**
 * Activate a session in the UI: record it as current, dismiss the selector,
 * sync the name displays, then load its workspace and refresh the panel list.
 *
 * @param {string} sessionName - name of the session that became active
 */
async function onSessionSelected(sessionName) {
    currentSession = sessionName;
    hideSessionSelector();

    // Mirror the active name into both the header label and the rename field.
    const nameLabel = document.getElementById('currentSessionName');
    if (nameLabel) nameLabel.textContent = sessionName;
    const renameField = document.getElementById('currentSessionRename');
    if (renameField) renameField.value = sessionName;

    // Pull this session's workspace, then update the sessions panel.
    await loadWorkspace();
    await refreshSessionsList();
}
|
| 281 |
+
|
| 282 |
+
/**
 * Create a new session on the backend and activate it on success.
 *
 * @param {string} name - desired session name
 * @returns {Promise<boolean>} true when created and selected, false otherwise
 */
async function createSession(name) {
    try {
        const response = await fetch('/api/sessions', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ name })
        });
        if (!response.ok) {
            // Surface the backend's error detail when available.
            const error = await response.json();
            alert(error.detail || 'Failed to create session');
            return false;
        }
        const data = await response.json();
        await onSessionSelected(data.name);
        return true;
    } catch (e) {
        console.error('Failed to create session:', e);
        alert('Failed to create session');
        return false;
    }
}
|
| 303 |
+
|
| 304 |
+
/**
 * Ask the backend to switch the active session, then reload the page so all
 * frontend state is rebuilt for the newly selected session.
 *
 * @param {string} name - session to switch to
 * @returns {Promise<boolean>} false on failure (on success the page reloads)
 */
async function selectSession(name) {
    try {
        const response = await fetch('/api/sessions/select', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ name })
        });
        if (!response.ok) {
            const error = await response.json();
            alert(error.detail || 'Failed to select session');
            return false;
        }
        // Full reload resets tabs, chat state, and workspace views.
        window.location.reload();
        return true;
    } catch (e) {
        console.error('Failed to select session:', e);
        alert('Failed to select session');
        return false;
    }
}
|
| 325 |
+
|
| 326 |
+
/**
 * Rename a session on the backend and sync the UI with the name the server
 * actually stored (which may differ from the requested name, e.g. after
 * sanitization).
 *
 * @param {string} oldName - current session name
 * @param {string} newName - requested new name
 * @returns {Promise<boolean>} true when the rename succeeded
 */
async function renameSession(oldName, newName) {
    try {
        const response = await fetch('/api/sessions/rename', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ oldName, newName })
        });
        if (!response.ok) {
            const error = await response.json();
            alert(error.detail || 'Failed to rename session');
            return false;
        }

        const data = await response.json();
        currentSession = data.name;

        // Keep the header label in step with the server-confirmed name.
        const nameLabel = document.getElementById('currentSessionName');
        if (nameLabel) nameLabel.textContent = data.name;

        await refreshSessionsList();
        return true;
    } catch (e) {
        console.error('Failed to rename session:', e);
        alert('Failed to rename session');
        return false;
    }
}
|
| 353 |
+
|
| 354 |
+
/**
 * Open the sessions side panel by triggering its toolbar button, reusing the
 * button's own toggle logic instead of duplicating it here.
 */
function openSessionsPanel() {
    document.getElementById('sessionsBtn')?.click();
}
|
| 360 |
+
|
| 361 |
+
/**
 * Re-fetch sessions from the backend and rebuild the sessions panel list.
 * Also keeps the rename input in sync with the currently active session.
 * Non-current sessions are clickable (to switch) and deletable.
 */
async function refreshSessionsList() {
    const sessionsData = await fetchSessions();
    const listEl = document.getElementById('sessionsList');

    // Keep the rename field showing the active session's name.
    const renameField = document.getElementById('currentSessionRename');
    if (renameField && currentSession) {
        renameField.value = currentSession;
    }

    if (!listEl) return;

    if (sessionsData.sessions.length === 0) {
        listEl.innerHTML = '<div class="sessions-loading">No other sessions</div>';
        return;
    }

    listEl.innerHTML = '';
    for (const session of sessionsData.sessions) {
        const isCurrent = session.name === currentSession;

        const item = document.createElement('div');
        item.className = 'sessions-list-item' + (isCurrent ? ' current' : '');
        item.innerHTML = `
            <span class="sessions-list-item-name">${escapeHtml(session.name)}</span>
            <span class="sessions-list-item-date">${formatDate(session.modified)}</span>
            ${!isCurrent ? `<button class="sessions-delete-btn" title="Delete session">×</button>` : ''}
        `;

        // The active session is display-only; others get switch/delete actions.
        if (!isCurrent) {
            const switchTo = () => selectSession(session.name);
            item.querySelector('.sessions-list-item-name').addEventListener('click', switchTo);
            item.querySelector('.sessions-list-item-date').addEventListener('click', switchTo);

            const deleteBtn = item.querySelector('.sessions-delete-btn');
            if (deleteBtn) {
                deleteBtn.addEventListener('click', (e) => {
                    // Don't let the click bubble up and trigger a switch.
                    e.stopPropagation();
                    deleteSession(session.name);
                });
            }
        }

        listEl.appendChild(item);
    }
}
|
| 408 |
+
|
| 409 |
+
/**
 * Permanently delete a session after user confirmation, then refresh both
 * the sessions panel list and the welcome-page session dropdown.
 *
 * @param {string} sessionName - session to delete (never the current one;
 *   the UI only renders delete buttons for non-current sessions)
 */
async function deleteSession(sessionName) {
    if (!confirm(`Delete session "${sessionName}"? This cannot be undone.`)) {
        return;
    }

    try {
        const response = await fetch(`/api/sessions/${encodeURIComponent(sessionName)}`, {
            method: 'DELETE'
        });

        if (!response.ok) {
            // Surface the backend's error detail when the body is JSON;
            // otherwise fall back to a generic message.
            try {
                const error = await response.json();
                alert(error.detail || 'Failed to delete session');
            } catch {
                alert('Failed to delete session');
            }
            return;
        }

        // Refresh the sessions list in the panel. Awaited so a failure is
        // caught below instead of escaping this async function as an
        // unhandled promise rejection (the original fired it off un-awaited).
        await refreshSessionsList();

        // Also refresh the welcome page dropdown.
        const sessionsData = await fetchSessions();
        const select = document.getElementById('existingSessionSelect');
        if (select) {
            select.innerHTML = '<option value="">-- Select session --</option>';
            sessionsData.sessions.forEach(session => {
                const option = document.createElement('option');
                option.value = session.name;
                option.textContent = `${session.name} (${formatDate(session.modified)})`;
                select.appendChild(option);
            });
        }
    } catch (error) {
        console.error('Error deleting session:', error);
        alert('Failed to delete session');
    }
}
|
| 449 |
+
|
| 450 |
+
function formatDate(timestamp) {
|
| 451 |
+
const date = new Date(timestamp * 1000);
|
| 452 |
+
const now = new Date();
|
| 453 |
+
const diff = now - date;
|
| 454 |
+
|
| 455 |
+
if (diff < 60000) return 'just now';
|
| 456 |
+
if (diff < 3600000) return Math.floor(diff / 60000) + 'm ago';
|
| 457 |
+
if (diff < 86400000) return Math.floor(diff / 3600000) + 'h ago';
|
| 458 |
+
if (diff < 604800000) return Math.floor(diff / 86400000) + 'd ago';
|
| 459 |
+
|
| 460 |
+
return date.toLocaleDateString();
|
| 461 |
+
}
|
| 462 |
+
|
| 463 |
+
/**
 * Wire up all session-related UI event handlers.
 *
 * Covers two surfaces:
 *   1. The welcome-page session selector (create button, name input with
 *      auto-generated default, regenerate button, existing-session dropdown).
 *   2. The sessions side panel (rename button, panel-local create/regenerate).
 *
 * Call once during app initialization; handlers reference module-level state
 * (`currentSession`) and helpers (`createSession`, `selectSession`,
 * `renameSession`, `generateSessionName`).
 */
function initializeSessionListeners() {
    // Welcome page session selector
    const createBtn = document.getElementById('createSessionBtn');
    const newNameInput = document.getElementById('newSessionName');
    const existingSelect = document.getElementById('existingSessionSelect');

    // Pre-populate with a cool random name
    // (generateSessionName is async — presumably it may hit the backend;
    // the value lands whenever the promise resolves.)
    if (newNameInput) {
        generateSessionName().then(name => newNameInput.value = name);
    }

    // Regenerate button
    const regenerateBtn = document.getElementById('regenerateNameBtn');
    if (regenerateBtn && newNameInput) {
        regenerateBtn.addEventListener('click', async () => {
            newNameInput.value = await generateSessionName();
        });
    }

    if (createBtn) {
        createBtn.addEventListener('click', async () => {
            const name = newNameInput?.value.trim();
            if (name) {
                createSession(name);
            } else {
                // Auto-generate name
                createSession(await generateSessionName());
            }
        });
    }

    // Enter in the name field behaves like clicking Create.
    if (newNameInput) {
        newNameInput.addEventListener('keydown', (e) => {
            if (e.key === 'Enter') {
                createBtn?.click();
            }
        });
    }

    // Choosing an existing session from the dropdown switches immediately.
    if (existingSelect) {
        existingSelect.addEventListener('change', () => {
            const name = existingSelect.value;
            if (name) {
                selectSession(name);
            }
        });
    }

    // Sessions panel handlers are set up at the end of the file with other panels

    // Panel rename button
    const renameBtn = document.getElementById('renameSessionBtn');
    const renameInput = document.getElementById('currentSessionRename');

    if (renameBtn && renameInput) {
        renameBtn.addEventListener('click', () => {
            const newName = renameInput.value.trim();
            // Only call the backend when the name actually changed.
            if (newName && newName !== currentSession) {
                renameSession(currentSession, newName);
            }
        });
    }

    // Panel create new session
    const panelCreateBtn = document.getElementById('panelCreateSessionBtn');
    const panelNewNameInput = document.getElementById('panelNewSessionName');
    const panelRegenerateBtn = document.getElementById('panelRegenerateNameBtn');

    // Pre-populate panel input with cool name too
    if (panelNewNameInput) {
        generateSessionName().then(name => panelNewNameInput.value = name);
    }

    // Panel regenerate button
    if (panelRegenerateBtn && panelNewNameInput) {
        panelRegenerateBtn.addEventListener('click', async () => {
            panelNewNameInput.value = await generateSessionName();
        });
    }

    if (panelCreateBtn) {
        panelCreateBtn.addEventListener('click', async () => {
            const name = panelNewNameInput?.value.trim();
            if (name) {
                // Create and switch to new session.
                // NOTE(review): unlike the welcome-page path, this posts
                // directly and reloads rather than going through
                // createSession() — the reload resets all frontend state.
                try {
                    const response = await fetch('/api/sessions', {
                        method: 'POST',
                        headers: { 'Content-Type': 'application/json' },
                        body: JSON.stringify({ name })
                    });
                    if (response.ok) {
                        window.location.reload();
                    } else {
                        const error = await response.json();
                        alert(error.detail || 'Failed to create session');
                    }
                } catch (e) {
                    alert('Failed to create session');
                }
            }
        });
    }
}
|
| 567 |
+
|
| 568 |
function createNotebookTab(type, initialMessage = null, autoSwitch = true, taskId = null) {
|
| 569 |
const tabId = tabCounter++;
|
| 570 |
|
|
|
|
| 763 |
}
|
| 764 |
}
|
| 765 |
|
| 766 |
+
/**
 * Append a "Generating..." spinner widget to the chat, replacing any widget
 * already present, and scroll the chat into view.
 *
 * @param {HTMLElement} chatContainer - chat message container
 * @returns {HTMLElement} the widget element that was appended
 */
function showProgressWidget(chatContainer) {
    // At most one progress widget at a time.
    hideProgressWidget(chatContainer);

    const widget = document.createElement('div');
    widget.className = 'progress-widget';
    widget.innerHTML = `
        <div class="progress-spinner">
            <span></span>
            <span></span>
            <span></span>
        </div>
        <span class="progress-text">Generating...</span>
    `;
    chatContainer.appendChild(widget);
    scrollChatToBottom(chatContainer);
    return widget;
}
|
| 784 |
+
|
| 785 |
+
/**
 * Remove the progress widget from the chat, if one is present.
 *
 * @param {HTMLElement} chatContainer - chat message container
 */
function hideProgressWidget(chatContainer) {
    chatContainer.querySelector('.progress-widget')?.remove();
}
|
| 791 |
+
|
| 792 |
+
/**
 * Scroll the chat to its latest message.
 *
 * The actual scrolling element is the enclosing `.notebook-body`, not the
 * chat container itself, so we walk up to it before adjusting scrollTop.
 *
 * @param {HTMLElement} chatContainer - chat message container
 */
function scrollChatToBottom(chatContainer) {
    const scroller = chatContainer.closest('.notebook-body');
    if (!scroller) return;
    scroller.scrollTop = scroller.scrollHeight;
}
|
| 799 |
+
|
| 800 |
async function sendMessage(tabId) {
|
| 801 |
const content = document.querySelector(`[data-content-id="${tabId}"]`);
|
| 802 |
if (!content) return;
|
|
|
|
| 823 |
userMsg.innerHTML = `<div class="message-content">${parseMarkdown(message.trim())}</div>`;
|
| 824 |
chatContainer.appendChild(userMsg);
|
| 825 |
|
| 826 |
+
// Scroll the notebook body (the actual scrolling container) to bottom
|
| 827 |
+
const notebookBody = chatContainer.closest('.notebook-body');
|
| 828 |
+
if (notebookBody) {
|
| 829 |
+
notebookBody.scrollTop = notebookBody.scrollHeight;
|
| 830 |
+
}
|
| 831 |
+
|
| 832 |
+
// Show progress widget while waiting for response
|
| 833 |
+
showProgressWidget(chatContainer);
|
| 834 |
+
|
| 835 |
// Generate a title for the notebook if this is the first message and not command center
|
| 836 |
if (isFirstMessage && tabId !== 0) {
|
| 837 |
generateNotebookTitle(tabId, message);
|
|
|
|
| 1025 |
endpoint: modelConfig.endpoint,
|
| 1026 |
token: modelConfig.token || null,
|
| 1027 |
model: modelConfig.model,
|
| 1028 |
+
extra_params: modelConfig.extraParams || null,
|
| 1029 |
e2b_key: currentSettings.e2bKey || null,
|
| 1030 |
serper_key: currentSettings.serperKey || null,
|
| 1031 |
research_sub_agent_model: researchSubAgentConfig?.model || null,
|
| 1032 |
research_sub_agent_endpoint: researchSubAgentConfig?.endpoint || null,
|
| 1033 |
research_sub_agent_token: researchSubAgentConfig?.token || null,
|
| 1034 |
+
research_sub_agent_extra_params: researchSubAgentConfig?.extraParams || null,
|
| 1035 |
research_parallel_workers: currentSettings.researchParallelWorkers || null,
|
| 1036 |
research_max_websites: currentSettings.researchMaxWebsites || null,
|
| 1037 |
notebook_id: tabId.toString(), // Send unique tab ID for sandbox sessions
|
|
|
|
| 1048 |
let buffer = '';
|
| 1049 |
let fullResponse = '';
|
| 1050 |
let currentMessageEl = null;
|
| 1051 |
+
let progressHidden = false;
|
| 1052 |
|
| 1053 |
while (true) {
|
| 1054 |
const { done, value } = await reader.read();
|
|
|
|
| 1062 |
if (line.startsWith('data: ')) {
|
| 1063 |
const data = JSON.parse(line.slice(6));
|
| 1064 |
|
| 1065 |
+
// Hide progress widget on first meaningful response
|
| 1066 |
+
if (!progressHidden && data.type !== 'generating' && data.type !== 'retry') {
|
| 1067 |
+
hideProgressWidget(chatContainer);
|
| 1068 |
+
progressHidden = true;
|
| 1069 |
+
}
|
| 1070 |
+
|
| 1071 |
// Handle different message types from backend
|
| 1072 |
if (data.type === 'thinking') {
|
| 1073 |
// Assistant thinking - create message if not exists
|
|
|
|
| 1075 |
currentMessageEl = createAssistantMessage(chatContainer);
|
| 1076 |
}
|
| 1077 |
appendToMessage(currentMessageEl, parseMarkdown(data.content));
|
| 1078 |
+
scrollChatToBottom(chatContainer);
|
| 1079 |
|
| 1080 |
} else if (data.type === 'code') {
|
| 1081 |
// Code execution result - update the last code cell
|
| 1082 |
updateLastCodeCell(chatContainer, data.output, data.error, data.images);
|
| 1083 |
currentMessageEl = null; // Reset for next thinking
|
| 1084 |
+
scrollChatToBottom(chatContainer);
|
| 1085 |
|
| 1086 |
} else if (data.type === 'code_start') {
|
| 1087 |
// Code cell starting execution - show with spinner
|
| 1088 |
createCodeCell(chatContainer, data.code, null, false, true);
|
| 1089 |
currentMessageEl = null;
|
| 1090 |
+
scrollChatToBottom(chatContainer);
|
| 1091 |
|
| 1092 |
} else if (data.type === 'upload') {
|
| 1093 |
// File upload notification
|
| 1094 |
createUploadMessage(chatContainer, data.paths, data.output);
|
| 1095 |
currentMessageEl = null;
|
| 1096 |
+
scrollChatToBottom(chatContainer);
|
| 1097 |
|
| 1098 |
} else if (data.type === 'download') {
|
| 1099 |
// File download notification
|
| 1100 |
createDownloadMessage(chatContainer, data.paths, data.output);
|
| 1101 |
currentMessageEl = null;
|
| 1102 |
+
scrollChatToBottom(chatContainer);
|
| 1103 |
|
| 1104 |
} else if (data.type === 'generating') {
|
| 1105 |
// Still generating - no action needed
|
|
|
|
| 1155 |
<div class="report-content">${html}</div>
|
| 1156 |
`;
|
| 1157 |
chatContainer.appendChild(resultDiv);
|
| 1158 |
+
scrollChatToBottom(chatContainer);
|
| 1159 |
|
| 1160 |
} else if (data.type === 'status') {
|
| 1161 |
// Research status update
|
| 1162 |
createStatusMessage(chatContainer, data.message, data.iteration, data.total_iterations);
|
| 1163 |
+
scrollChatToBottom(chatContainer);
|
| 1164 |
|
| 1165 |
} else if (data.type === 'queries') {
|
| 1166 |
// Research queries generated
|
| 1167 |
createQueriesMessage(chatContainer, data.queries, data.iteration);
|
| 1168 |
+
scrollChatToBottom(chatContainer);
|
| 1169 |
|
| 1170 |
} else if (data.type === 'progress') {
|
| 1171 |
// Research progress
|
| 1172 |
updateProgress(chatContainer, data.message, data.websites_visited, data.max_websites);
|
| 1173 |
+
scrollChatToBottom(chatContainer);
|
| 1174 |
|
| 1175 |
} else if (data.type === 'source') {
|
| 1176 |
// Research source found - now includes query grouping
|
| 1177 |
createSourceMessage(chatContainer, data);
|
| 1178 |
+
scrollChatToBottom(chatContainer);
|
| 1179 |
|
| 1180 |
} else if (data.type === 'query_stats') {
|
| 1181 |
// Update query statistics
|
|
|
|
| 1188 |
} else if (data.type === 'assessment') {
|
| 1189 |
// Research completeness assessment
|
| 1190 |
createAssessmentMessage(chatContainer, data.sufficient, data.missing_aspects, data.findings_count, data.reasoning);
|
| 1191 |
+
scrollChatToBottom(chatContainer);
|
| 1192 |
|
| 1193 |
} else if (data.type === 'report') {
|
| 1194 |
// Final research report
|
| 1195 |
createReportMessage(chatContainer, data.content, data.sources_count, data.websites_visited);
|
| 1196 |
+
scrollChatToBottom(chatContainer);
|
| 1197 |
|
| 1198 |
} else if (data.type === 'content') {
|
| 1199 |
// Regular streaming content (non-code notebooks)
|
|
|
|
| 1202 |
}
|
| 1203 |
fullResponse += data.content;
|
| 1204 |
appendToMessage(currentMessageEl, parseMarkdown(fullResponse));
|
| 1205 |
+
scrollChatToBottom(chatContainer);
|
| 1206 |
|
| 1207 |
} else if (data.type === 'launch') {
|
| 1208 |
// Tool-based notebook launch from command center
|
|
|
|
| 1264 |
infoDiv.innerHTML = `<em>${escapeHtml(data.content)}</em>`;
|
| 1265 |
infoDiv.style.color = 'var(--theme-accent)';
|
| 1266 |
chatContainer.appendChild(infoDiv);
|
| 1267 |
+
scrollChatToBottom(chatContainer);
|
| 1268 |
|
| 1269 |
} else if (data.type === 'retry') {
|
| 1270 |
// Show retry indicator
|
|
|
|
| 1277 |
errorDiv.className = 'message assistant';
|
| 1278 |
errorDiv.innerHTML = `<div class="message-content" style="color: #c62828;">Error: ${escapeHtml(data.content)}</div>`;
|
| 1279 |
chatContainer.appendChild(errorDiv);
|
| 1280 |
+
scrollChatToBottom(chatContainer);
|
| 1281 |
}
|
| 1282 |
}
|
| 1283 |
}
|
| 1284 |
}
|
| 1285 |
} catch (error) {
|
| 1286 |
+
hideProgressWidget(chatContainer);
|
| 1287 |
const errorDiv = document.createElement('div');
|
| 1288 |
errorDiv.className = 'message assistant';
|
| 1289 |
errorDiv.innerHTML = `<div class="message-content" style="color: #c62828;">Connection error: ${escapeHtml(error.message)}</div>`;
|
|
|
|
| 1459 |
</div>
|
| 1460 |
<div class="action-widget-body">
|
| 1461 |
<div class="section-label">QUERY</div>
|
| 1462 |
+
<div class="section-content">${parseMarkdown(message)}</div>
|
| 1463 |
</div>
|
| 1464 |
`;
|
| 1465 |
|
|
|
|
| 1473 |
clickableArea.addEventListener('click', clickHandler);
|
| 1474 |
|
| 1475 |
chatContainer.appendChild(widget);
|
| 1476 |
+
scrollChatToBottom(chatContainer);
|
| 1477 |
|
| 1478 |
// Store widget for later updates
|
| 1479 |
actionWidgets[targetTabId] = widget;
|
|
|
|
| 1645 |
</div>
|
| 1646 |
`;
|
| 1647 |
chatContainer.appendChild(retryDiv);
|
| 1648 |
+
scrollChatToBottom(chatContainer);
|
| 1649 |
|
| 1650 |
// Start countdown
|
| 1651 |
let remaining = data.delay;
|
|
|
|
| 1960 |
|
| 1961 |
let bodyHtml = `
|
| 1962 |
<div class="section-label">QUERY</div>
|
| 1963 |
+
<div class="section-content">${parseMarkdown(data.query || '')}</div>
|
| 1964 |
`;
|
| 1965 |
|
| 1966 |
// Add result section if present
|
|
|
|
| 2606 |
// Show add/edit model dialog
|
| 2607 |
function showModelDialog(modelId = null) {
|
| 2608 |
const isEdit = !!modelId;
|
| 2609 |
+
const model = isEdit ? settings.models[modelId] : { name: '', providerId: '', modelId: '', extraParams: null };
|
| 2610 |
|
| 2611 |
const dialog = document.getElementById('model-dialog');
|
| 2612 |
const title = document.getElementById('model-dialog-title');
|
| 2613 |
const nameInput = document.getElementById('model-name');
|
| 2614 |
const providerSelect = document.getElementById('model-provider');
|
| 2615 |
const modelIdInput = document.getElementById('model-model-id');
|
| 2616 |
+
const extraParamsInput = document.getElementById('model-extra-params');
|
| 2617 |
|
| 2618 |
title.textContent = isEdit ? 'Edit Model' : 'Add Model';
|
| 2619 |
nameInput.value = model.name;
|
| 2620 |
modelIdInput.value = model.modelId;
|
| 2621 |
+
extraParamsInput.value = model.extraParams ? JSON.stringify(model.extraParams, null, 2) : '';
|
| 2622 |
|
| 2623 |
// Populate provider dropdown
|
| 2624 |
providerSelect.innerHTML = '<option value="">-- Select Provider --</option>';
|
|
|
|
| 2647 |
const name = document.getElementById('model-name').value.trim();
|
| 2648 |
const providerId = document.getElementById('model-provider').value;
|
| 2649 |
const apiModelId = document.getElementById('model-model-id').value.trim();
|
| 2650 |
+
const extraParamsStr = document.getElementById('model-extra-params').value.trim();
|
| 2651 |
|
| 2652 |
if (!name || !providerId || !apiModelId) {
|
| 2653 |
+
alert('Name, provider, and model ID are required');
|
| 2654 |
return;
|
| 2655 |
}
|
| 2656 |
|
| 2657 |
+
// Parse extra params if provided
|
| 2658 |
+
let extraParams = null;
|
| 2659 |
+
if (extraParamsStr) {
|
| 2660 |
+
try {
|
| 2661 |
+
extraParams = JSON.parse(extraParamsStr);
|
| 2662 |
+
} catch (e) {
|
| 2663 |
+
alert('Invalid JSON in extra parameters: ' + e.message);
|
| 2664 |
+
return;
|
| 2665 |
+
}
|
| 2666 |
+
}
|
| 2667 |
+
|
| 2668 |
+
settings.models[modelId] = { name, providerId, modelId: apiModelId, extraParams };
|
| 2669 |
hideModelDialog();
|
| 2670 |
renderModelsList();
|
| 2671 |
populateModelDropdowns();
|
|
|
|
| 2889 |
}
|
| 2890 |
|
| 2891 |
// Resolve model configuration for a notebook type
|
| 2892 |
+
// Returns { endpoint, token, model, extraParams } or null if not configured
|
| 2893 |
function resolveModelConfig(notebookType) {
|
| 2894 |
const modelId = settings.notebooks?.[notebookType];
|
| 2895 |
if (!modelId) return null;
|
|
|
|
| 2903 |
return {
|
| 2904 |
endpoint: provider.endpoint,
|
| 2905 |
token: provider.token,
|
| 2906 |
+
model: model.modelId,
|
| 2907 |
+
extraParams: model.extraParams || null
|
| 2908 |
};
|
| 2909 |
}
|
| 2910 |
|
|
|
|
| 2921 |
return {
|
| 2922 |
endpoint: provider.endpoint,
|
| 2923 |
token: provider.token,
|
| 2924 |
+
model: model.modelId,
|
| 2925 |
+
extraParams: model.extraParams || null
|
| 2926 |
};
|
| 2927 |
}
|
| 2928 |
|
|
|
|
| 3099 |
if (filesPanel) filesPanel.classList.remove('active');
|
| 3100 |
if (filesBtn) filesBtn.classList.remove('active');
|
| 3101 |
appContainer.classList.remove('files-panel-open');
|
| 3102 |
+
const sessionsPanel = document.getElementById('sessionsPanel');
|
| 3103 |
+
const sessionsBtn = document.getElementById('sessionsBtn');
|
| 3104 |
+
if (sessionsPanel) sessionsPanel.classList.remove('active');
|
| 3105 |
+
if (sessionsBtn) sessionsBtn.classList.remove('active');
|
| 3106 |
+
appContainer.classList.remove('sessions-panel-open');
|
| 3107 |
|
| 3108 |
// Toggle debug panel
|
| 3109 |
debugPanel.classList.toggle('active');
|
|
|
|
| 3200 |
if (filesPanel) filesPanel.classList.remove('active');
|
| 3201 |
if (filesBtn) filesBtn.classList.remove('active');
|
| 3202 |
appContainer.classList.remove('files-panel-open');
|
| 3203 |
+
const sessionsPanel = document.getElementById('sessionsPanel');
|
| 3204 |
+
const sessionsBtn = document.getElementById('sessionsBtn');
|
| 3205 |
+
if (sessionsPanel) sessionsPanel.classList.remove('active');
|
| 3206 |
+
if (sessionsBtn) sessionsBtn.classList.remove('active');
|
| 3207 |
+
appContainer.classList.remove('sessions-panel-open');
|
| 3208 |
|
| 3209 |
openSettings(); // Populate form fields with current values
|
| 3210 |
settingsPanel.classList.add('active');
|
|
|
|
| 3396 |
debugPanel.classList.remove('active');
|
| 3397 |
debugBtn.classList.remove('active');
|
| 3398 |
appContainer.classList.remove('panel-open');
|
| 3399 |
+
const sessionsPanel = document.getElementById('sessionsPanel');
|
| 3400 |
+
const sessionsBtn = document.getElementById('sessionsBtn');
|
| 3401 |
+
if (sessionsPanel) sessionsPanel.classList.remove('active');
|
| 3402 |
+
if (sessionsBtn) sessionsBtn.classList.remove('active');
|
| 3403 |
+
appContainer.classList.remove('sessions-panel-open');
|
| 3404 |
|
| 3405 |
// Toggle files panel
|
| 3406 |
filesPanel.classList.toggle('active');
|
|
|
|
| 3439 |
});
|
| 3440 |
}
|
| 3441 |
|
| 3442 |
+
// Sessions panel (same pattern as Files/Settings/Debug panels)
|
| 3443 |
+
const sessionsPanel = document.getElementById('sessionsPanel');
|
| 3444 |
+
const sessionsPanelClose = document.getElementById('sessionsPanelClose');
|
| 3445 |
+
const sessionsBtn = document.getElementById('sessionsBtn');
|
| 3446 |
+
|
| 3447 |
+
if (sessionsBtn && sessionsPanel) {
|
| 3448 |
+
sessionsBtn.addEventListener('click', () => {
|
| 3449 |
+
const isOpening = !sessionsPanel.classList.contains('active');
|
| 3450 |
+
|
| 3451 |
+
// Close other panels first
|
| 3452 |
+
settingsPanel.classList.remove('active');
|
| 3453 |
+
settingsBtn.classList.remove('active');
|
| 3454 |
+
debugPanel.classList.remove('active');
|
| 3455 |
+
debugBtn.classList.remove('active');
|
| 3456 |
+
filesPanel.classList.remove('active');
|
| 3457 |
+
if (filesBtn) filesBtn.classList.remove('active');
|
| 3458 |
+
appContainer.classList.remove('panel-open');
|
| 3459 |
+
appContainer.classList.remove('files-panel-open');
|
| 3460 |
+
|
| 3461 |
+
// Toggle sessions panel
|
| 3462 |
+
sessionsPanel.classList.toggle('active');
|
| 3463 |
+
sessionsBtn.classList.toggle('active');
|
| 3464 |
+
|
| 3465 |
+
// Shift content when opening, unshift when closing
|
| 3466 |
+
if (isOpening) {
|
| 3467 |
+
appContainer.classList.add('sessions-panel-open');
|
| 3468 |
+
refreshSessionsList();
|
| 3469 |
+
} else {
|
| 3470 |
+
appContainer.classList.remove('sessions-panel-open');
|
| 3471 |
+
}
|
| 3472 |
+
});
|
| 3473 |
+
}
|
| 3474 |
+
|
| 3475 |
+
if (sessionsPanelClose) {
|
| 3476 |
+
sessionsPanelClose.addEventListener('click', () => {
|
| 3477 |
+
sessionsPanel.classList.remove('active');
|
| 3478 |
+
sessionsBtn.classList.remove('active');
|
| 3479 |
+
appContainer.classList.remove('sessions-panel-open');
|
| 3480 |
+
});
|
| 3481 |
+
}
|
| 3482 |
+
|
frontend/style.css
CHANGED
|
@@ -34,10 +34,9 @@ body {
|
|
| 34 |
background: #f5f5f5;
|
| 35 |
padding: 0;
|
| 36 |
gap: 0;
|
| 37 |
-
overflow
|
| 38 |
-
overflow-y: visible;
|
| 39 |
border-bottom: 1px solid #ccc;
|
| 40 |
-
|
| 41 |
position: relative;
|
| 42 |
z-index: 10;
|
| 43 |
}
|
|
@@ -54,8 +53,8 @@ body {
|
|
| 54 |
.tab {
|
| 55 |
display: flex;
|
| 56 |
align-items: center;
|
| 57 |
-
gap:
|
| 58 |
-
padding:
|
| 59 |
background: #f5f5f5;
|
| 60 |
color: #666;
|
| 61 |
cursor: pointer;
|
|
@@ -63,7 +62,7 @@ body {
|
|
| 63 |
white-space: nowrap;
|
| 64 |
transition: all 0.2s;
|
| 65 |
border-right: 1px solid #ccc;
|
| 66 |
-
font-size:
|
| 67 |
font-weight: 500;
|
| 68 |
letter-spacing: 1px;
|
| 69 |
position: relative;
|
|
@@ -136,13 +135,14 @@ body {
|
|
| 136 |
|
| 137 |
.settings-btn,
|
| 138 |
.files-btn,
|
| 139 |
-
.debug-btn
|
|
|
|
| 140 |
background: #f5f5f5;
|
| 141 |
color: #666;
|
| 142 |
border: none;
|
| 143 |
border-left: 1px solid #ccc;
|
| 144 |
-
padding:
|
| 145 |
-
font-size:
|
| 146 |
font-weight: 500;
|
| 147 |
letter-spacing: 1px;
|
| 148 |
cursor: pointer;
|
|
@@ -152,14 +152,16 @@ body {
|
|
| 152 |
|
| 153 |
.settings-btn:hover,
|
| 154 |
.files-btn:hover,
|
| 155 |
-
.debug-btn:hover
|
|
|
|
| 156 |
background: #eee;
|
| 157 |
color: #1a1a1a;
|
| 158 |
}
|
| 159 |
|
| 160 |
.settings-btn.active,
|
| 161 |
.files-btn.active,
|
| 162 |
-
.debug-btn.active
|
|
|
|
| 163 |
background: var(--theme-accent);
|
| 164 |
color: white;
|
| 165 |
}
|
|
@@ -339,14 +341,14 @@ body {
|
|
| 339 |
.welcome-message {
|
| 340 |
color: #999;
|
| 341 |
font-size: 13px;
|
| 342 |
-
line-height: 1.
|
| 343 |
max-width: 700px;
|
| 344 |
margin: 0 auto;
|
| 345 |
-
padding:
|
| 346 |
}
|
| 347 |
|
| 348 |
.welcome-message p {
|
| 349 |
-
margin-bottom:
|
| 350 |
}
|
| 351 |
|
| 352 |
.welcome-message ul {
|
|
@@ -728,8 +730,52 @@ body {
|
|
| 728 |
transform: translate(-50%, -50%) rotate(240deg) translateY(-4.5px);
|
| 729 |
}
|
| 730 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 731 |
.input-area {
|
| 732 |
-
padding:
|
| 733 |
background: white;
|
| 734 |
border-top: 1px solid #ccc;
|
| 735 |
}
|
|
@@ -744,9 +790,9 @@ body {
|
|
| 744 |
|
| 745 |
.input-container textarea {
|
| 746 |
flex: 1;
|
| 747 |
-
padding:
|
| 748 |
border: 1px solid #ccc;
|
| 749 |
-
min-height:
|
| 750 |
max-height: 200px;
|
| 751 |
resize: none;
|
| 752 |
overflow-y: auto;
|
|
@@ -769,7 +815,7 @@ body {
|
|
| 769 |
}
|
| 770 |
|
| 771 |
.input-container button {
|
| 772 |
-
padding:
|
| 773 |
background: #f5f5f5;
|
| 774 |
border: 1px solid #ccc;
|
| 775 |
color: #1a1a1a;
|
|
@@ -884,6 +930,32 @@ body {
|
|
| 884 |
color: #999;
|
| 885 |
}
|
| 886 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 887 |
/* Theme Color Picker */
|
| 888 |
.theme-color-picker {
|
| 889 |
display: grid;
|
|
@@ -1972,10 +2044,10 @@ body {
|
|
| 1972 |
/* Debug Panel */
|
| 1973 |
.debug-panel {
|
| 1974 |
position: fixed;
|
| 1975 |
-
top:
|
| 1976 |
right: -600px;
|
| 1977 |
width: 600px;
|
| 1978 |
-
height: calc(100vh -
|
| 1979 |
background: white;
|
| 1980 |
border-left: 2px solid var(--theme-accent);
|
| 1981 |
z-index: 1000;
|
|
@@ -2122,10 +2194,10 @@ body {
|
|
| 2122 |
/* Settings Panel (side panel like debug) */
|
| 2123 |
.settings-panel {
|
| 2124 |
position: fixed;
|
| 2125 |
-
top:
|
| 2126 |
right: -600px;
|
| 2127 |
width: 600px;
|
| 2128 |
-
height: calc(100vh -
|
| 2129 |
background: white;
|
| 2130 |
border-left: 2px solid var(--theme-accent);
|
| 2131 |
z-index: 1000;
|
|
@@ -2297,21 +2369,30 @@ body {
|
|
| 2297 |
/* Settings Select (dropdowns) */
|
| 2298 |
.settings-select {
|
| 2299 |
width: 100%;
|
| 2300 |
-
padding: 10px 12px;
|
| 2301 |
-
border: 1px solid #
|
| 2302 |
border-radius: 4px;
|
| 2303 |
font-family: inherit;
|
| 2304 |
font-size: 12px;
|
| 2305 |
-
background:
|
| 2306 |
color: #1a1a1a;
|
| 2307 |
outline: none;
|
| 2308 |
cursor: pointer;
|
| 2309 |
transition: border-color 0.2s ease;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2310 |
}
|
| 2311 |
|
| 2312 |
.settings-select:focus {
|
| 2313 |
border-color: var(--theme-accent);
|
| 2314 |
-
background: white;
|
| 2315 |
}
|
| 2316 |
|
| 2317 |
/* Notebook models grid */
|
|
@@ -2437,10 +2518,10 @@ body {
|
|
| 2437 |
/* Files Panel (right side panel, like settings/debug) */
|
| 2438 |
.files-panel {
|
| 2439 |
position: fixed;
|
| 2440 |
-
top:
|
| 2441 |
right: -320px;
|
| 2442 |
width: 320px;
|
| 2443 |
-
height: calc(100vh -
|
| 2444 |
background: white;
|
| 2445 |
border-left: 2px solid var(--theme-accent);
|
| 2446 |
z-index: 1000;
|
|
@@ -2726,3 +2807,354 @@ body {
|
|
| 2726 |
color: #f57c00;
|
| 2727 |
font-weight: 500;
|
| 2728 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
background: #f5f5f5;
|
| 35 |
padding: 0;
|
| 36 |
gap: 0;
|
| 37 |
+
overflow: hidden;
|
|
|
|
| 38 |
border-bottom: 1px solid #ccc;
|
| 39 |
+
height: 29px;
|
| 40 |
position: relative;
|
| 41 |
z-index: 10;
|
| 42 |
}
|
|
|
|
| 53 |
.tab {
|
| 54 |
display: flex;
|
| 55 |
align-items: center;
|
| 56 |
+
gap: 6px;
|
| 57 |
+
padding: 4px 10px;
|
| 58 |
background: #f5f5f5;
|
| 59 |
color: #666;
|
| 60 |
cursor: pointer;
|
|
|
|
| 62 |
white-space: nowrap;
|
| 63 |
transition: all 0.2s;
|
| 64 |
border-right: 1px solid #ccc;
|
| 65 |
+
font-size: 11px;
|
| 66 |
font-weight: 500;
|
| 67 |
letter-spacing: 1px;
|
| 68 |
position: relative;
|
|
|
|
| 135 |
|
| 136 |
.settings-btn,
|
| 137 |
.files-btn,
|
| 138 |
+
.debug-btn,
|
| 139 |
+
.sessions-btn {
|
| 140 |
background: #f5f5f5;
|
| 141 |
color: #666;
|
| 142 |
border: none;
|
| 143 |
border-left: 1px solid #ccc;
|
| 144 |
+
padding: 4px 10px;
|
| 145 |
+
font-size: 11px;
|
| 146 |
font-weight: 500;
|
| 147 |
letter-spacing: 1px;
|
| 148 |
cursor: pointer;
|
|
|
|
| 152 |
|
| 153 |
.settings-btn:hover,
|
| 154 |
.files-btn:hover,
|
| 155 |
+
.debug-btn:hover,
|
| 156 |
+
.sessions-btn:hover {
|
| 157 |
background: #eee;
|
| 158 |
color: #1a1a1a;
|
| 159 |
}
|
| 160 |
|
| 161 |
.settings-btn.active,
|
| 162 |
.files-btn.active,
|
| 163 |
+
.debug-btn.active,
|
| 164 |
+
.sessions-btn.active {
|
| 165 |
background: var(--theme-accent);
|
| 166 |
color: white;
|
| 167 |
}
|
|
|
|
| 341 |
.welcome-message {
|
| 342 |
color: #999;
|
| 343 |
font-size: 13px;
|
| 344 |
+
line-height: 1.6;
|
| 345 |
max-width: 700px;
|
| 346 |
margin: 0 auto;
|
| 347 |
+
padding: 20px 20px;
|
| 348 |
}
|
| 349 |
|
| 350 |
.welcome-message p {
|
| 351 |
+
margin-bottom: 10px;
|
| 352 |
}
|
| 353 |
|
| 354 |
.welcome-message ul {
|
|
|
|
| 730 |
transform: translate(-50%, -50%) rotate(240deg) translateY(-4.5px);
|
| 731 |
}
|
| 732 |
|
| 733 |
+
/* Progress widget - shown while waiting for model response */
|
| 734 |
+
.progress-widget {
|
| 735 |
+
display: flex;
|
| 736 |
+
align-items: center;
|
| 737 |
+
gap: 8px;
|
| 738 |
+
padding: 8px 20px;
|
| 739 |
+
font-size: 12px;
|
| 740 |
+
color: #888;
|
| 741 |
+
}
|
| 742 |
+
|
| 743 |
+
.progress-widget .progress-spinner {
|
| 744 |
+
width: 14px;
|
| 745 |
+
height: 14px;
|
| 746 |
+
position: relative;
|
| 747 |
+
animation: orbit-rotate 1.2s linear infinite;
|
| 748 |
+
}
|
| 749 |
+
|
| 750 |
+
.progress-widget .progress-spinner span {
|
| 751 |
+
position: absolute;
|
| 752 |
+
width: 3px;
|
| 753 |
+
height: 3px;
|
| 754 |
+
border-radius: 50%;
|
| 755 |
+
background: var(--theme-accent);
|
| 756 |
+
top: 50%;
|
| 757 |
+
left: 50%;
|
| 758 |
+
}
|
| 759 |
+
|
| 760 |
+
.progress-widget .progress-spinner span:nth-child(1) {
|
| 761 |
+
transform: translate(-50%, -50%) translateY(-5px);
|
| 762 |
+
}
|
| 763 |
+
|
| 764 |
+
.progress-widget .progress-spinner span:nth-child(2) {
|
| 765 |
+
transform: translate(-50%, -50%) rotate(120deg) translateY(-5px);
|
| 766 |
+
}
|
| 767 |
+
|
| 768 |
+
.progress-widget .progress-spinner span:nth-child(3) {
|
| 769 |
+
transform: translate(-50%, -50%) rotate(240deg) translateY(-5px);
|
| 770 |
+
}
|
| 771 |
+
|
| 772 |
+
.progress-widget .progress-text {
|
| 773 |
+
font-family: 'JetBrains Mono', monospace;
|
| 774 |
+
letter-spacing: 0.5px;
|
| 775 |
+
}
|
| 776 |
+
|
| 777 |
.input-area {
|
| 778 |
+
padding: 10px 20px;
|
| 779 |
background: white;
|
| 780 |
border-top: 1px solid #ccc;
|
| 781 |
}
|
|
|
|
| 790 |
|
| 791 |
.input-container textarea {
|
| 792 |
flex: 1;
|
| 793 |
+
padding: 10px 12px;
|
| 794 |
border: 1px solid #ccc;
|
| 795 |
+
min-height: 38px;
|
| 796 |
max-height: 200px;
|
| 797 |
resize: none;
|
| 798 |
overflow-y: auto;
|
|
|
|
| 815 |
}
|
| 816 |
|
| 817 |
.input-container button {
|
| 818 |
+
padding: 10px 20px;
|
| 819 |
background: #f5f5f5;
|
| 820 |
border: 1px solid #ccc;
|
| 821 |
color: #1a1a1a;
|
|
|
|
| 930 |
color: #999;
|
| 931 |
}
|
| 932 |
|
| 933 |
+
.settings-textarea {
|
| 934 |
+
width: 100%;
|
| 935 |
+
padding: 12px 15px;
|
| 936 |
+
border: 1px solid #ccc;
|
| 937 |
+
border-radius: 4px;
|
| 938 |
+
font-family: 'JetBrains Mono', monospace;
|
| 939 |
+
font-size: 12px;
|
| 940 |
+
background: #f9f9f9;
|
| 941 |
+
color: #1a1a1a;
|
| 942 |
+
outline: none;
|
| 943 |
+
transition: border-color 0.2s ease;
|
| 944 |
+
resize: vertical;
|
| 945 |
+
min-height: 60px;
|
| 946 |
+
box-sizing: border-box;
|
| 947 |
+
}
|
| 948 |
+
|
| 949 |
+
.settings-textarea:focus {
|
| 950 |
+
border-color: var(--theme-accent);
|
| 951 |
+
box-shadow: 0 0 0 1px var(--theme-accent);
|
| 952 |
+
background: white;
|
| 953 |
+
}
|
| 954 |
+
|
| 955 |
+
.settings-textarea::placeholder {
|
| 956 |
+
color: #999;
|
| 957 |
+
}
|
| 958 |
+
|
| 959 |
/* Theme Color Picker */
|
| 960 |
.theme-color-picker {
|
| 961 |
display: grid;
|
|
|
|
| 2044 |
/* Debug Panel */
|
| 2045 |
.debug-panel {
|
| 2046 |
position: fixed;
|
| 2047 |
+
top: 29px;
|
| 2048 |
right: -600px;
|
| 2049 |
width: 600px;
|
| 2050 |
+
height: calc(100vh - 28px);
|
| 2051 |
background: white;
|
| 2052 |
border-left: 2px solid var(--theme-accent);
|
| 2053 |
z-index: 1000;
|
|
|
|
| 2194 |
/* Settings Panel (side panel like debug) */
|
| 2195 |
.settings-panel {
|
| 2196 |
position: fixed;
|
| 2197 |
+
top: 29px;
|
| 2198 |
right: -600px;
|
| 2199 |
width: 600px;
|
| 2200 |
+
height: calc(100vh - 28px);
|
| 2201 |
background: white;
|
| 2202 |
border-left: 2px solid var(--theme-accent);
|
| 2203 |
z-index: 1000;
|
|
|
|
| 2369 |
/* Settings Select (dropdowns) */
|
| 2370 |
.settings-select {
|
| 2371 |
width: 100%;
|
| 2372 |
+
padding: 10px 32px 10px 12px;
|
| 2373 |
+
border: 1px solid #ddd;
|
| 2374 |
border-radius: 4px;
|
| 2375 |
font-family: inherit;
|
| 2376 |
font-size: 12px;
|
| 2377 |
+
background: white;
|
| 2378 |
color: #1a1a1a;
|
| 2379 |
outline: none;
|
| 2380 |
cursor: pointer;
|
| 2381 |
transition: border-color 0.2s ease;
|
| 2382 |
+
-webkit-appearance: none;
|
| 2383 |
+
-moz-appearance: none;
|
| 2384 |
+
appearance: none;
|
| 2385 |
+
background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 12 12'%3E%3Cpath fill='%23999' d='M6 8L2 4h8z'/%3E%3C/svg%3E");
|
| 2386 |
+
background-repeat: no-repeat;
|
| 2387 |
+
background-position: right 10px center;
|
| 2388 |
+
}
|
| 2389 |
+
|
| 2390 |
+
.settings-select:hover {
|
| 2391 |
+
border-color: #bbb;
|
| 2392 |
}
|
| 2393 |
|
| 2394 |
.settings-select:focus {
|
| 2395 |
border-color: var(--theme-accent);
|
|
|
|
| 2396 |
}
|
| 2397 |
|
| 2398 |
/* Notebook models grid */
|
|
|
|
| 2518 |
/* Files Panel (right side panel, like settings/debug) */
|
| 2519 |
.files-panel {
|
| 2520 |
position: fixed;
|
| 2521 |
+
top: 29px;
|
| 2522 |
right: -320px;
|
| 2523 |
width: 320px;
|
| 2524 |
+
height: calc(100vh - 28px);
|
| 2525 |
background: white;
|
| 2526 |
border-left: 2px solid var(--theme-accent);
|
| 2527 |
z-index: 1000;
|
|
|
|
| 2807 |
color: #f57c00;
|
| 2808 |
font-weight: 500;
|
| 2809 |
}
|
| 2810 |
+
|
| 2811 |
+
/* Session Selector (inside welcome message) */
|
| 2812 |
+
.session-selector {
|
| 2813 |
+
margin-top: 16px;
|
| 2814 |
+
padding-top: 16px;
|
| 2815 |
+
border-top: 1px solid #e0e0e0;
|
| 2816 |
+
}
|
| 2817 |
+
|
| 2818 |
+
/* Welcome explanation section (below session selector) */
|
| 2819 |
+
.welcome-explanation {
|
| 2820 |
+
margin-top: 20px;
|
| 2821 |
+
padding-top: 16px;
|
| 2822 |
+
border-top: 1px solid #e0e0e0;
|
| 2823 |
+
color: #666;
|
| 2824 |
+
}
|
| 2825 |
+
|
| 2826 |
+
.session-indicator {
|
| 2827 |
+
position: fixed;
|
| 2828 |
+
bottom: 6px;
|
| 2829 |
+
right: 12px;
|
| 2830 |
+
font-size: 13px;
|
| 2831 |
+
color: #999;
|
| 2832 |
+
z-index: 100;
|
| 2833 |
+
}
|
| 2834 |
+
|
| 2835 |
+
.session-indicator a {
|
| 2836 |
+
color: var(--theme-accent);
|
| 2837 |
+
text-decoration: none;
|
| 2838 |
+
cursor: pointer;
|
| 2839 |
+
}
|
| 2840 |
+
|
| 2841 |
+
.session-indicator a:hover {
|
| 2842 |
+
text-decoration: underline;
|
| 2843 |
+
}
|
| 2844 |
+
|
| 2845 |
+
.session-selector-form {
|
| 2846 |
+
display: flex;
|
| 2847 |
+
gap: 20px;
|
| 2848 |
+
align-items: flex-end;
|
| 2849 |
+
}
|
| 2850 |
+
|
| 2851 |
+
.session-new {
|
| 2852 |
+
flex: 1;
|
| 2853 |
+
}
|
| 2854 |
+
|
| 2855 |
+
.session-existing {
|
| 2856 |
+
flex: 1;
|
| 2857 |
+
}
|
| 2858 |
+
|
| 2859 |
+
.session-new label,
|
| 2860 |
+
.session-existing label {
|
| 2861 |
+
display: block;
|
| 2862 |
+
font-size: 10px;
|
| 2863 |
+
font-weight: 600;
|
| 2864 |
+
color: #666;
|
| 2865 |
+
text-transform: uppercase;
|
| 2866 |
+
letter-spacing: 0.5px;
|
| 2867 |
+
margin-bottom: 6px;
|
| 2868 |
+
}
|
| 2869 |
+
|
| 2870 |
+
.session-new-input {
|
| 2871 |
+
display: flex;
|
| 2872 |
+
gap: 6px;
|
| 2873 |
+
}
|
| 2874 |
+
|
| 2875 |
+
.session-input-wrapper {
|
| 2876 |
+
flex: 1;
|
| 2877 |
+
position: relative;
|
| 2878 |
+
display: flex;
|
| 2879 |
+
}
|
| 2880 |
+
|
| 2881 |
+
.session-input-wrapper input {
|
| 2882 |
+
flex: 1;
|
| 2883 |
+
padding: 7px 28px 7px 10px;
|
| 2884 |
+
border: 1px solid #ccc;
|
| 2885 |
+
border-radius: 4px;
|
| 2886 |
+
font-size: 12px;
|
| 2887 |
+
font-family: inherit;
|
| 2888 |
+
height: 32px;
|
| 2889 |
+
box-sizing: border-box;
|
| 2890 |
+
}
|
| 2891 |
+
|
| 2892 |
+
.session-input-wrapper input:focus {
|
| 2893 |
+
outline: none;
|
| 2894 |
+
border-color: var(--theme-accent);
|
| 2895 |
+
}
|
| 2896 |
+
|
| 2897 |
+
.session-regenerate-btn {
|
| 2898 |
+
position: absolute;
|
| 2899 |
+
right: 6px;
|
| 2900 |
+
top: 50%;
|
| 2901 |
+
transform: translateY(-50%);
|
| 2902 |
+
background: transparent;
|
| 2903 |
+
border: none;
|
| 2904 |
+
color: #bbb;
|
| 2905 |
+
font-size: 13px;
|
| 2906 |
+
cursor: pointer;
|
| 2907 |
+
padding: 0;
|
| 2908 |
+
line-height: 1;
|
| 2909 |
+
transition: color 0.2s;
|
| 2910 |
+
}
|
| 2911 |
+
|
| 2912 |
+
.session-regenerate-btn:hover {
|
| 2913 |
+
color: var(--theme-accent);
|
| 2914 |
+
}
|
| 2915 |
+
|
| 2916 |
+
.session-new-input button:not(.session-regenerate-btn) {
|
| 2917 |
+
padding: 7px 14px;
|
| 2918 |
+
background: #f5f5f5;
|
| 2919 |
+
color: #1a1a1a;
|
| 2920 |
+
border: 1px solid #ccc;
|
| 2921 |
+
border-radius: 4px;
|
| 2922 |
+
font-size: 11px;
|
| 2923 |
+
font-weight: 500;
|
| 2924 |
+
cursor: pointer;
|
| 2925 |
+
transition: all 0.2s;
|
| 2926 |
+
letter-spacing: 0.5px;
|
| 2927 |
+
}
|
| 2928 |
+
|
| 2929 |
+
.session-new-input button:not(.session-regenerate-btn):hover {
|
| 2930 |
+
background: var(--theme-accent);
|
| 2931 |
+
color: #ffffff;
|
| 2932 |
+
border-color: var(--theme-accent);
|
| 2933 |
+
}
|
| 2934 |
+
|
| 2935 |
+
.session-existing select {
|
| 2936 |
+
width: 100%;
|
| 2937 |
+
padding: 7px 32px 7px 10px;
|
| 2938 |
+
border: 1px solid #ddd;
|
| 2939 |
+
border-radius: 4px;
|
| 2940 |
+
font-size: 12px;
|
| 2941 |
+
font-family: inherit;
|
| 2942 |
+
background: white;
|
| 2943 |
+
color: #1a1a1a;
|
| 2944 |
+
cursor: pointer;
|
| 2945 |
+
height: 32px;
|
| 2946 |
+
box-sizing: border-box;
|
| 2947 |
+
-webkit-appearance: none;
|
| 2948 |
+
-moz-appearance: none;
|
| 2949 |
+
appearance: none;
|
| 2950 |
+
background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 12 12'%3E%3Cpath fill='%23999' d='M6 8L2 4h8z'/%3E%3C/svg%3E");
|
| 2951 |
+
background-repeat: no-repeat;
|
| 2952 |
+
background-position: right 10px center;
|
| 2953 |
+
transition: border-color 0.2s ease;
|
| 2954 |
+
}
|
| 2955 |
+
|
| 2956 |
+
.session-existing select:hover {
|
| 2957 |
+
border-color: #bbb;
|
| 2958 |
+
}
|
| 2959 |
+
|
| 2960 |
+
.session-existing select:focus {
|
| 2961 |
+
outline: none;
|
| 2962 |
+
border-color: var(--theme-accent);
|
| 2963 |
+
}
|
| 2964 |
+
|
| 2965 |
+
/* Sessions Panel (right side) */
|
| 2966 |
+
.sessions-panel {
|
| 2967 |
+
position: fixed;
|
| 2968 |
+
top: 29px;
|
| 2969 |
+
right: -320px;
|
| 2970 |
+
width: 320px;
|
| 2971 |
+
height: calc(100vh - 28px);
|
| 2972 |
+
background: white;
|
| 2973 |
+
border-left: 2px solid var(--theme-accent);
|
| 2974 |
+
z-index: 1000;
|
| 2975 |
+
display: flex;
|
| 2976 |
+
flex-direction: column;
|
| 2977 |
+
transition: right 0.3s ease;
|
| 2978 |
+
}
|
| 2979 |
+
|
| 2980 |
+
.sessions-panel.active {
|
| 2981 |
+
right: 0;
|
| 2982 |
+
}
|
| 2983 |
+
|
| 2984 |
+
.sessions-panel-open .content-area {
|
| 2985 |
+
margin-right: 320px;
|
| 2986 |
+
}
|
| 2987 |
+
|
| 2988 |
+
.sessions-panel-header {
|
| 2989 |
+
padding: 8px 12px;
|
| 2990 |
+
border-bottom: 1px solid #e0e0e0;
|
| 2991 |
+
display: flex;
|
| 2992 |
+
justify-content: space-between;
|
| 2993 |
+
align-items: center;
|
| 2994 |
+
background: var(--theme-accent);
|
| 2995 |
+
}
|
| 2996 |
+
|
| 2997 |
+
.sessions-panel-header h3 {
|
| 2998 |
+
margin: 0;
|
| 2999 |
+
font-size: 12px;
|
| 3000 |
+
font-weight: 600;
|
| 3001 |
+
color: white;
|
| 3002 |
+
text-transform: uppercase;
|
| 3003 |
+
letter-spacing: 0.5px;
|
| 3004 |
+
}
|
| 3005 |
+
|
| 3006 |
+
.sessions-panel-close {
|
| 3007 |
+
background: none;
|
| 3008 |
+
border: none;
|
| 3009 |
+
font-size: 20px;
|
| 3010 |
+
color: white;
|
| 3011 |
+
cursor: pointer;
|
| 3012 |
+
padding: 0;
|
| 3013 |
+
width: 24px;
|
| 3014 |
+
height: 24px;
|
| 3015 |
+
display: flex;
|
| 3016 |
+
align-items: center;
|
| 3017 |
+
justify-content: center;
|
| 3018 |
+
border-radius: 4px;
|
| 3019 |
+
transition: background 0.2s;
|
| 3020 |
+
}
|
| 3021 |
+
|
| 3022 |
+
.sessions-panel-close:hover {
|
| 3023 |
+
background: rgba(255, 255, 255, 0.2);
|
| 3024 |
+
}
|
| 3025 |
+
|
| 3026 |
+
.sessions-panel-body {
|
| 3027 |
+
flex: 1;
|
| 3028 |
+
padding: 16px;
|
| 3029 |
+
overflow-y: auto;
|
| 3030 |
+
}
|
| 3031 |
+
|
| 3032 |
+
.sessions-panel-body label {
|
| 3033 |
+
display: block;
|
| 3034 |
+
font-size: 10px;
|
| 3035 |
+
font-weight: 600;
|
| 3036 |
+
color: #666;
|
| 3037 |
+
text-transform: uppercase;
|
| 3038 |
+
letter-spacing: 0.5px;
|
| 3039 |
+
margin-bottom: 8px;
|
| 3040 |
+
}
|
| 3041 |
+
|
| 3042 |
+
.sessions-current-name,
|
| 3043 |
+
.sessions-new-input {
|
| 3044 |
+
display: flex;
|
| 3045 |
+
gap: 8px;
|
| 3046 |
+
}
|
| 3047 |
+
|
| 3048 |
+
.sessions-current-name input,
|
| 3049 |
+
.sessions-new-input input {
|
| 3050 |
+
flex: 1;
|
| 3051 |
+
padding: 8px 28px 8px 10px;
|
| 3052 |
+
border: 1px solid #ddd;
|
| 3053 |
+
border-radius: 4px;
|
| 3054 |
+
font-size: 13px;
|
| 3055 |
+
font-family: inherit;
|
| 3056 |
+
}
|
| 3057 |
+
|
| 3058 |
+
.sessions-new-input .session-input-wrapper {
|
| 3059 |
+
width: 180px;
|
| 3060 |
+
position: relative;
|
| 3061 |
+
display: flex;
|
| 3062 |
+
}
|
| 3063 |
+
|
| 3064 |
+
.sessions-new-input .session-input-wrapper input {
|
| 3065 |
+
width: 100%;
|
| 3066 |
+
flex: none;
|
| 3067 |
+
}
|
| 3068 |
+
|
| 3069 |
+
.sessions-current-name input:focus,
|
| 3070 |
+
.sessions-new-input input:focus {
|
| 3071 |
+
outline: none;
|
| 3072 |
+
border-color: var(--theme-accent);
|
| 3073 |
+
}
|
| 3074 |
+
|
| 3075 |
+
.sessions-current-name button,
|
| 3076 |
+
.sessions-new-input button:not(.session-regenerate-btn) {
|
| 3077 |
+
padding: 8px 12px;
|
| 3078 |
+
background: var(--theme-accent);
|
| 3079 |
+
color: white;
|
| 3080 |
+
border: none;
|
| 3081 |
+
border-radius: 4px;
|
| 3082 |
+
font-size: 11px;
|
| 3083 |
+
font-weight: 600;
|
| 3084 |
+
cursor: pointer;
|
| 3085 |
+
transition: opacity 0.2s;
|
| 3086 |
+
flex-shrink: 0;
|
| 3087 |
+
}
|
| 3088 |
+
|
| 3089 |
+
.sessions-current-name button:hover,
|
| 3090 |
+
.sessions-new-input button:not(.session-regenerate-btn):hover {
|
| 3091 |
+
opacity: 0.9;
|
| 3092 |
+
}
|
| 3093 |
+
|
| 3094 |
+
.sessions-divider {
|
| 3095 |
+
height: 1px;
|
| 3096 |
+
background: #e0e0e0;
|
| 3097 |
+
margin: 16px 0;
|
| 3098 |
+
}
|
| 3099 |
+
|
| 3100 |
+
.sessions-list {
|
| 3101 |
+
display: flex;
|
| 3102 |
+
flex-direction: column;
|
| 3103 |
+
gap: 4px;
|
| 3104 |
+
}
|
| 3105 |
+
|
| 3106 |
+
.sessions-list-item {
|
| 3107 |
+
padding: 10px 12px;
|
| 3108 |
+
background: #f5f5f5;
|
| 3109 |
+
border-radius: 4px;
|
| 3110 |
+
cursor: pointer;
|
| 3111 |
+
transition: background 0.2s;
|
| 3112 |
+
display: flex;
|
| 3113 |
+
justify-content: space-between;
|
| 3114 |
+
align-items: center;
|
| 3115 |
+
}
|
| 3116 |
+
|
| 3117 |
+
.sessions-list-item:hover {
|
| 3118 |
+
background: #e8e8e8;
|
| 3119 |
+
}
|
| 3120 |
+
|
| 3121 |
+
.sessions-list-item.current {
|
| 3122 |
+
background: var(--theme-accent-bg);
|
| 3123 |
+
border: 1px solid var(--theme-accent);
|
| 3124 |
+
}
|
| 3125 |
+
|
| 3126 |
+
.sessions-list-item-name {
|
| 3127 |
+
font-size: 13px;
|
| 3128 |
+
font-weight: 500;
|
| 3129 |
+
color: #333;
|
| 3130 |
+
}
|
| 3131 |
+
|
| 3132 |
+
.sessions-list-item-date {
|
| 3133 |
+
font-size: 10px;
|
| 3134 |
+
color: #999;
|
| 3135 |
+
flex: 1;
|
| 3136 |
+
text-align: right;
|
| 3137 |
+
margin-right: 8px;
|
| 3138 |
+
}
|
| 3139 |
+
|
| 3140 |
+
.sessions-delete-btn {
|
| 3141 |
+
background: none;
|
| 3142 |
+
border: none;
|
| 3143 |
+
color: #999;
|
| 3144 |
+
font-size: 16px;
|
| 3145 |
+
cursor: pointer;
|
| 3146 |
+
padding: 0 4px;
|
| 3147 |
+
line-height: 1;
|
| 3148 |
+
transition: color 0.2s;
|
| 3149 |
+
}
|
| 3150 |
+
|
| 3151 |
+
.sessions-delete-btn:hover {
|
| 3152 |
+
color: #e53935;
|
| 3153 |
+
}
|
| 3154 |
+
|
| 3155 |
+
.sessions-loading {
|
| 3156 |
+
text-align: center;
|
| 3157 |
+
color: #666;
|
| 3158 |
+
padding: 20px;
|
| 3159 |
+
}
|
| 3160 |
+
|