|
|
import json |
|
|
import os |
|
|
from pathlib import Path |
|
|
from datetime import datetime |
|
|
from enum import Enum, auto |
|
|
import re |
|
|
import uuid |
|
|
import pandas as pd |
|
|
|
|
|
import streamlit as st |
|
|
import time |
|
|
from collections import OrderedDict |
|
|
from html_style import html_style |
|
|
from config import config, MCM_PROBLEMS, stages_config |
|
|
from agent_api import ModelingAgentSystem |
|
|
|
|
|
|
|
|
class GenerationStep(Enum):
    """Named phases of the report-generation pipeline.

    Besides the fixed top-level phases, per-task steps are represented by
    plain strings of the form ``"Task <id> <step type>"``; the classmethods
    below build, recognize, and parse that naming scheme.
    """

    PROBLEM_ANALYSIS = "Problem Analysis"
    HIGH_LEVEL_MODELING = "High-Level Modeling"
    TASK_DECOMPOSITION = "Task Decomposition"
    DEPENDENCY_ANALYSIS = "Dependency Analysis"

    @classmethod
    def get_task_step(cls, task_id, step_type):
        """Build the canonical step name for one task's sub-step."""
        return f"Task {task_id} {step_type}"

    @classmethod
    def is_task_step(cls, step_name):
        """Return True when *step_name* follows the per-task naming scheme."""
        return step_name.startswith("Task ")

    @classmethod
    def parse_task_step(cls, step_name):
        """Split a task step name into ``(task_id, step_type)``.

        Returns ``(None, None)`` whenever *step_name* does not look like a
        well-formed task step (wrong prefix, missing parts, non-integer id).
        """
        if not cls.is_task_step(step_name):
            return None, None

        pieces = step_name.split(" ", 2)
        if len(pieces) < 3:
            return None, None

        _, raw_id, remainder = pieces
        try:
            return int(raw_id), remainder
        except ValueError:
            return None, None
|
|
|
|
|
|
|
|
# Page-wide configuration. st.set_page_config must be the first Streamlit
# call in the script, so this block runs before any other UI code.
APP_TITLE = "Mathematical Modeling Multi-Agent System"
st.set_page_config(page_title=APP_TITLE, layout="wide", initial_sidebar_state="expanded")

# Inject the app's shared CSS (defined in html_style.py).
st.markdown(html_style, unsafe_allow_html=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def initialize_session_state():
    """Initializes session state variables if they don't exist.

    Seeds every key the UI relies on with a default, without clobbering
    values that survive reruns. If a problem is already defined but the
    stage dict is empty (lost across a rerun), the stages are rebuilt.
    """
    defaults = {
        "api_base_url": "",
        "api_key": "",
        "api_model": "",
        "problem_input_method": "Select Predefined",
        # next(iter(...), "") avoids materializing the full key list and,
        # unlike list(MCM_PROBLEMS.keys())[0], does not raise IndexError
        # when MCM_PROBLEMS happens to be empty.
        "selected_mcm_problem": next(iter(MCM_PROBLEMS), ""),
        "custom_background": "",
        "custom_requirements": "",
        "problem_defined": False,
        "problem_definition_expanded": True,
        "current_problem_title": "",
        "current_problem_details": "",
        "stages": {},
        "active_stage": None,
        "critique_rounds": 0,
        "modeling_agent": None,
        "agent_initialized": False,
        # Unique per browser session; used to namespace on-disk artifacts.
        "session_id": str(uuid.uuid4()),
        "uploaded_files": [],
    }

    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value

    # A defined problem with no stages means the stage structure was lost
    # (e.g. a fresh session resuming mid-problem) -- rebuild it.
    if st.session_state.problem_defined and not st.session_state.stages:
        reset_stages()
|
|
|
|
|
def reset_stages():
    """ Resets stage content and progress. """
    # (Re)create the agent first, when credentials allow, so the stage list
    # can be derived from the agent's own plan.
    if st.session_state.api_base_url and st.session_state.api_key and st.session_state.api_model:
        initialize_modeling_agent()

    if st.session_state.agent_initialized and st.session_state.modeling_agent:
        # Agent-driven path: mirror the agent's planned/completed steps.
        planned_steps = st.session_state.modeling_agent.get_planned_steps()
        completed_steps = st.session_state.modeling_agent.get_completed_steps()

        st.session_state.stages = {}
        for step_name in planned_steps:
            status = "completed" if step_name in completed_steps else "not_started"

            # Promote the first not-yet-started step to "in_progress";
            # the any(...) guard keeps at most one step in progress.
            if status == "not_started" and not any(s["status"] == "in_progress" for s in st.session_state.stages.values()):
                status = "in_progress"

            st.session_state.stages[step_name] = {
                "title": step_name,
                "content": f"# {step_name}\n\n*(Content not generated yet. Use the '✨ Generate Content' button and '✏️ Edit Content' after generation.)*",
                "status": status,
                "edit_mode": False,
                "prompt_hint": f"Complete the {step_name} section. {stages_config.get(step_name, '')}"
            }

        # Focus the in-progress stage, or fall back to the first stage.
        in_progress_stages = [k for k, v in st.session_state.stages.items() if v["status"] == "in_progress"]
        if in_progress_stages:
            st.session_state.active_stage = in_progress_stages[0]
        elif st.session_state.stages:
            st.session_state.active_stage = next(iter(st.session_state.stages))
    else:
        # No agent available: fall back to the static structure from config.
        chapter_structure = OrderedDict(config['stages'])

        st.session_state.stages = {
            key: {"title": data["title"],
                  "content": f"# {data['title']}\n\n*(Content not generated yet. Use the '✨ Generate Content' button and '✏️ Edit Content' after generation.)*",
                  "status": "not_started",
                  "edit_mode": False,
                  "prompt_hint": data["prompt_hint"]}
            for key, data in chapter_structure.items()
        }

        # Mark the first configured stage as active and in progress.
        if chapter_structure:
            first_stage_key = list(chapter_structure.keys())[0]
            if first_stage_key in st.session_state.stages:
                st.session_state.stages[first_stage_key]["status"] = "in_progress"
            st.session_state.active_stage = first_stage_key
|
|
|
|
|
def initialize_modeling_agent():
    """Initialize the ModelingAgentSystem with current configuration"""
    # All three API settings are required; otherwise tear down any agent.
    if not st.session_state.api_base_url or not st.session_state.api_key or not st.session_state.api_model:
        st.error("API Base URL, API Key, and Model Name must be configured before initializing the agent.")
        st.session_state.modeling_agent = None
        st.session_state.agent_initialized = False
        return

    # Per-run agent configuration; critique rounds come from the UI slider.
    agent_config = {
        'top_method_num': 6,
        'problem_analysis_round': st.session_state.critique_rounds,
        'problem_modeling_round': st.session_state.critique_rounds,
        'task_formulas_round': st.session_state.critique_rounds,
        'tasknum': 1,
        'chart_num': 1,
        'model_name': st.session_state.api_model,
        "method_name": "MAPP-Streamlit"
    }

    if st.session_state.problem_input_method == "Select Predefined":
        # Predefined problems ship as JSON files in the repo's data tree.
        problem_key = st.session_state.selected_mcm_problem
        problem_path = f'../data/actor_data/input/problem/{problem_key}.json'
        print(problem_path)  # NOTE(review): debug output left in
        dataset_path = ""
    else:
        # Custom problem: write a problem.json plus data/output dirs under
        # a session-scoped directory.
        session_dir = Path('data') / st.session_state.session_id
        problem_dir = session_dir
        data_dir = session_dir / 'data'
        output_dir = session_dir / 'output'

        for dir_path in [problem_dir, data_dir, output_dir]:
            dir_path.mkdir(parents=True, exist_ok=True)

        # Absolute paths of all files the user uploaded in the sidebar.
        dataset_paths = []
        for uploaded_file_info in st.session_state.uploaded_files:
            dataset_paths.append(str(uploaded_file_info['path']))

        custom_problem = {
            "title": "Custom Problem",
            "background": st.session_state.custom_background,
            "problem_requirement": st.session_state.custom_requirements,
            "dataset_path": dataset_paths,
            "dataset_description": {},
            "variable_description": {},
            "addendum": ""
        }

        problem_path = problem_dir / 'problem.json'
        with open(problem_path, 'w') as f:
            json.dump(custom_problem, f, indent=2)

        dataset_path = str(data_dir)

    # Agent output always lands under the session's output directory.
    output_path = str(Path('data') / st.session_state.session_id / 'output')
    os.makedirs(output_path, exist_ok=True)

    try:
        st.session_state.modeling_agent = ModelingAgentSystem(
            problem_path=str(problem_path),
            config=agent_config,
            dataset_path=dataset_path,
            output_path=output_path,
            name=st.session_state.selected_mcm_problem if st.session_state.problem_input_method == "Select Predefined" else "custom"
        )

        # Point the agent's LLM client at the user-supplied endpoint.
        st.session_state.modeling_agent.llm.reset(
            api_key=st.session_state.api_key,
            api_base=st.session_state.api_base_url,
            model_name=st.session_state.api_model
        )

        st.session_state.agent_initialized = True

        # Cache the problem title/details for display and export headers.
        if st.session_state.problem_input_method == "Select Predefined":
            problem_key = st.session_state.selected_mcm_problem
            st.session_state.current_problem_title = problem_key
            st.session_state.current_problem_details = f"**Background:**\n{MCM_PROBLEMS[problem_key]['background']}\n\n**Requirements:**\n{MCM_PROBLEMS[problem_key]['problem_requirement']}"
        else:
            st.session_state.current_problem_title = "Custom Problem"
            st.session_state.current_problem_details = f"**Background:**\n{st.session_state.custom_background}\n\n**Requirements:**\n{st.session_state.custom_requirements}"
    except Exception as e:
        st.error(f"Failed to initialize modeling agent: {e}")
        st.session_state.modeling_agent = None
        st.session_state.agent_initialized = False
|
|
|
|
|
def update_api_settings():
    """Update API settings for the ModelingAgentSystem in the middle of a session.

    Returns True on success; False when no agent is live, the settings are
    incomplete, or the LLM reset raises.
    """
    agent = st.session_state.modeling_agent
    if not st.session_state.agent_initialized or agent is None:
        return False

    # All three fields are required before touching the LLM client.
    if not (st.session_state.api_base_url and st.session_state.api_key and st.session_state.api_model):
        st.error("Please provide valid API Base URL, API Key, and Model Name.")
        return False

    try:
        # Re-point the shared LLM client at the new endpoint/model.
        agent.llm.reset(
            api_key=st.session_state.api_key,
            api_base=st.session_state.api_base_url,
            model_name=st.session_state.api_model
        )
        print(f'Reset LLM: {st.session_state.api_model}')

        # Keep the agent's own config record in sync.
        agent.config['model_name'] = st.session_state.api_model

        # Propagate the freshly reset client to every sub-agent that has one.
        for sub_agent_name in ('pa', 'pm', 'td', 'task', 'mr', 'chart', 'coordinator'):
            sub_agent = getattr(agent, sub_agent_name, None)
            if sub_agent is not None and hasattr(sub_agent, 'llm'):
                sub_agent.llm = agent.llm

        st.success("API settings updated successfully!")
        return True
    except Exception as e:
        st.error(f"Failed to update API settings: {e}")
        import traceback
        st.error(traceback.format_exc())
        return False
|
|
|
|
|
def get_navigatable_stages():
    """Determines which stages can be navigated to (unlocked) based on agent's step dependencies."""
    if not st.session_state.agent_initialized or not st.session_state.stages:
        return []

    navigatable = []

    if st.session_state.modeling_agent:
        completed_steps = st.session_state.modeling_agent.get_completed_steps()
        # NOTE(review): reaches into the agent's private dependency map.
        dependencies = st.session_state.modeling_agent._define_dependencies()

        for stage_key in st.session_state.stages.keys():
            # Completed stages are always reachable.
            if stage_key in completed_steps:
                navigatable.append(stage_key)
                continue

            # Otherwise unlocked once every dependency has completed
            # (stages with no declared dependencies are always unlocked).
            deps = dependencies.get(stage_key, [])
            if all(dep in completed_steps for dep in deps):
                navigatable.append(stage_key)
    else:
        # Fallback without an agent: strictly sequential unlocking.
        chapter_keys = list(st.session_state.stages.keys())

        # Every completed stage stays reachable.
        for i, key in enumerate(chapter_keys):
            if st.session_state.stages[key]['status'] == 'completed':
                navigatable.append(key)

        # The first not-yet-completed stage is the next one to unlock.
        next_stage = None
        for key in chapter_keys:
            if st.session_state.stages[key]['status'] != 'completed':
                next_stage = key
                break

        if next_stage:
            navigatable.append(next_stage)

        # Guarantee at least the first stage is reachable.
        if not navigatable and chapter_keys:
            navigatable.append(chapter_keys[0])

    return navigatable
|
|
|
|
|
|
|
|
def get_stage_status_icon(status):
    """Map a stage status string to its sidebar icon.

    Any status other than "completed" / "in_progress" (i.e. not started)
    falls back to the plain document icon.
    """
    return {"completed": "✅", "in_progress": "⏳"}.get(status, "📄")
|
|
|
|
|
def get_stage_display_status(key, navigatable_stages):
    """Return the stage's stored status, or "locked" when the stage is
    unknown or not yet reachable."""
    stage = st.session_state.stages.get(key)
    if stage is None:
        return "locked"
    return stage["status"] if key in navigatable_stages else "locked"
|
|
|
|
|
|
|
|
def generate_markdown_export():
    """Concatenates all stage content into a single markdown string."""
    if st.session_state.agent_initialized and st.session_state.modeling_agent:
        paper = st.session_state.modeling_agent.get_paper()
        # NOTE(review): debug artifact -- dumps the paper to ./paper.json in
        # the current working directory on every export.
        with open('paper.json', 'w') as f:
            json.dump(paper, f, indent=2)
        full_doc = f"# {st.session_state.current_problem_title}\n\n"
        full_doc += f"## Problem Description\n\n{st.session_state.current_problem_details}\n\n---\n\n"

        # Top-level sections, each separated by a horizontal rule.
        if 'problem_analysis' in paper:
            full_doc += f"# Problem Analysis\n\n{paper['problem_analysis']}\n\n---\n\n"

        if 'high_level_modeling' in paper:
            full_doc += f"# High-Level Modeling\n\n{paper['high_level_modeling']}\n\n---\n\n"

        if 'task_decomposition_summary' in paper:
            full_doc += f"# Task Decomposition\n\n{paper['task_decomposition_summary']}\n\n---\n\n"

        # Dependency analysis may be one blob or a per-task list.
        if 'task_dependency_analysis' in paper:
            full_doc += f"# Task Dependency Analysis\n\n"
            if isinstance(paper['task_dependency_analysis'], list):
                for i, analysis in enumerate(paper['task_dependency_analysis']):
                    full_doc += f"## Task {i+1} Dependencies\n{analysis}\n\n"
            else:
                full_doc += f"{paper['task_dependency_analysis']}\n\n"
            full_doc += "---\n\n"

        # One top-level section per non-empty task, components in fixed order.
        for i, task in enumerate(paper.get('tasks', [])):
            if task:
                task_id = i + 1
                full_doc += f"# Task {task_id}\n\n"

                components_order = [
                    'task_description',
                    'task_analysis',
                    'preliminary_formulas',
                    'mathematical_modeling_process',
                    'solution_interpretation',
                    'subtask_outcome_analysis',
                    'charts'
                ]

                for component in components_order:
                    if component in task:
                        component_title = component.replace('_', ' ').title()
                        content = task[component]

                        # Charts may be a list; emit one sub-section per chart.
                        if component == 'charts' and isinstance(content, list):
                            full_doc += f"## {component_title}\n\n"
                            for j, chart in enumerate(content):
                                full_doc += f"### Chart {j+1}\n{chart}\n\n"
                        else:
                            full_doc += f"## {component_title}\n\n{content}\n\n"

                full_doc += "---\n\n"

        return full_doc
    else:
        # No agent: fall back to the raw per-stage content in session state.
        full_doc = f"# {st.session_state.current_problem_title}\n\n"
        full_doc += f"## Problem Description\n\n{st.session_state.current_problem_details}\n\n---\n\n"

        for key, data in st.session_state.stages.items():
            full_doc += data["content"] + "\n\n---\n\n"
        return full_doc
|
|
|
|
|
|
|
|
def get_chapter_structure():
    """Dynamically build the chapter structure using ModelingAgentSystem interfaces.

    Returns an OrderedDict mapping each step name to a dict with "title"
    and "prompt_hint". Falls back to the static structure from ``config``
    when no agent is initialized.
    """
    if st.session_state.agent_initialized and st.session_state.modeling_agent:
        planned_steps = st.session_state.modeling_agent.get_planned_steps()

        chapter_structure = OrderedDict()
        for step_name in planned_steps:
            title = step_name
            prompt_hint = f"Complete the {step_name} section. {stages_config.get(step_name, '')}"

            # Task-specific steps get a hint tailored to the task number.
            if GenerationStep.is_task_step(step_name):
                task_id, step_type = GenerationStep.parse_task_step(step_name)
                # Compare against None explicitly: a truthiness test would
                # wrongly skip a task with id 0.
                if task_id is not None and step_type:
                    prompt_hint = f"Complete the {step_type} for Task {task_id}."

            chapter_structure[step_name] = {
                "title": title,
                "prompt_hint": prompt_hint
            }

        return chapter_structure
    else:
        # No live agent: use the static stage definitions from config.
        return OrderedDict(config['stages'])
|
|
|
|
|
def sync_stages_with_agent():
    """Synchronizes session stages with modeling agent progress"""
    if not st.session_state.agent_initialized or not st.session_state.modeling_agent:
        return

    completed_steps = st.session_state.modeling_agent.get_completed_steps()
    planned_steps = st.session_state.modeling_agent.get_planned_steps()
    paper = st.session_state.modeling_agent.get_paper()

    chapter_structure = get_chapter_structure()

    # 1) Make sure every planned step exists as a stage entry.
    for step_name in planned_steps:
        if step_name not in st.session_state.stages:
            prompt_hint = chapter_structure.get(step_name, {}).get('prompt_hint', f"Complete the {step_name} section. {stages_config.get(step_name, '')}")

            st.session_state.stages[step_name] = {
                "title": step_name,
                "content": f"# {step_name}\n\n*(Content not generated yet. Use the '✨ Generate Content' button and '✏️ Edit Content' after generation.)*",
                "status": "completed" if step_name in completed_steps else "not_started",
                "edit_mode": False,
                "prompt_hint": prompt_hint
            }

    # 2) Propagate completion status from the agent to existing stages.
    for step_name in st.session_state.stages:
        if step_name in completed_steps:
            st.session_state.stages[step_name]["status"] = "completed"

    # 3) Copy generated paper sections into the matching stage contents.
    if 'problem_background' in paper and 'Problem Background' in st.session_state.stages:
        st.session_state.stages['Problem Background']['content'] = f"# Problem Background\n\n{paper['problem_background']}"

    if 'problem_requirement' in paper and 'Problem Requirement' in st.session_state.stages:
        st.session_state.stages['Problem Requirement']['content'] = f"# Problem Requirement\n\n{paper['problem_requirement']}"

    if 'problem_analysis' in paper and 'Problem Analysis' in st.session_state.stages:
        st.session_state.stages['Problem Analysis']['content'] = f"# Problem Analysis\n\n{paper['problem_analysis']}"

    if 'high_level_modeling' in paper and 'High-Level Modeling' in st.session_state.stages:
        st.session_state.stages['High-Level Modeling']['content'] = f"# High-Level Modeling\n\n{paper['high_level_modeling']}"

    if 'task_decomposition_summary' in paper and 'Task Decomposition' in st.session_state.stages:
        st.session_state.stages['Task Decomposition']['content'] = f"# Task Decomposition\n\n{paper['task_decomposition_summary']}"

    # Dependency analysis may be a per-task list or one blob.
    if 'task_dependency_analysis' in paper and 'Dependency Analysis' in st.session_state.stages:
        dependency_content = "# Task Dependency Analysis\n\n"
        if isinstance(paper['task_dependency_analysis'], list):
            for i, analysis in enumerate(paper['task_dependency_analysis']):
                dependency_content += f"## Task {i+1} Dependencies\n{analysis}\n\n"
        else:
            dependency_content += str(paper['task_dependency_analysis'])
        st.session_state.stages['Dependency Analysis']['content'] = dependency_content

    # 4) Per-task paper components -> "Task <n> <Step>" stages.
    if 'tasks' in paper:
        for task_index, task_dict in enumerate(paper['tasks']):
            if not task_dict:
                continue

            task_id = task_index + 1

            # Paper component key -> stage name for this task.
            component_to_step = {
                'task_description': f'Task {task_id} Description',
                'task_analysis': f'Task {task_id} Analysis',
                'preliminary_formulas': f'Task {task_id} Preliminary Formulas',
                'mathematical_modeling_process': f'Task {task_id} Mathematical Modeling Process',
                'task_code': f'Task {task_id} Code',
                'solution_interpretation': f'Task {task_id} Solution Interpretation',
                'subtask_outcome_analysis': f'Task {task_id} Subtask Outcome Analysis',
                'charts': f'Task {task_id} Charts'
            }

            for component, step_name in component_to_step.items():
                if component in task_dict and step_name in st.session_state.stages:
                    content = task_dict[component]

                    # Charts may be a list; render one section per chart.
                    if component == 'charts':
                        formatted_content = f"# Charts for Task {task_id}\n\n"
                        if isinstance(content, list):
                            for i, chart in enumerate(content):
                                formatted_content += f"## Chart {i+1}\n{chart}\n\n"
                        else:
                            formatted_content += str(content)
                    else:
                        formatted_content = f"# {step_name}\n\n{content}"

                    st.session_state.stages[step_name]['content'] = formatted_content
|
|
|
|
|
def _handle_content_edit(active_stage_key, new_content):
    """Sync user-edited stage content back into the ModelingAgentSystem.

    Stores *new_content* in the session stage, then mirrors it (with its
    markdown heading stripped) into the matching field of ``agent.paper``
    and, for task steps other than charts, into the coordinator's memory.
    """
    if not st.session_state.agent_initialized or not st.session_state.modeling_agent:
        return

    st.session_state.stages[active_stage_key]['content'] = new_content

    agent = st.session_state.modeling_agent
    paper = agent.paper

    if active_stage_key == 'Problem Analysis':
        paper['problem_analysis'] = new_content.replace('# Problem Analysis\n\n', '')
    elif active_stage_key == 'High-Level Modeling':
        paper['high_level_modeling'] = new_content.replace('# High-Level Modeling\n\n', '')
    elif active_stage_key == 'Task Decomposition':
        paper['task_decomposition_summary'] = new_content.replace('# Task Decomposition\n\n', '')
    elif active_stage_key == 'Dependency Analysis':
        clean_content = new_content.replace('# Task Dependency Analysis\n\n', '')
        paper['task_dependency_analysis'] = clean_content
    elif active_stage_key.startswith('Task '):
        match = re.match(r"Task (\d+) (.*)", active_stage_key)
        if match:
            task_id = int(match.group(1))
            step_type = match.group(2)
            task_index = task_id - 1

            # Grow the task list on demand; setdefault also guards against
            # a paper that has no 'tasks' entry yet (was a KeyError).
            tasks = paper.setdefault('tasks', [])
            while len(tasks) <= task_index:
                tasks.append({})

            # Human-readable step label -> paper storage key.
            step_to_key = {
                'Description': 'task_description',
                'Analysis': 'task_analysis',
                'Preliminary Formulas': 'preliminary_formulas',
                'Mathematical Modeling Process': 'mathematical_modeling_process',
                'Solution Interpretation': 'solution_interpretation',
                'Subtask Outcome Analysis': 'subtask_outcome_analysis',
                'Charts': 'charts'
            }

            if step_type in step_to_key:
                key = step_to_key[step_type]
                clean_content = new_content.replace(f'# {active_stage_key}\n\n', '')
                tasks[task_index][key] = clean_content

                # Charts are not part of the coordinator's textual memory.
                if step_type != 'Charts':
                    agent.coordinator.memory.setdefault(str(task_id), {})[key] = clean_content
|
|
|
|
|
|
|
|
|
|
|
# Seed all session-state keys before any widget renders.
initialize_session_state()

# The landing header is only shown until a problem has been defined.
if not st.session_state.problem_defined:
    st.title(f"{APP_TITLE}")
    st.caption("An AI-assisted platform for structuring and drafting mathematical modeling reports.")
|
|
|
|
|
|
|
|
# ---- Sidebar: API configuration, problem definition, stage navigation, export ----
with st.sidebar:
    st.header("⚙️ Configuration")

    # Used only to decide whether the API expander starts open.
    api_base_provided = bool(st.session_state.api_base_url)
    api_key_provided = bool(st.session_state.api_key)
    api_model_provided = bool(st.session_state.api_model)

    with st.expander("LLM API Details", expanded=not (api_base_provided and api_key_provided and api_model_provided)):
        # Restore values persisted under the *_main keys (these survive
        # widget-state resets across reruns).
        if 'api_base_url_main' in st.session_state:
            st.session_state.api_base_url = st.session_state.api_base_url_main
        if 'api_key_main' in st.session_state:
            st.session_state.api_key = st.session_state.api_key_main
        if 'api_model_main' in st.session_state:
            st.session_state.api_model = st.session_state.api_model_main

        st.text_input(
            "API Base URL",
            value=st.session_state.api_base_url,
            key="api_base_url",
            placeholder="e.g., https://api.openai.com/v1",
            help="Your OpenAI compatible API endpoint."
        )
        st.text_input(
            "API Key",
            value=st.session_state.api_key,
            key="api_key",
            type="password",
            help="Your OpenAI compatible API key. Can also be set via Streamlit secrets (API_KEY)."
        )
        st.text_input(
            "Model Name",
            value=st.session_state.api_model,
            key="api_model",
            placeholder="e.g., gpt-4-turbo",
            help="The specific model to use for generation."
        )

        # Persist the entered values and push them into a live agent, if any.
        if st.button("Save", key="save_api_settings", type="secondary", use_container_width=True):
            st.session_state.api_base_url_main = st.session_state.api_base_url
            st.session_state.api_key_main = st.session_state.api_key
            st.session_state.api_model_main = st.session_state.api_model
            update_api_settings()

    st.divider()

    st.header("🔍 Problem Definition")
    with st.expander("Problem Background & Requirements", expanded=st.session_state.problem_definition_expanded):
        # A problem can only be defined once the API is fully configured.
        api_configured = bool(st.session_state.api_base_url and st.session_state.api_key and st.session_state.api_model)
        if not api_configured:
            st.warning("Please provide valid API Base URL, API Key, and Model Name in the configuration above to define a problem.")
        else:
            problem_input_method = st.radio(
                "Select Problem Source:",
                ["Select Predefined", "Input Custom"],
                key="problem_input_method",
                horizontal=True,
            )

            # Set by either "Load Problem" or "Set Custom Problem" below.
            confirm_problem = False
            if st.session_state.problem_input_method == "Select Predefined":
                st.selectbox(
                    "Choose MCM/ICM Problem:",
                    options=list(MCM_PROBLEMS.keys()),
                    format_func=lambda x: f"MCM_{x}",
                    key="selected_mcm_problem"
                )
                if st.button("Load Problem", type="primary", key="load_predefined", use_container_width=True):
                    confirm_problem = True
                    problem_key = st.session_state.selected_mcm_problem
                    st.session_state.current_problem_title = problem_key
                    st.session_state.current_problem_details = f"**Background:**\n{MCM_PROBLEMS[problem_key]['background']}\n\n**Requirements:**\n{MCM_PROBLEMS[problem_key]['problem_requirement']}"
            else:
                # Custom problem: free-text fields plus optional data upload.
                st.text_area("Problem Background:", key="custom_background", height=150, placeholder="Provide context and background information.")
                st.text_area("Problem Requirements:", key="custom_requirements", height=100, placeholder="Detail the specific tasks or questions to be addressed.")

                st.subheader("Upload Data Files")
                uploaded_files = st.file_uploader(
                    "Upload CSV or Excel files for your problem (optional)",
                    type=["csv", "xlsx", "xls"],
                    accept_multiple_files=True,
                    help="Data files will be available for the modeling agent to use."
                )

                if uploaded_files and len(uploaded_files) > 0:
                    # When the selected file count changes, rebuild the list.
                    if "last_upload_count" not in st.session_state or st.session_state.last_upload_count != len(uploaded_files):
                        st.session_state.uploaded_files = []
                        st.session_state.last_upload_count = len(uploaded_files)

                    for uploaded_file in uploaded_files:
                        # Skip files already saved during a previous rerun.
                        file_already_processed = any(info['name'] == uploaded_file.name for info in st.session_state.uploaded_files)

                        if not file_already_processed:
                            # Persist the upload under this session's data dir.
                            file_dir = Path('data') / st.session_state.session_id / 'data'
                            file_dir.mkdir(parents=True, exist_ok=True)

                            file_path = file_dir / uploaded_file.name
                            with open(file_path, "wb") as f:
                                f.write(uploaded_file.getbuffer())

                            # Build a small preview (first 5 rows) for display.
                            preview = None
                            if uploaded_file.name.endswith(('.csv', '.xlsx', '.xls')):
                                try:
                                    if uploaded_file.name.endswith('.csv'):
                                        df = pd.read_csv(file_path)
                                    else:
                                        df = pd.read_excel(file_path)
                                    preview = df.head(5)
                                except Exception as e:
                                    preview = f"Error reading file: {str(e)}"

                            st.session_state.uploaded_files.append({
                                'name': uploaded_file.name,
                                'path': str(file_path.absolute()),
                                'preview': preview
                            })

                if st.session_state.uploaded_files:
                    st.success(f"{len(st.session_state.uploaded_files)} file(s) uploaded successfully")
                    for file_info in st.session_state.uploaded_files:
                        with st.container(border=True):
                            st.markdown(f"**📄 {file_info['name']}**")
                            if isinstance(file_info['preview'], pd.DataFrame):
                                st.dataframe(file_info['preview'])
                            else:
                                st.write(file_info['preview'])

                if st.button("Set Custom Problem", type="primary", key="load_custom", use_container_width=True):
                    if st.session_state.custom_background and st.session_state.custom_requirements:
                        confirm_problem = True
                        st.session_state.current_problem_title = "Custom Problem"
                        st.session_state.current_problem_details = f"**Background:**\n{st.session_state.custom_background}\n\n**Requirements:**\n{st.session_state.custom_requirements}"
                    else:
                        st.warning("Please provide background and requirements for the custom problem.")

            # Common confirmation path for both problem sources: mark the
            # problem as defined, rebuild stages, and rerun the app.
            if confirm_problem:
                if st.session_state.problem_defined:
                    st.toast("Reloading problem: Existing stage content and progress will be reset.")
                    time.sleep(1)
                st.session_state.problem_defined = True
                st.session_state.problem_definition_expanded = False
                reset_stages()
                st.rerun()

    # ---- Stage navigation (only after a problem is defined) ----
    if st.session_state.problem_defined:
        st.divider()
        st.header("📚 Stages")
        navigatable_stages = get_navigatable_stages()

        # Keep the active stage valid: fall back to the last unlocked stage.
        if st.session_state.active_stage not in navigatable_stages:
            if navigatable_stages:
                st.session_state.active_stage = navigatable_stages[-1]
            else:
                pass

        with st.container(border=False):
            st.markdown('<div data-testid="stSidebarNavItems">', unsafe_allow_html=True)

            for key, data in st.session_state.stages.items():
                stage_info = st.session_state.stages.get(key)
                if stage_info:
                    is_navigatable = key in navigatable_stages
                    is_active = key == st.session_state.active_stage
                    display_status = get_stage_display_status(key, navigatable_stages)

                    # Locked stages get a padlock; others get a status icon.
                    if display_status == "locked":
                        icon = "🔒"
                        label_markdown = f"{icon} {stage_info['title']}"
                    else:
                        icon = get_stage_status_icon(display_status)
                        label_markdown = f"{icon} {stage_info['title']}"

                    # NOTE(review): built but never rendered -- the st.button
                    # below uses label_markdown directly.
                    button_label_html = f'<div style="display: flex; align-items: center;">{label_markdown}</div>'

                    button_type = "primary" if is_active else "secondary"

                    if st.button(
                        label=label_markdown,
                        key=f"nav_{key}",
                        disabled=not is_navigatable,
                        use_container_width=True,
                        type=button_type,
                        help=f"Status: {display_status.replace('_', ' ').title()}" if is_navigatable else "Complete previous stages to unlock"
                    ):
                        if is_navigatable and not is_active:
                            st.session_state.active_stage = key
                            # Leave edit mode when switching stages.
                            if st.session_state.stages[key].get('edit_mode', False):
                                st.session_state.stages[key]['edit_mode'] = False
                            st.rerun()

            st.markdown('</div>', unsafe_allow_html=True)

        # ---- Report export ----
        st.divider()
        st.header("📄 Solution Report")
        if st.session_state.stages:
            markdown_content = generate_markdown_export()
            st.download_button(
                label="📥 Export Intermediate Process (.md)",
                data=markdown_content,
                file_name=f"mapp_export_{st.session_state.current_problem_title.replace(' ', '_').lower()}.md",
                mime="text/markdown",
                use_container_width=True
            )

            # LaTeX/PDF export unlocks only once every stage is completed.
            all_completed = all(stage_data["status"] == "completed" for stage_data in st.session_state.stages.values())

            if st.button(
                "📊 Export Solution Report (.latex & .pdf)",
                disabled=not all_completed,
                use_container_width=True,
                help="Generate and download a complete LaTeX and PDF report (available after all stages are completed)"
            ):
                if st.session_state.agent_initialized and st.session_state.modeling_agent:
                    with st.spinner("Generating LaTeX and PDF solution report... This may take a few minutes."):
                        try:
                            output_path = str(Path('data') / st.session_state.session_id / 'output')

                            st.session_state.modeling_agent.generate_paper(output_path)

                            # Remember artifact locations for the download
                            # buttons rendered below.
                            st.session_state.latex_path = f'{output_path}/latex/solution.tex'
                            st.session_state.pdf_path = f'{output_path}/latex/solution.pdf'
                        except Exception as e:
                            st.error(f"Error generating report: {str(e)}")
                            import traceback
                            st.error(traceback.format_exc())
                    st.rerun()
                else:
                    st.error("ModelingAgentSystem not initialized. Please check API configuration.")

            # Offer downloads once the generated files exist on disk.
            if hasattr(st.session_state, 'latex_path') and Path(st.session_state.latex_path).exists():
                with open(st.session_state.latex_path, "rb") as f:
                    st.download_button(
                        label="📥 Download LaTeX (.tex)",
                        data=f,
                        file_name="solution.tex",
                        mime="application/x-tex",
                        key="download_latex"
                    )

            if hasattr(st.session_state, 'pdf_path') and Path(st.session_state.pdf_path).exists():
                with open(st.session_state.pdf_path, "rb") as f:
                    st.download_button(
                        label="📥 Download PDF Report",
                        data=f,
                        file_name="solution.pdf",
                        mime="application/pdf",
                        key="download_pdf"
                    )
        else:
            st.info("Define a problem and generate content to enable export.")
|
|
|
|
|
|
|
|
|
|
|
# Main content area. Landing view shown until a problem has been defined
# via the sidebar (the matching `else:` with the stage workspace follows).
if not st.session_state.problem_defined:
    st.info("⬅️ Welcome to Mathematical Modeling Agent! Please configure your API details and define a modeling problem using the sidebar to begin.")
    st.markdown("---")
    st.subheader("How it works:")
    # Quick-start guide rendered as one Markdown block.
    st.markdown("""
    1. **Configure API:** Enter your OpenAI compatible API details in the sidebar. These can also be set via Streamlit secrets (`API_BASE_URL`, `API_KEY`).
    2. **Define Problem:** Choose a predefined problem or input your own custom problem description and requirements.
    3. **Navigate Stages:** Use the sidebar to move through the standard sections of a modeling report. Stages unlock as you mark previous ones complete.
    4. **Generate & Edit:** For each stage, you can:
        * Use the **✨ Generate Content** button (with optional instructions) to get an initial draft from the AI (mock generation in this version).
        * **✏️ Edit Content** after generation.
        * Mark stages as **✅ Complete** to unlock the next one.
    5. **Export:** Download your progress as a Markdown file at any time, or export your complete solution as both LaTeX and PDF files when finished.
    """)
|
|
|
|
|
else:
    # Workspace for the currently selected stage.
    active_stage_key = st.session_state.active_stage
    stage_data = st.session_state.stages[active_stage_key]

    st.header(f"{stage_data['title']}")
    st.markdown(f"> **Goal:** *{stage_data['prompt_hint']}*")
    st.divider()

    # -- AI generation controls ---------------------------------------------
    with st.container(border=True):
        st.subheader("🚀 Agent Content Generation")
        col1, col2 = st.columns([3, 1])
        with col1:
            # Optional free-text guidance passed to the agent for this stage.
            user_prompt = st.text_area(
                "Instructions / Prompt Refinement:",
                key=f"prompt_{active_stage_key}",
                placeholder="Optional: Provide specific instructions, focus points, or data for the Agent to use in this stage.",
                help="Guide the AI generation for this specific stage.",
                height=100
            )
        with col2:
            # Number of self-critique iterations; persisted in session state.
            st.session_state.critique_rounds = st.slider(
                "Critic Rounds", 0, 3, st.session_state.critique_rounds,
                help="Simulated self-critique iterations for the AI (0-3). More rounds might improve quality but take longer (mock only).",
                key=f"critique_{active_stage_key}"
            )
        if st.button("✨ Generate Content", key=f"generate_{active_stage_key}", type="primary", use_container_width=True):
            if not st.session_state.agent_initialized or st.session_state.modeling_agent is None:
                st.error("ModelingAgentSystem not initialized. Please check API configuration.")
            else:
                with st.spinner(f"🤖 Generating content for '{active_stage_key}'... Please wait."):
                    try:
                        # Propagate the chosen critique depth to every
                        # round-count knob the agent exposes.
                        st.session_state.modeling_agent.config['problem_analysis_round'] = st.session_state.critique_rounds
                        st.session_state.modeling_agent.config['problem_modeling_round'] = st.session_state.critique_rounds
                        st.session_state.modeling_agent.config['task_formulas_round'] = st.session_state.critique_rounds

                        if user_prompt:
                            # NOTE(review): leftover debug print to stdout --
                            # consider removing or routing through logging.
                            print(user_prompt, st.session_state.critique_rounds)
                        # Generation runs regardless of whether a prompt was given.
                        success = st.session_state.modeling_agent.generate_step(active_stage_key, user_prompt=user_prompt, round=st.session_state.critique_rounds)

                        if success:
                            # Pull the agent's new content into session state.
                            sync_stages_with_agent()

                            # First successful generation promotes the stage
                            # from not_started to in_progress.
                            if st.session_state.stages[active_stage_key]['status'] == 'not_started':
                                st.session_state.stages[active_stage_key]['status'] = 'in_progress'

                            st.success(f"Successfully generated content for '{active_stage_key}'!")

                            # If the agent reports the step as done, mark the
                            # stage completed outright.
                            completed_steps = st.session_state.modeling_agent.get_completed_steps()
                            if active_stage_key in completed_steps:
                                st.session_state.stages[active_stage_key]['status'] = 'completed'
                        else:
                            st.error(f"Could not generate content for '{active_stage_key}'. Check dependencies or retry.")
                    except Exception as e:
                        st.error(f"Error generating content: {str(e)}")
                        import traceback
                        st.error(traceback.format_exc())

                # NOTE(review): rerun also fires on failure, clearing the
                # error messages above -- confirm this is intended.
                st.rerun()

    st.divider()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    # -- Content display / editing ------------------------------------------
    edit_mode = st.session_state.stages[active_stage_key].get('edit_mode', False)
    content = st.session_state.stages[active_stage_key]['content']
    is_completed = st.session_state.stages[active_stage_key]['status'] == 'completed'

    if edit_mode:
        # Edit mode: raw Markdown editor with Save / Cancel actions.
        st.info("✍️ You are in Edit Mode. Use Markdown syntax. Save or Cancel when done.")
        new_content = st.text_area(
            "Edit Content (Markdown Supported)",
            value=content,
            key=f"editor_{active_stage_key}",
            height=500,
            label_visibility="collapsed"
        )
        col_save, col_cancel, _ = st.columns([1, 1, 4])
        with col_save:
            if st.button("💾 Save Changes", key=f"save_{active_stage_key}", type="primary"):
                # Persist the edit through the shared handler, then exit
                # edit mode.
                _handle_content_edit(active_stage_key, new_content)
                st.session_state.stages[active_stage_key]['edit_mode'] = False

                # A manual edit also counts as starting the stage.
                if st.session_state.stages[active_stage_key]['status'] == 'not_started':
                    st.session_state.stages[active_stage_key]['status'] = 'in_progress'
                st.toast("Changes saved!", icon="💾")
                st.rerun()
        with col_cancel:
            if st.button("❌ Cancel Edit", key=f"cancel_{active_stage_key}"):
                # Discard the draft; text_area state is keyed per stage.
                st.session_state.stages[active_stage_key]['edit_mode'] = False
                st.rerun()

    else:
        # Read-only view of the stage content.
        with st.container(border=True):
            st.markdown(content, unsafe_allow_html=True)

        # -- Stage action buttons -------------------------------------------
        cols = st.columns(3)
        with cols[0]:
            if st.button("✏️ Edit Content", key=f"edit_{active_stage_key}", use_container_width=True, disabled=edit_mode):
                st.session_state.stages[active_stage_key]['edit_mode'] = True
                st.rerun()

        with cols[1]:
            # NOTE(review): the two branches below duplicate the
            # advance-to-next-stage logic; a shared helper would reduce drift.
            if not is_completed:
                if st.button("✅ Mark as Complete", key=f"complete_{active_stage_key}", use_container_width=True):
                    st.session_state.stages[active_stage_key]['status'] = 'completed'
                    st.toast(f"Stage '{stage_data['title']}' marked complete!", icon="✅")

                    # Unlock and jump to the next stage, if any.
                    stage_keys = list(st.session_state.stages.keys())
                    current_index = stage_keys.index(active_stage_key)
                    if current_index + 1 < len(stage_keys):
                        next_stage_key = stage_keys[current_index + 1]

                        if st.session_state.stages[next_stage_key]['status'] == 'not_started':
                            st.session_state.stages[next_stage_key]['status'] = 'in_progress'
                        st.session_state.active_stage = next_stage_key
                    else:
                        st.success("🎉 All stages completed!")
                    st.rerun()
            else:
                # Already complete: the button just advances to the next stage.
                if st.button("✅ Completed (Click to advance)", key=f"completed_{active_stage_key}", use_container_width=True):
                    stage_keys = list(st.session_state.stages.keys())
                    current_index = stage_keys.index(active_stage_key)
                    if current_index + 1 < len(stage_keys):
                        next_stage_key = stage_keys[current_index + 1]

                        if st.session_state.stages[next_stage_key]['status'] == 'not_started':
                            st.session_state.stages[next_stage_key]['status'] = 'in_progress'
                        st.session_state.active_stage = next_stage_key
                        st.rerun()
                    else:
                        st.success("🎉 All stages completed!")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Page footer.
st.markdown("---")
st.caption("Mathematical Modeling Multi-Agent System | Prototype")
|
|
|
|
|
|
|
|
def on_page_load():
    """Synchronize local stage state with the agent when the page (re)loads.

    Delegates to ``sync_stages_with_agent`` only when the agent system has
    been initialized; otherwise this is a no-op.
    """
    state = st.session_state
    if not (state.agent_initialized and state.modeling_agent):
        return
    sync_stages_with_agent()
|
|
|
|
|
|
|
|
# Run the sync hook on every script execution once a problem is defined
# and the agent is up.
if st.session_state.problem_defined and st.session_state.agent_initialized:
    on_page_load()
|
|
|