Spaces:
Sleeping
Sleeping
Commit
·
1cbada8
1
Parent(s):
5698c85
Implement scenario simulation and analytics logic
Browse filesAdded full logic for scenario simulation, customer persona generation, transcript analysis, and phrase analytics in colosseum.py. Database write and read functions in database.py are now fully implemented, supporting scenario, simulation, and phrase analytics management. Improved graph traversal and path prediction in engine.py. Updated app.py to support new analytics, graph visualization, and streamlined UI logic for CRM and Colosseum modes.
- app.py +82 -100
- colosseum.py +148 -20
- database.py +135 -17
- engine.py +4 -2
app.py
CHANGED
|
@@ -33,6 +33,8 @@ if "lead_info" not in st.session_state: st.session_state.lead_info = {}
|
|
| 33 |
if "product_info" not in st.session_state: st.session_state.product_info = {}
|
| 34 |
if "selected_scenario_id" not in st.session_state: st.session_state.selected_scenario_id = None
|
| 35 |
if "visited_history" not in st.session_state: st.session_state.visited_history = []
|
|
|
|
|
|
|
| 36 |
|
| 37 |
# --- AI & GRAPH LOGIC ---
|
| 38 |
@st.cache_resource
|
|
@@ -66,8 +68,7 @@ def load_graph_data():
|
|
| 66 |
def analyze_full_context(model, user_input, current_node, chat_history):
|
| 67 |
history_text = "\n".join([f"{m['role']}: {m['content']}" for m in chat_history[-4:]])
|
| 68 |
prompt = f"""
|
| 69 |
-
ROLE: World-Class Sales Psychologist.
|
| 70 |
-
CONTEXT: Current Step: "{current_node}", User said: "{user_input}"
|
| 71 |
TASK: Determine Intent (MOVE, STAY, EXIT) and Archetype.
|
| 72 |
OUTPUT JSON: {{"archetype": "...", "intent": "...", "reasoning": "..."}}
|
| 73 |
"""
|
|
@@ -86,21 +87,34 @@ def generate_response_stream(model, instruction_text, user_input, lead_info, arc
|
|
| 86 |
elif archetype == "ANALYST": tone = "Logical, factual, detailed."
|
| 87 |
elif archetype == "EXPRESSIVE": tone = "Energetic, inspiring, emotional."
|
| 88 |
elif archetype == "CONSERVATIVE": tone = "Calm, supportive, reassuring."
|
| 89 |
-
product_context = ""
|
| 90 |
-
if product_info:
|
| 91 |
-
product_context = f"PRODUCT CONTEXT: You are selling: {product_info.get('product_name', 'Our Solution')}"
|
| 92 |
prompt = f"""
|
| 93 |
-
ROLE: You are {bot_name}, a top-tier sales representative.
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
USER SAID: "{user_input}"
|
| 97 |
-
ARCHETYPE: {archetype}
|
| 98 |
-
{product_context}
|
| 99 |
-
TASK: Generate the spoken response in Ukrainian. Adapt to the client's tone ({tone}).
|
| 100 |
-
OUTPUT: Just the spoken words.
|
| 101 |
"""
|
| 102 |
return model.generate_content(prompt, stream=True)
|
| 103 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 104 |
# --- MAIN APP ---
|
| 105 |
init_db()
|
| 106 |
st.sidebar.title("🛠️ SellMe Control")
|
|
@@ -108,9 +122,7 @@ mode = st.sidebar.radio("Mode", ["🤖 Sales Bot CRM", "⚔️ Evolution Hub", "
|
|
| 108 |
|
| 109 |
api_key = st.sidebar.text_input("Google API Key", type="password", help="Required for all modes.")
|
| 110 |
if not api_key:
|
| 111 |
-
st.warning("Please enter your Google API Key to proceed.")
|
| 112 |
-
st.stop()
|
| 113 |
-
|
| 114 |
if not configure_genai(api_key):
|
| 115 |
st.stop()
|
| 116 |
|
|
@@ -120,8 +132,7 @@ if mode == "🤖 Sales Bot CRM":
|
|
| 120 |
st.title("🤖 Sales Bot CRM")
|
| 121 |
graph_data = load_graph_data()
|
| 122 |
if graph_data[0] is None:
|
| 123 |
-
st.error("sales_script.json not found. CRM mode requires it.")
|
| 124 |
-
st.stop()
|
| 125 |
graph, node_to_id, id_to_node, nodes, edges = graph_data
|
| 126 |
|
| 127 |
if st.sidebar.button("📊 Dashboard"): st.session_state.page = "dashboard"; st.rerun()
|
|
@@ -131,75 +142,60 @@ if mode == "🤖 Sales Bot CRM":
|
|
| 131 |
st.header("Dashboard")
|
| 132 |
data, stats = get_analytics()
|
| 133 |
if data is not None and not data.empty:
|
| 134 |
-
c1, c2, c3 = st.columns(3)
|
| 135 |
-
|
| 136 |
-
c2.metric("Success Rate", f"{stats['success_rate']}%")
|
| 137 |
-
c3.metric("AI Learning Iterations", "v1.3")
|
| 138 |
-
else:
|
| 139 |
-
st.info("No calls in the database yet.")
|
| 140 |
|
| 141 |
elif st.session_state.page == "setup":
|
| 142 |
st.header("Setup New Call")
|
| 143 |
with st.form("setup_form"):
|
| 144 |
-
bot_name = st.text_input("Your Name", value="Олексій")
|
| 145 |
-
client_name = st.text_input("Client Name", value="Олександр")
|
| 146 |
-
company = st.text_input("Company", value="SoftServe")
|
| 147 |
submitted = st.form_submit_button("🚀 Start Call")
|
| 148 |
if submitted:
|
| 149 |
st.session_state.lead_info = {"name": client_name, "bot_name": bot_name, "company": company}
|
| 150 |
-
st.session_state.page = "chat"
|
| 151 |
-
st.session_state.messages = []
|
| 152 |
-
st.session_state.current_node = "start"
|
| 153 |
-
st.session_state.visited_history = []
|
| 154 |
st.rerun()
|
| 155 |
|
| 156 |
elif st.session_state.page == "chat":
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
with st.
|
| 160 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 161 |
|
| 162 |
if prompt := st.chat_input("Your reply..."):
|
| 163 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
| 164 |
-
with st.chat_message("user"):
|
| 165 |
-
st.markdown(prompt)
|
| 166 |
|
| 167 |
analysis = analyze_full_context(model, prompt, st.session_state.current_node, st.session_state.messages)
|
| 168 |
-
|
| 169 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 170 |
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
st.session_state.visited_history.append(st.session_state.current_node)
|
| 182 |
-
curr_id = node_to_id[st.session_state.current_node]
|
| 183 |
-
best_next = None; min_w = float('inf')
|
| 184 |
-
for n, w in graph.adj_list[curr_id]:
|
| 185 |
-
if w < min_w: min_w = w; best_next = n
|
| 186 |
-
if best_next is not None:
|
| 187 |
-
st.session_state.current_node = id_to_node[best_next]
|
| 188 |
-
else:
|
| 189 |
-
st.warning("End of script reached.")
|
| 190 |
-
add_lead({"Date": datetime.now().strftime("%Y-%m-%d"), "Name": st.session_state.lead_info['name'], "Outcome": "End of Script", "Archetype": archetype})
|
| 191 |
-
st.stop()
|
| 192 |
-
|
| 193 |
-
instruction_text = nodes[st.session_state.current_node]
|
| 194 |
-
with st.chat_message("assistant"):
|
| 195 |
-
message_placeholder = st.empty()
|
| 196 |
-
full_response = ""
|
| 197 |
-
stream = generate_response_stream(model, instruction_text, prompt, st.session_state.lead_info, archetype, st.session_state.product_info)
|
| 198 |
-
for chunk in stream:
|
| 199 |
-
full_response += (chunk.text or "")
|
| 200 |
-
message_placeholder.markdown(full_response + "▌")
|
| 201 |
-
message_placeholder.markdown(full_response)
|
| 202 |
-
st.session_state.messages.append({"role": "assistant", "content": full_response})
|
| 203 |
|
| 204 |
elif mode == "⚔️ Evolution Hub":
|
| 205 |
st.title("⚔️ The Colosseum: AI Evolution Hub")
|
|
@@ -208,49 +204,35 @@ elif mode == "⚔️ Evolution Hub":
|
|
| 208 |
with c1:
|
| 209 |
num_simulations = st.number_input("Simulations to Run", 1, 50, 10)
|
| 210 |
if st.button(f"🚀 Run {num_simulations} Simulations"):
|
| 211 |
-
log_container = st.container(height=200)
|
| 212 |
-
progress_bar = st.progress(0)
|
| 213 |
-
reports = []
|
| 214 |
def progress_callback(report, current, total):
|
| 215 |
-
reports.append(report)
|
| 216 |
-
progress_bar.progress(current / total)
|
| 217 |
persona = report['customer_persona']
|
| 218 |
log_container.write(f"Sim #{current}: Scen. {report['scenario_id']} vs {persona['archetype']} -> **{report['outcome']}** (Score: {report['score']})")
|
| 219 |
colosseum.run_batch_simulations(model, num_simulations, progress_callback)
|
| 220 |
st.success("Batch simulation complete!")
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
|
|
|
| 227 |
st.cache_data.clear()
|
| 228 |
with c2:
|
| 229 |
if st.button("🧬 Run Evolution Cycle"):
|
| 230 |
-
with st.spinner("Running evolution..."):
|
| 231 |
-
|
| 232 |
-
st.success("Evolution complete!")
|
| 233 |
-
st.cache_data.clear()
|
| 234 |
|
| 235 |
-
st.header("🏆 Scenarios Leaderboard")
|
| 236 |
-
scenarios_df = get_all_scenarios_with_stats()
|
| 237 |
-
st.dataframe(scenarios_df)
|
| 238 |
-
|
| 239 |
-
st.header("🕵️ Scenario Inspector")
|
| 240 |
if not scenarios_df.empty:
|
|
|
|
| 241 |
selected_id = st.selectbox("Select Scenario ID:", scenarios_df['id'])
|
| 242 |
if selected_id:
|
| 243 |
c1, c2 = st.columns(2)
|
| 244 |
-
with c1:
|
| 245 |
-
|
| 246 |
-
st.json(get_scenario(selected_id), height=400)
|
| 247 |
-
with c2:
|
| 248 |
-
st.subheader("👍👎 Phrase Analytics")
|
| 249 |
-
st.dataframe(get_phrase_analytics_for_scenario(selected_id))
|
| 250 |
-
else:
|
| 251 |
-
st.info("No scenarios to display.")
|
| 252 |
|
| 253 |
elif mode == "🧪 Math Lab":
|
| 254 |
st.title("🧪 Computational Math Lab")
|
| 255 |
-
# ... (Full Math Lab logic restored here)
|
| 256 |
st.info("Math Lab is ready.")
|
|
|
|
| 33 |
if "product_info" not in st.session_state: st.session_state.product_info = {}
|
| 34 |
if "selected_scenario_id" not in st.session_state: st.session_state.selected_scenario_id = None
|
| 35 |
if "visited_history" not in st.session_state: st.session_state.visited_history = []
|
| 36 |
+
if "current_archetype" not in st.session_state: st.session_state.current_archetype = "UNKNOWN"
|
| 37 |
+
if "reasoning" not in st.session_state: st.session_state.reasoning = ""
|
| 38 |
|
| 39 |
# --- AI & GRAPH LOGIC ---
|
| 40 |
@st.cache_resource
|
|
|
|
| 68 |
def analyze_full_context(model, user_input, current_node, chat_history):
|
| 69 |
history_text = "\n".join([f"{m['role']}: {m['content']}" for m in chat_history[-4:]])
|
| 70 |
prompt = f"""
|
| 71 |
+
ROLE: World-Class Sales Psychologist. CONTEXT: Current Step: "{current_node}", User said: "{user_input}"
|
|
|
|
| 72 |
TASK: Determine Intent (MOVE, STAY, EXIT) and Archetype.
|
| 73 |
OUTPUT JSON: {{"archetype": "...", "intent": "...", "reasoning": "..."}}
|
| 74 |
"""
|
|
|
|
| 87 |
elif archetype == "ANALYST": tone = "Logical, factual, detailed."
|
| 88 |
elif archetype == "EXPRESSIVE": tone = "Energetic, inspiring, emotional."
|
| 89 |
elif archetype == "CONSERVATIVE": tone = "Calm, supportive, reassuring."
|
| 90 |
+
product_context = f"PRODUCT CONTEXT: You are selling: {product_info.get('product_name', 'Our Solution')}" if product_info else ""
|
|
|
|
|
|
|
| 91 |
prompt = f"""
|
| 92 |
+
ROLE: You are {bot_name}, a top-tier sales representative. CLIENT: {client_name} from {company}.
|
| 93 |
+
CURRENT GOAL: "{instruction_text}". USER SAID: "{user_input}". ARCHETYPE: {archetype}. {product_context}
|
| 94 |
+
TASK: Generate the spoken response in Ukrainian. Adapt to the client's tone ({tone}). OUTPUT: Just the spoken words.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 95 |
"""
|
| 96 |
return model.generate_content(prompt, stream=True)
|
| 97 |
|
| 98 |
+
def draw_graph(graph_data, current_node, predicted_path):
|
| 99 |
+
nodes, edges = graph_data[3], graph_data[4]
|
| 100 |
+
dot = graphviz.Digraph()
|
| 101 |
+
dot.attr(rankdir='TB', splines='ortho', nodesep='0.3', ranksep='0.4', bgcolor='transparent')
|
| 102 |
+
dot.attr('node', shape='box', style='rounded,filled', fontname='Arial', fontsize='11', width='2.5', height='0.5', margin='0.1')
|
| 103 |
+
dot.attr('edge', fontname='Arial', fontsize='9', arrowsize='0.6')
|
| 104 |
+
for n in nodes:
|
| 105 |
+
fill, color, pen, font = '#F7F9F9', '#BDC3C7', '1', '#424949'
|
| 106 |
+
if n == current_node: fill, color, pen, font = '#FF4B4B', '#922B21', '2', 'white'
|
| 107 |
+
elif n in predicted_path: fill, color, pen, font = '#FEF9E7', '#F1C40F', '1', 'black'
|
| 108 |
+
dot.node(n, label=n, fillcolor=fill, color=color, penwidth=pen, fontcolor=font)
|
| 109 |
+
for e in edges:
|
| 110 |
+
color, pen = '#D5D8DC', '1'
|
| 111 |
+
if e["from"] in predicted_path and e["to"] in predicted_path:
|
| 112 |
+
try:
|
| 113 |
+
if predicted_path.index(e["to"]) == predicted_path.index(e["from"]) + 1: color, pen = '#F1C40F', '2.5'
|
| 114 |
+
except: pass
|
| 115 |
+
dot.edge(e["from"], e["to"], color=color, penwidth=pen)
|
| 116 |
+
return dot
|
| 117 |
+
|
| 118 |
# --- MAIN APP ---
|
| 119 |
init_db()
|
| 120 |
st.sidebar.title("🛠️ SellMe Control")
|
|
|
|
| 122 |
|
| 123 |
api_key = st.sidebar.text_input("Google API Key", type="password", help="Required for all modes.")
|
| 124 |
if not api_key:
|
| 125 |
+
st.warning("Please enter your Google API Key to proceed."); st.stop()
|
|
|
|
|
|
|
| 126 |
if not configure_genai(api_key):
|
| 127 |
st.stop()
|
| 128 |
|
|
|
|
| 132 |
st.title("🤖 Sales Bot CRM")
|
| 133 |
graph_data = load_graph_data()
|
| 134 |
if graph_data[0] is None:
|
| 135 |
+
st.error("sales_script.json not found. CRM mode requires it."); st.stop()
|
|
|
|
| 136 |
graph, node_to_id, id_to_node, nodes, edges = graph_data
|
| 137 |
|
| 138 |
if st.sidebar.button("📊 Dashboard"): st.session_state.page = "dashboard"; st.rerun()
|
|
|
|
| 142 |
st.header("Dashboard")
|
| 143 |
data, stats = get_analytics()
|
| 144 |
if data is not None and not data.empty:
|
| 145 |
+
c1, c2, c3 = st.columns(3); c1.metric("Total Calls", stats["total"]); c2.metric("Success Rate", f"{stats['success_rate']}%"); c3.metric("AI Learning Iterations", "v1.4")
|
| 146 |
+
else: st.info("No calls in the database yet.")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
|
| 148 |
elif st.session_state.page == "setup":
|
| 149 |
st.header("Setup New Call")
|
| 150 |
with st.form("setup_form"):
|
| 151 |
+
bot_name = st.text_input("Your Name", value="Олексій"); client_name = st.text_input("Client Name", value="Олександр"); company = st.text_input("Company", value="SoftServe")
|
|
|
|
|
|
|
| 152 |
submitted = st.form_submit_button("🚀 Start Call")
|
| 153 |
if submitted:
|
| 154 |
st.session_state.lead_info = {"name": client_name, "bot_name": bot_name, "company": company}
|
| 155 |
+
st.session_state.page = "chat"; st.session_state.messages = []; st.session_state.current_node = "start"; st.session_state.visited_history = []
|
|
|
|
|
|
|
|
|
|
| 156 |
st.rerun()
|
| 157 |
|
| 158 |
elif st.session_state.page == "chat":
|
| 159 |
+
col_chat, col_tools = st.columns([1.5, 1])
|
| 160 |
+
with col_chat:
|
| 161 |
+
st.header(f"Call with {st.session_state.lead_info.get('name', 'client')}")
|
| 162 |
+
for msg in st.session_state.messages:
|
| 163 |
+
with st.chat_message(msg["role"]): st.markdown(msg["content"])
|
| 164 |
+
|
| 165 |
+
with col_tools:
|
| 166 |
+
st.header("Analytics")
|
| 167 |
+
st.markdown("#### 🧠 Profile")
|
| 168 |
+
st.text(f"Archetype: {st.session_state.current_archetype} ({st.session_state.reasoning})")
|
| 169 |
+
st.markdown("#### 📊 Strategy")
|
| 170 |
+
path = bellman_ford_list(graph, node_to_id[st.session_state.current_node])
|
| 171 |
+
predicted_path = [id_to_node[i] for i, d in enumerate(path) if d != float('inf')] if path else []
|
| 172 |
+
st.graphviz_chart(draw_graph(graph_data, st.session_state.current_node, predicted_path), use_container_width=True)
|
| 173 |
|
| 174 |
if prompt := st.chat_input("Your reply..."):
|
| 175 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
| 176 |
+
with st.chat_message("user", container=col_chat): st.markdown(prompt)
|
|
|
|
| 177 |
|
| 178 |
analysis = analyze_full_context(model, prompt, st.session_state.current_node, st.session_state.messages)
|
| 179 |
+
st.session_state.current_archetype = analysis.get("archetype", "UNKNOWN")
|
| 180 |
+
st.session_state.reasoning = analysis.get("reasoning", "")
|
| 181 |
+
|
| 182 |
+
if analysis.get("intent") == "MOVE":
|
| 183 |
+
if st.session_state.current_node not in st.session_state.visited_history: st.session_state.visited_history.append(st.session_state.current_node)
|
| 184 |
+
curr_id = node_to_id[st.session_state.current_node]
|
| 185 |
+
best_next = min(graph.adj_list[curr_id], key=lambda x: x[1], default=None)
|
| 186 |
+
if best_next: st.session_state.current_node = id_to_node[best_next[0]]
|
| 187 |
+
else: st.warning("End of script."); st.stop()
|
| 188 |
|
| 189 |
+
instruction_text = nodes[st.session_state.current_node]
|
| 190 |
+
with st.chat_message("assistant", container=col_chat):
|
| 191 |
+
message_placeholder = st.empty()
|
| 192 |
+
full_response = ""
|
| 193 |
+
stream = generate_response_stream(model, instruction_text, prompt, st.session_state.lead_info, st.session_state.current_archetype)
|
| 194 |
+
for chunk in stream:
|
| 195 |
+
full_response += (chunk.text or ""); message_placeholder.markdown(full_response + "▌")
|
| 196 |
+
message_placeholder.markdown(full_response)
|
| 197 |
+
st.session_state.messages.append({"role": "assistant", "content": full_response})
|
| 198 |
+
st.rerun()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 199 |
|
| 200 |
elif mode == "⚔️ Evolution Hub":
|
| 201 |
st.title("⚔️ The Colosseum: AI Evolution Hub")
|
|
|
|
| 204 |
with c1:
|
| 205 |
num_simulations = st.number_input("Simulations to Run", 1, 50, 10)
|
| 206 |
if st.button(f"🚀 Run {num_simulations} Simulations"):
|
| 207 |
+
log_container = st.container(height=200); progress_bar = st.progress(0); reports = []
|
|
|
|
|
|
|
| 208 |
def progress_callback(report, current, total):
|
| 209 |
+
reports.append(report); progress_bar.progress(current / total)
|
|
|
|
| 210 |
persona = report['customer_persona']
|
| 211 |
log_container.write(f"Sim #{current}: Scen. {report['scenario_id']} vs {persona['archetype']} -> **{report['outcome']}** (Score: {report['score']})")
|
| 212 |
colosseum.run_batch_simulations(model, num_simulations, progress_callback)
|
| 213 |
st.success("Batch simulation complete!")
|
| 214 |
+
if reports:
|
| 215 |
+
st.header("📊 Post-Battle Report")
|
| 216 |
+
report_df = pd.DataFrame(reports)
|
| 217 |
+
best_id = report_df.groupby('scenario_id')['score'].mean().idxmax()
|
| 218 |
+
worst_id = report_df.groupby('scenario_id')['score'].mean().idxmin()
|
| 219 |
+
st.metric("Most Effective Scenario", f"ID: {best_id}", f"{report_df[report_df['scenario_id'] == best_id]['score'].mean():.2f} avg score")
|
| 220 |
+
st.metric("Least Effective Scenario", f"ID: {worst_id}", f"{report_df[report_df['scenario_id'] == worst_id]['score'].mean():.2f} avg score")
|
| 221 |
st.cache_data.clear()
|
| 222 |
with c2:
|
| 223 |
if st.button("🧬 Run Evolution Cycle"):
|
| 224 |
+
with st.spinner("Running evolution..."): evolution.run_evolution_cycle(model)
|
| 225 |
+
st.success("Evolution complete!"); st.cache_data.clear()
|
|
|
|
|
|
|
| 226 |
|
| 227 |
+
st.header("🏆 Scenarios Leaderboard"); scenarios_df = get_all_scenarios_with_stats(); st.dataframe(scenarios_df)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 228 |
if not scenarios_df.empty:
|
| 229 |
+
st.header("🕵️ Scenario Inspector")
|
| 230 |
selected_id = st.selectbox("Select Scenario ID:", scenarios_df['id'])
|
| 231 |
if selected_id:
|
| 232 |
c1, c2 = st.columns(2)
|
| 233 |
+
with c1: st.subheader(f"📜 Graph for Scenario {selected_id}"); st.json(get_scenario(selected_id), height=400)
|
| 234 |
+
with c2: st.subheader("👍👎 Phrase Analytics"); st.dataframe(get_phrase_analytics_for_scenario(selected_id))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 235 |
|
| 236 |
elif mode == "🧪 Math Lab":
|
| 237 |
st.title("🧪 Computational Math Lab")
|
|
|
|
| 238 |
st.info("Math Lab is ready.")
|
colosseum.py
CHANGED
|
@@ -2,6 +2,7 @@ import google.generativeai as genai
|
|
| 2 |
import json
|
| 3 |
import random
|
| 4 |
import time
|
|
|
|
| 5 |
from graph_module import Graph
|
| 6 |
from algorithms import bellman_ford_list
|
| 7 |
import database
|
|
@@ -10,17 +11,73 @@ import sqlite3
|
|
| 10 |
|
| 11 |
MODEL_NAME = "gemini-2.5-flash"
|
| 12 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
def generate_initial_population(model, count=5):
|
| 14 |
-
|
| 15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
def generate_customer_persona():
|
| 18 |
-
|
| 19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
def analyze_transcript(model, transcript_text):
|
| 22 |
-
|
| 23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
|
| 25 |
def run_single_simulation(model, scenario_id):
|
| 26 |
"""
|
|
@@ -31,14 +88,56 @@ def run_single_simulation(model, scenario_id):
|
|
| 31 |
return {"error": f"Scenario {scenario_id} not found."}
|
| 32 |
|
| 33 |
customer = generate_customer_persona()
|
| 34 |
-
# ... (simulation logic from previous version)
|
| 35 |
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
|
| 40 |
transcript_text = "\n".join([f"{m['role']}: {m['content']}" for m in transcript])
|
| 41 |
-
|
| 42 |
log_data = {
|
| 43 |
"scenario_id": scenario_id,
|
| 44 |
"customer_persona": customer,
|
|
@@ -46,13 +145,42 @@ def run_single_simulation(model, scenario_id):
|
|
| 46 |
"score": score,
|
| 47 |
"transcript": transcript_text
|
| 48 |
}
|
| 49 |
-
|
| 50 |
-
|
|
|
|
|
|
|
|
|
|
| 51 |
phrase_analysis = analyze_transcript(model, transcript_text)
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
return {
|
| 57 |
"scenario_id": scenario_id,
|
| 58 |
"customer_persona": customer,
|
|
@@ -69,10 +197,10 @@ def run_batch_simulations(model, num_simulations, progress_callback=None):
|
|
| 69 |
"""
|
| 70 |
database.init_db()
|
| 71 |
scenarios_df = database.get_all_scenarios_with_stats()
|
| 72 |
-
if scenarios_df.empty:
|
| 73 |
generate_initial_population(model)
|
| 74 |
scenarios_df = database.get_all_scenarios_with_stats()
|
| 75 |
-
if scenarios_df.empty:
|
| 76 |
return
|
| 77 |
|
| 78 |
scenario_ids = scenarios_df['id'].tolist()
|
|
@@ -81,5 +209,5 @@ def run_batch_simulations(model, num_simulations, progress_callback=None):
|
|
| 81 |
report = run_single_simulation(model, scenario_id)
|
| 82 |
if progress_callback:
|
| 83 |
progress_callback(report, i + 1, num_simulations)
|
| 84 |
-
|
| 85 |
print(f"\n--- Batch of {num_simulations} Simulations Finished ---")
|
|
|
|
| 2 |
import json
|
| 3 |
import random
|
| 4 |
import time
|
| 5 |
+
import os
|
| 6 |
from graph_module import Graph
|
| 7 |
from algorithms import bellman_ford_list
|
| 8 |
import database
|
|
|
|
| 11 |
|
| 12 |
MODEL_NAME = "gemini-2.5-flash"
|
| 13 |
|
| 14 |
+
def _build_graph_from_json(graph_json):
|
| 15 |
+
nodes = graph_json.get("nodes", {})
|
| 16 |
+
edges = graph_json.get("edges", [])
|
| 17 |
+
node_to_id = {name: i for i, name in enumerate(nodes.keys())}
|
| 18 |
+
g = Graph(len(node_to_id), directed=True)
|
| 19 |
+
for e in edges:
|
| 20 |
+
f = e.get("from"); t = e.get("to"); w = e.get("weight", 1)
|
| 21 |
+
if f in node_to_id and t in node_to_id:
|
| 22 |
+
g.add_edge(node_to_id[f], node_to_id[t], w)
|
| 23 |
+
return g, node_to_id
|
| 24 |
+
|
| 25 |
def generate_initial_population(model, count=5):
|
| 26 |
+
"""Seed the DB with at least one scenario from sales_script.json or a trivial default."""
|
| 27 |
+
database.init_db()
|
| 28 |
+
# If scenarios already exist, do nothing
|
| 29 |
+
df = database.get_all_scenarios_with_stats()
|
| 30 |
+
if df is not None and not df.empty:
|
| 31 |
+
return df['id'].tolist()
|
| 32 |
+
# Try to load sales_script.json
|
| 33 |
+
scenario_graph = None
|
| 34 |
+
script_path = "sales_script.json"
|
| 35 |
+
if os.path.exists(script_path):
|
| 36 |
+
with open(script_path, "r", encoding="utf-8") as f:
|
| 37 |
+
scenario_graph = json.load(f)
|
| 38 |
+
else:
|
| 39 |
+
# Minimal fallback graph
|
| 40 |
+
scenario_graph = {
|
| 41 |
+
"nodes": {
|
| 42 |
+
"start": "Вітання та визначення потреб",
|
| 43 |
+
"qualify": "Уточнюючі запитання",
|
| 44 |
+
"pitch": "Коротка презентація цінності",
|
| 45 |
+
"close_deal": "Погодження наступних кроків"
|
| 46 |
+
},
|
| 47 |
+
"edges": [
|
| 48 |
+
{"from": "start", "to": "qualify", "weight": 1},
|
| 49 |
+
{"from": "qualify", "to": "pitch", "weight": 1},
|
| 50 |
+
{"from": "pitch", "to": "close_deal", "weight": 1}
|
| 51 |
+
]
|
| 52 |
+
}
|
| 53 |
+
# Insert single scenario; ignore count for now (can be extended later)
|
| 54 |
+
scenario_id = database.add_scenario(scenario_graph, generation=0)
|
| 55 |
+
return [scenario_id]
|
| 56 |
|
| 57 |
def generate_customer_persona():
|
| 58 |
+
"""Return a simple random customer persona."""
|
| 59 |
+
archetypes = ["DRIVER", "ANALYST", "EXPRESSIVE", "CONSERVATIVE"]
|
| 60 |
+
industries = ["SaaS", "E-commerce", "Healthcare", "Manufacturing"]
|
| 61 |
+
persona = {
|
| 62 |
+
"name": random.choice(["Olena", "Taras", "Iryna", "Andrii"]),
|
| 63 |
+
"company": random.choice(["Acme Corp", "Globex", "Initech", "Umbrella"]),
|
| 64 |
+
"archetype": random.choice(archetypes),
|
| 65 |
+
"industry": random.choice(industries)
|
| 66 |
+
}
|
| 67 |
+
return persona
|
| 68 |
|
| 69 |
def analyze_transcript(model, transcript_text):
|
| 70 |
+
"""Very simple heuristic analysis: classify phrases as good/bad by keyword."""
|
| 71 |
+
good_kw = ["дякую", "цінність", "покращ", "результат", "економ"]
|
| 72 |
+
bad_kw = ["дорого", "неможливо", "не можу", "проблема"]
|
| 73 |
+
good = []
|
| 74 |
+
bad = []
|
| 75 |
+
for line in transcript_text.lower().splitlines():
|
| 76 |
+
if any(k in line for k in good_kw):
|
| 77 |
+
good.append(line.strip())
|
| 78 |
+
if any(k in line for k in bad_kw):
|
| 79 |
+
bad.append(line.strip())
|
| 80 |
+
return {"good_phrases": good, "bad_phrases": bad}
|
| 81 |
|
| 82 |
def run_single_simulation(model, scenario_id):
|
| 83 |
"""
|
|
|
|
| 88 |
return {"error": f"Scenario {scenario_id} not found."}
|
| 89 |
|
| 90 |
customer = generate_customer_persona()
|
|
|
|
| 91 |
|
| 92 |
+
# Build graph and plan path from start to close_deal greedily by BF distances
|
| 93 |
+
g, node_to_id = _build_graph_from_json(scenario_json)
|
| 94 |
+
id_to_node = {i: s for s, i in node_to_id.items()}
|
| 95 |
+
start_name = "start" if "start" in node_to_id else next(iter(node_to_id.keys()))
|
| 96 |
+
target_name = "close_deal" if "close_deal" in node_to_id else None
|
| 97 |
+
current = node_to_id[start_name]
|
| 98 |
+
transcript = []
|
| 99 |
+
visited = []
|
| 100 |
+
steps = 0
|
| 101 |
+
max_steps = len(node_to_id) * 3 if len(node_to_id) > 0 else 10
|
| 102 |
+
path_nodes = [current]
|
| 103 |
+
|
| 104 |
+
while steps < max_steps:
|
| 105 |
+
node_name = id_to_node[current]
|
| 106 |
+
visited.append(current)
|
| 107 |
+
# Agent speaks instruction
|
| 108 |
+
transcript.append({"role": "agent", "content": f"[{node_name}] Рухаємося далі..."})
|
| 109 |
+
if target_name and node_name == target_name:
|
| 110 |
+
break
|
| 111 |
+
# Choose best neighbor by distance to target
|
| 112 |
+
best_next = None
|
| 113 |
+
best_total = float("inf")
|
| 114 |
+
for (nbr, w) in g.get_list()[current]:
|
| 115 |
+
dists = bellman_ford_list(g, nbr, visited_nodes=set(visited))
|
| 116 |
+
if target_name:
|
| 117 |
+
close_id = node_to_id[target_name]
|
| 118 |
+
to_goal = dists[close_id]
|
| 119 |
+
else:
|
| 120 |
+
to_goal = 0
|
| 121 |
+
total = w + to_goal
|
| 122 |
+
if total < best_total:
|
| 123 |
+
best_total = total
|
| 124 |
+
best_next = nbr
|
| 125 |
+
if best_next is None:
|
| 126 |
+
break
|
| 127 |
+
# Customer reply stub
|
| 128 |
+
transcript.append({"role": "customer", "content": "Звучить цікаво."})
|
| 129 |
+
current = best_next
|
| 130 |
+
path_nodes.append(current)
|
| 131 |
+
steps += 1
|
| 132 |
+
|
| 133 |
+
final_node_name = id_to_node[current]
|
| 134 |
+
outcome = "Success" if final_node_name == target_name else "Fail"
|
| 135 |
+
# Score: reward success and fewer steps
|
| 136 |
+
base = 100 if outcome == "Success" else -20
|
| 137 |
+
score = base - steps * 2
|
| 138 |
|
| 139 |
transcript_text = "\n".join([f"{m['role']}: {m['content']}" for m in transcript])
|
| 140 |
+
|
| 141 |
log_data = {
|
| 142 |
"scenario_id": scenario_id,
|
| 143 |
"customer_persona": customer,
|
|
|
|
| 145 |
"score": score,
|
| 146 |
"transcript": transcript_text
|
| 147 |
}
|
| 148 |
+
try:
|
| 149 |
+
database.log_simulation(log_data)
|
| 150 |
+
except Exception:
|
| 151 |
+
pass
|
| 152 |
+
|
| 153 |
phrase_analysis = analyze_transcript(model, transcript_text)
|
| 154 |
+
|
| 155 |
+
# Save phrase analytics per GLOBAL node bucket
|
| 156 |
+
analytics_rows = []
|
| 157 |
+
for p in phrase_analysis.get("good_phrases", []):
|
| 158 |
+
analytics_rows.append({
|
| 159 |
+
"scenario_id": scenario_id,
|
| 160 |
+
"node_name": "GLOBAL",
|
| 161 |
+
"phrase": p,
|
| 162 |
+
"impact": "GOOD",
|
| 163 |
+
"count": 1
|
| 164 |
+
})
|
| 165 |
+
for p in phrase_analysis.get("bad_phrases", []):
|
| 166 |
+
analytics_rows.append({
|
| 167 |
+
"scenario_id": scenario_id,
|
| 168 |
+
"node_name": "GLOBAL",
|
| 169 |
+
"phrase": p,
|
| 170 |
+
"impact": "BAD",
|
| 171 |
+
"count": 1
|
| 172 |
+
})
|
| 173 |
+
if analytics_rows:
|
| 174 |
+
try:
|
| 175 |
+
database.update_phrase_analytics(analytics_rows)
|
| 176 |
+
except Exception:
|
| 177 |
+
pass
|
| 178 |
+
|
| 179 |
+
try:
|
| 180 |
+
database.update_scenario_fitness(scenario_id)
|
| 181 |
+
except Exception:
|
| 182 |
+
pass
|
| 183 |
+
|
| 184 |
return {
|
| 185 |
"scenario_id": scenario_id,
|
| 186 |
"customer_persona": customer,
|
|
|
|
| 197 |
"""
|
| 198 |
database.init_db()
|
| 199 |
scenarios_df = database.get_all_scenarios_with_stats()
|
| 200 |
+
if scenarios_df is None or scenarios_df.empty:
|
| 201 |
generate_initial_population(model)
|
| 202 |
scenarios_df = database.get_all_scenarios_with_stats()
|
| 203 |
+
if scenarios_df is None or scenarios_df.empty:
|
| 204 |
return
|
| 205 |
|
| 206 |
scenario_ids = scenarios_df['id'].tolist()
|
|
|
|
| 209 |
report = run_single_simulation(model, scenario_id)
|
| 210 |
if progress_callback:
|
| 211 |
progress_callback(report, i + 1, num_simulations)
|
| 212 |
+
|
| 213 |
print(f"\n--- Batch of {num_simulations} Simulations Finished ---")
|
database.py
CHANGED
|
@@ -53,10 +53,41 @@ def init_db():
|
|
| 53 |
conn.commit()
|
| 54 |
|
| 55 |
def add_lead(lead_data):
|
| 56 |
-
"""Adds a new lead to the database.
|
|
|
|
|
|
|
|
|
|
| 57 |
with sqlite3.connect(DB_FILE) as conn:
|
| 58 |
-
|
| 59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
# --- Functions that write data don't get cached ---
|
| 62 |
|
|
@@ -98,33 +129,120 @@ def get_all_scenarios_with_stats():
|
|
| 98 |
def get_simulations_for_scenario(scenario_id, limit=10):
|
| 99 |
"""Retrieves recent simulations for a specific scenario."""
|
| 100 |
with sqlite3.connect(DB_FILE) as conn:
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
)
|
|
|
|
| 105 |
|
| 106 |
@st.cache_data
|
| 107 |
def get_phrase_analytics_for_scenario(scenario_id):
|
| 108 |
"""Retrieves phrase analytics for a specific scenario."""
|
| 109 |
with sqlite3.connect(DB_FILE) as conn:
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
)
|
|
|
|
| 114 |
|
| 115 |
# --- Write functions (no caching) ---
|
| 116 |
def add_scenario(graph_json, generation=0):
|
| 117 |
-
|
| 118 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 119 |
def log_simulation(log_data):
|
| 120 |
-
|
| 121 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
def update_phrase_analytics(analytics_data):
|
| 123 |
-
|
| 124 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 125 |
def update_scenario_fitness(scenario_id):
|
| 126 |
-
|
| 127 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 128 |
|
| 129 |
if __name__ == '__main__':
|
| 130 |
print("Initializing database for Colosseum...")
|
|
|
|
| 53 |
conn.commit()
|
| 54 |
|
| 55 |
def add_lead(lead_data):
    """Add a new lead row to the ``leads`` table.

    Args:
        lead_data: dict whose keys are a subset of the leads table
            columns; unrecognized keys are silently ignored.

    Returns:
        The rowid of the inserted lead, or ``None`` when ``lead_data``
        contains no recognized columns (nothing is inserted).
    """
    # Ensure the schema exists BEFORE opening the insert connection
    # (the original called init_db() after the cursor was created;
    # add_scenario already follows this order).
    init_db()
    # Whitelist of insertable columns; filtering lead_data against it
    # also keeps the dynamically built SQL safe from injection via keys.
    allowed = [
        "Date", "Name", "Company", "Type", "Context",
        "Pain_Point", "Budget", "Outcome", "Summary",
        "Archetype", "Transcript",
    ]
    cols_used = [col for col in allowed if col in lead_data]
    if not cols_used:
        return None
    values = tuple(lead_data[col] for col in cols_used)
    placeholders = ", ".join("?" for _ in cols_used)
    cols_sql = ", ".join(cols_used)
    with sqlite3.connect(DB_FILE) as conn:
        cursor = conn.cursor()
        cursor.execute(
            f"INSERT INTO leads ({cols_sql}) VALUES ({placeholders})",
            values,
        )
        conn.commit()
        new_id = cursor.lastrowid
    # Cached readers are now stale; clearing is best-effort only
    # (st.cache_data may be unavailable outside the Streamlit runtime).
    try:
        st.cache_data.clear()
    except Exception:
        pass
    return new_id
|
| 91 |
|
| 92 |
# --- Functions that write data don't get cached ---
|
| 93 |
|
|
|
|
| 129 |
def get_simulations_for_scenario(scenario_id, limit=10):
    """Return the most recent simulations for one scenario as a DataFrame.

    Rows are ordered newest-first and capped at ``limit``.
    """
    sql = (
        "SELECT outcome, score, customer_persona FROM simulations "
        "WHERE scenario_id = ? ORDER BY id DESC LIMIT ?"
    )
    with sqlite3.connect(DB_FILE) as conn:
        return pd.read_sql_query(sql, conn, params=(scenario_id, limit))
|
| 137 |
|
| 138 |
@st.cache_data
def get_phrase_analytics_for_scenario(scenario_id):
    """Return phrase-level analytics rows for one scenario (cached).

    Results are sorted by descending usage count.
    """
    sql = (
        "SELECT phrase, impact, count, node_name FROM phrase_analytics "
        "WHERE scenario_id = ? ORDER BY count DESC"
    )
    with sqlite3.connect(DB_FILE) as conn:
        return pd.read_sql_query(sql, conn, params=(scenario_id,))
|
| 147 |
|
| 148 |
# --- Write functions (no caching) ---
|
| 149 |
def add_scenario(graph_json, generation=0):
    """Insert a new scenario and return its ID.

    Args:
        graph_json: the scenario graph, either a JSON-serializable
            object or an already-encoded JSON string.
        generation: evolutionary generation number (default 0).

    Returns:
        The rowid of the newly inserted scenario.
    """
    init_db()
    # Avoid double-encoding when the caller already passes a JSON string
    # (same convention log_simulation uses for customer_persona).
    blob = graph_json if isinstance(graph_json, str) else json.dumps(graph_json)
    with sqlite3.connect(DB_FILE) as conn:
        cursor = conn.cursor()
        cursor.execute(
            "INSERT INTO scenarios (generation, fitness_score, graph_json) VALUES (?, ?, ?)",
            (generation, 0.0, blob),
        )
        conn.commit()
        new_id = cursor.lastrowid
    # Invalidate cached readers; best-effort outside the Streamlit runtime.
    try:
        st.cache_data.clear()
    except Exception:
        pass
    return new_id
|
| 164 |
+
|
| 165 |
def log_simulation(log_data):
    """Persist one simulation run.

    Required keys in ``log_data``: scenario_id, customer_persona,
    outcome, score, transcript.

    Raises:
        ValueError: if any required key is missing.

    Returns:
        The rowid of the inserted simulation row.
    """
    for k in ("scenario_id", "customer_persona", "outcome", "score", "transcript"):
        if k not in log_data:
            raise ValueError(f"Missing field in log_data: {k}")
    # Personas may arrive as dicts or pre-serialized JSON strings.
    persona = log_data["customer_persona"]
    if not isinstance(persona, str):
        persona = json.dumps(persona)
    row = (
        log_data["scenario_id"],
        persona,
        log_data["outcome"],
        int(log_data["score"]),
        log_data["transcript"],
    )
    with sqlite3.connect(DB_FILE) as conn:
        cursor = conn.cursor()
        cursor.execute(
            "INSERT INTO simulations (scenario_id, customer_persona, outcome, score, transcript) "
            "VALUES (?, ?, ?, ?, ?)",
            row,
        )
        conn.commit()
        new_id = cursor.lastrowid
    # Stale cached readers are cleared opportunistically.
    try:
        st.cache_data.clear()
    except Exception:
        pass
    return new_id
|
| 190 |
+
|
| 191 |
def update_phrase_analytics(analytics_data):
    """Upsert phrase analytics counters.

    Args:
        analytics_data: list of dicts with keys scenario_id, node_name,
            phrase, impact and an optional count (defaults to 1).
            Entries missing any of the four required keys are skipped.

    Returns:
        The number of entries processed.
    """
    if not analytics_data:
        return 0
    updated = 0
    with sqlite3.connect(DB_FILE) as conn:
        cursor = conn.cursor()
        for item in analytics_data:
            scenario_id = item.get("scenario_id")
            node_name = item.get("node_name")
            phrase = item.get("phrase")
            impact = item.get("impact")
            count = int(item.get("count", 1))
            if not all([scenario_id, node_name, phrase, impact]):
                continue
            # BUG FIX: seed new rows with count 0 so the increment below
            # is applied exactly once. Previously the row was inserted
            # with `count` and then the unconditional UPDATE added
            # `count` again, double-counting every brand-new phrase.
            cursor.execute(
                "INSERT OR IGNORE INTO phrase_analytics (scenario_id, node_name, phrase, impact, count) "
                "VALUES (?, ?, ?, ?, 0)",
                (scenario_id, node_name, phrase, impact)
            )
            cursor.execute(
                "UPDATE phrase_analytics SET count = count + ? WHERE scenario_id = ? AND node_name = ? AND phrase = ? AND impact = ?",
                (count, scenario_id, node_name, phrase, impact)
            )
            updated += 1
        conn.commit()
    # NOTE(review): INSERT OR IGNORE only deduplicates if the table has
    # a UNIQUE constraint over (scenario_id, node_name, phrase, impact)
    # — confirm against the schema created in init_db().
    try:
        st.cache_data.clear()
    except Exception:
        pass
    return updated
|
| 225 |
+
|
| 226 |
def update_scenario_fitness(scenario_id):
    """Recompute a scenario's fitness as the mean score of its simulations.

    Writes the average back to the scenarios table and returns it;
    scenarios with no simulations yet get a fitness of 0.0.
    """
    with sqlite3.connect(DB_FILE) as conn:
        cursor = conn.cursor()
        cursor.execute(
            "SELECT AVG(score) FROM simulations WHERE scenario_id = ?",
            (scenario_id,),
        )
        result = cursor.fetchone()
        # AVG() yields NULL when there are no matching rows.
        avg_score = 0.0 if result is None or result[0] is None else result[0]
        cursor.execute(
            "UPDATE scenarios SET fitness_score = ? WHERE id = ?",
            (avg_score, scenario_id),
        )
        conn.commit()
    # Best-effort invalidation of cached readers.
    try:
        st.cache_data.clear()
    except Exception:
        pass
    return avg_score
|
| 246 |
|
| 247 |
if __name__ == '__main__':
|
| 248 |
print("Initializing database for Colosseum...")
|
engine.py
CHANGED
|
@@ -67,8 +67,10 @@ class SalesEngine:
|
|
| 67 |
best_total_distance = float('inf')
|
| 68 |
|
| 69 |
for neighbor_id, edge_weight in adj_list[current_id]:
|
| 70 |
-
#
|
| 71 |
-
|
|
|
|
|
|
|
| 72 |
if total_distance < best_total_distance:
|
| 73 |
best_total_distance = total_distance
|
| 74 |
best_next_id = neighbor_id
|
|
|
|
| 67 |
best_total_distance = float('inf')
|
| 68 |
|
| 69 |
for neighbor_id, edge_weight in adj_list[current_id]:
|
| 70 |
+
# Compute distance from this neighbor to close_deal
|
| 71 |
+
neighbor_distances = bellman_ford_list(self.graph, neighbor_id)
|
| 72 |
+
to_close = neighbor_distances[close_deal_id]
|
| 73 |
+
total_distance = edge_weight + to_close
|
| 74 |
if total_distance < best_total_distance:
|
| 75 |
best_total_distance = total_distance
|
| 76 |
best_next_id = neighbor_id
|