Spaces:
Sleeping
Sleeping
Commit ·
be83ac2
1
Parent(s): 0079b07
Add product context and graph connectivity features
Browse filesIntroduces product_info to session state and injects product context into response and greeting generation for more tailored sales interactions. Refactors UI for product/service setup, streamlines code comments, and adds a check_connectivity method to Graph for verifying graph connectivity in Math Lab. Minor UI and logic improvements throughout for clarity and maintainability.
- app.py +132 -317
- graph_module.py +25 -0
app.py
CHANGED
|
@@ -21,17 +21,17 @@ if "page" not in st.session_state: st.session_state.page = "dashboard"
|
|
| 21 |
if "messages" not in st.session_state: st.session_state.messages = []
|
| 22 |
if "current_node" not in st.session_state: st.session_state.current_node = "start"
|
| 23 |
if "lead_info" not in st.session_state: st.session_state.lead_info = {}
|
| 24 |
-
if "
|
|
|
|
| 25 |
if "current_archetype" not in st.session_state: st.session_state.current_archetype = "UNKNOWN"
|
| 26 |
if "reasoning" not in st.session_state: st.session_state.reasoning = ""
|
| 27 |
if "current_sentiment" not in st.session_state: st.session_state.current_sentiment = 0.0
|
| 28 |
-
# Checklist status based on your screenshot
|
| 29 |
if "checklist" not in st.session_state:
|
| 30 |
st.session_state.checklist = {
|
| 31 |
"Identify Customer": False,
|
| 32 |
"Determine Objectives": False,
|
| 33 |
"Outline Advantages": False,
|
| 34 |
-
"Keep it Brief": True,
|
| 35 |
"Experiment/Revise": False
|
| 36 |
}
|
| 37 |
|
|
@@ -46,7 +46,6 @@ def init_db():
|
|
| 46 |
|
| 47 |
def save_lead_to_db(lead_info, chat_history, outcome):
|
| 48 |
init_db()
|
| 49 |
-
# Ask AI to extract structured data from chat
|
| 50 |
model = genai.GenerativeModel(MODEL_NAME)
|
| 51 |
chat_text = "\n".join([f"{m['role']}: {m['content']}" for m in chat_history])
|
| 52 |
|
|
@@ -61,7 +60,6 @@ def save_lead_to_db(lead_info, chat_history, outcome):
|
|
| 61 |
"""
|
| 62 |
try:
|
| 63 |
response = model.generate_content(prompt)
|
| 64 |
-
# Simple parsing (in production use structured output)
|
| 65 |
ai_data = response.text
|
| 66 |
except:
|
| 67 |
ai_data = "AI Extraction Failed"
|
|
@@ -72,7 +70,7 @@ def save_lead_to_db(lead_info, chat_history, outcome):
|
|
| 72 |
"Company": lead_info.get("company"),
|
| 73 |
"Type": lead_info.get("type"),
|
| 74 |
"Context": lead_info.get("context"),
|
| 75 |
-
"Pain Point": "AI Analysis Pending",
|
| 76 |
"Budget": "Unknown",
|
| 77 |
"Outcome": outcome,
|
| 78 |
"Summary": f"Call with {len(chat_history)} messages. {outcome}"
|
|
@@ -103,11 +101,9 @@ def load_graph_data():
|
|
| 103 |
return graph, node_to_id, id_to_node, nodes, edges
|
| 104 |
|
| 105 |
def get_predicted_path(graph, start_id, target_id, id_to_node, node_to_id):
|
| 106 |
-
# Get visited node IDs and client type for smart pathfinding
|
| 107 |
visited_ids = [node_to_id[n] for n in st.session_state.get('visited_history', []) if n in node_to_id]
|
| 108 |
client_type = st.session_state.lead_info.get('type', 'B2B')
|
| 109 |
|
| 110 |
-
# Use enhanced Bellman-Ford with penalties
|
| 111 |
dist = bellman_ford_list(graph, start_id, visited_nodes=visited_ids, client_type=client_type)
|
| 112 |
if dist[target_id] == float('inf'): return []
|
| 113 |
path = [target_id]
|
|
@@ -125,9 +121,6 @@ def get_predicted_path(graph, start_id, target_id, id_to_node, node_to_id):
|
|
| 125 |
return [id_to_node[i] for i in reversed(path)]
|
| 126 |
|
| 127 |
def analyze_full_context(model, user_input, current_node, chat_history):
|
| 128 |
-
"""
|
| 129 |
-
Аналізує Інтент з налаштуванням "NEVER GIVE UP".
|
| 130 |
-
"""
|
| 131 |
history_text = "\n".join([f"{m['role']}: {m['content']}" for m in chat_history[-4:]])
|
| 132 |
|
| 133 |
prompt = f"""
|
|
@@ -141,12 +134,7 @@ def analyze_full_context(model, user_input, current_node, chat_history):
|
|
| 141 |
|
| 142 |
CRITICAL RULES FOR INTENT:
|
| 143 |
1. **EXIT** triggers ONLY if user is HOSTILE or EXPLICITLY ends the call.
|
| 144 |
-
- Examples: "Stop calling me", "Fuck off", "Put me on blacklist", "Bye", "Hang up".
|
| 145 |
-
|
| 146 |
2. **STAY** (Objection Handling) triggers for ANY resistance.
|
| 147 |
-
- Examples: "Not interested", "No time", "We have a vendor", "Too expensive", "Send info to mail".
|
| 148 |
-
- Even if they say "No" to the first question -> It is NOT an exit. It is an objection to handle!
|
| 149 |
-
|
| 150 |
3. **MOVE** triggers only if user agrees or answers a question positively.
|
| 151 |
|
| 152 |
OUTPUT JSON format:
|
|
@@ -161,31 +149,40 @@ def analyze_full_context(model, user_input, current_node, chat_history):
|
|
| 161 |
clean_text = response.text.replace("```json", "").replace("```", "").strip()
|
| 162 |
return json.loads(clean_text)
|
| 163 |
except:
|
| 164 |
-
# За замовчуванням STAY! Краще зайвий раз перепитати, ніж кинути слухавку.
|
| 165 |
return {"archetype": "UNKNOWN", "intent": "STAY", "reasoning": "Fallback safety"}
|
| 166 |
|
| 167 |
-
def generate_response(model, instruction_text, user_input, intent, lead_info, archetype):
|
| 168 |
"""
|
| 169 |
-
|
| 170 |
"""
|
| 171 |
-
# 1. Формуємо портрет клієнта
|
| 172 |
bot_name = lead_info.get('bot_name', 'Олексій')
|
| 173 |
client_name = lead_info.get('name', 'Клієнт')
|
| 174 |
company = lead_info.get('company', 'Компанія')
|
| 175 |
context = lead_info.get('context', 'Cold')
|
| 176 |
|
| 177 |
-
# 2. Налаштування стилю (Tone of Voice)
|
| 178 |
tone = "Professional, confident."
|
| 179 |
if archetype == "DRIVER": tone = "Direct, concise, results-oriented (Time is money)."
|
| 180 |
elif archetype == "ANALYST": tone = "Logical, factual, detailed."
|
| 181 |
elif archetype == "EXPRESSIVE": tone = "Energetic, inspiring, emotional."
|
| 182 |
elif archetype == "CONSERVATIVE": tone = "Calm, supportive, reassuring."
|
| 183 |
|
| 184 |
-
# 3. Налаштування довжини (Brevity)
|
| 185 |
length_instruction = "Keep it concise."
|
| 186 |
if "Cold" in context: length_instruction = "Extremely short and punchy (Elevator Pitch)."
|
| 187 |
|
| 188 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 189 |
prompt = f"""
|
| 190 |
ROLE: You are {bot_name}, a top-tier sales representative at SellMe AI.
|
| 191 |
CLIENT: {client_name} from {company}.
|
|
@@ -194,6 +191,8 @@ def generate_response(model, instruction_text, user_input, intent, lead_info, ar
|
|
| 194 |
INTENT DETECTED: {intent}
|
| 195 |
ARCHETYPE: {archetype}
|
| 196 |
|
|
|
|
|
|
|
| 197 |
TASK: Generate the spoken response in Ukrainian.
|
| 198 |
|
| 199 |
CRITICAL RULES:
|
|
@@ -211,17 +210,27 @@ def generate_response(model, instruction_text, user_input, intent, lead_info, ar
|
|
| 211 |
except Exception as e:
|
| 212 |
return f"[System Error: {e}]"
|
| 213 |
|
| 214 |
-
def generate_greeting(model, start_instruction, lead_info):
|
| 215 |
bot_name = lead_info.get('bot_name', 'Manager')
|
| 216 |
client_name = lead_info.get('name', 'Client')
|
| 217 |
context = lead_info.get('context', 'Cold')
|
| 218 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 219 |
prompt = f"""
|
| 220 |
ROLE: Sales Rep {bot_name}.
|
| 221 |
CLIENT: {client_name}.
|
| 222 |
CONTEXT: {context} call.
|
| 223 |
INSTRUCTION: "{start_instruction}"
|
| 224 |
|
|
|
|
|
|
|
| 225 |
TASK: Generate the opening line.
|
| 226 |
- If Cold Call: Be brief, aggressive (pattern interrupt).
|
| 227 |
- If Warm Call: Be welcoming, reference the application.
|
|
@@ -234,30 +243,17 @@ def generate_greeting(model, start_instruction, lead_info):
|
|
| 234 |
|
| 235 |
|
| 236 |
def train_brain():
|
| 237 |
-
"""
|
| 238 |
-
RL MODULE: Аналізує минулі діалоги і оновлює ваги графа.
|
| 239 |
-
"""
|
| 240 |
-
# 1. Завантажуємо статистику
|
| 241 |
df, _ = get_analytics()
|
| 242 |
if df is None or df.empty or "Transcript" not in df.columns:
|
| 243 |
return "Недостатньо даних для навчання."
|
| 244 |
|
| 245 |
-
# 2. Завантажуємо поточний граф
|
| 246 |
graph, node_to_id, id_to_node, nodes, edges = load_graph_data()
|
| 247 |
-
|
| 248 |
-
# 3. Аналіз успішних/провальних шляхів
|
| 249 |
-
# (Це спрощена логіка: ми шукаємо ключові слова кроків у транскрипті)
|
| 250 |
-
success_bonuses = {} # node_name -> bonus
|
| 251 |
|
| 252 |
for index, row in df.iterrows():
|
| 253 |
is_success = (row["Outcome"] == "Success")
|
| 254 |
transcript = str(row["Transcript"])
|
| 255 |
-
|
| 256 |
-
# Проходимо по всіх вузлах і шукаємо, чи були вони в діалозі
|
| 257 |
-
# (Це грубий метод, в ідеалі треба зберігати шлях ID у базу)
|
| 258 |
for node_name, node_text in nodes.items():
|
| 259 |
-
# Шукаємо унікальні шматки тексту інструкції в транскрипті, щоб зрозуміти, чи були ми там
|
| 260 |
-
# Або просто перевіряємо, чи згадується цей етап в логах
|
| 261 |
snippet = node_text[:20]
|
| 262 |
if snippet in transcript:
|
| 263 |
if is_success:
|
|
@@ -265,31 +261,23 @@ def train_brain():
|
|
| 265 |
else:
|
| 266 |
success_bonuses[node_name] = success_bonuses.get(node_name, 0) - 1
|
| 267 |
|
| 268 |
-
# 4. Оновлення ваг (Reinforcement)
|
| 269 |
new_edges = []
|
| 270 |
changes_log = []
|
| 271 |
-
|
| 272 |
for edge in edges:
|
| 273 |
u_name, v_name = edge["from"], edge["to"]
|
| 274 |
old_weight = edge["weight"]
|
| 275 |
new_weight = old_weight
|
| 276 |
-
|
| 277 |
-
# Якщо вузол 'to' часто зустрічається в успішних діалогах -> зменшуємо вагу вхідних ребер
|
| 278 |
score = success_bonuses.get(v_name, 0)
|
| 279 |
|
| 280 |
-
if score > 0:
|
| 281 |
-
|
| 282 |
-
elif score < 0: # Провальний вузол
|
| 283 |
-
new_weight *= 1.1 # Штраф 10%
|
| 284 |
|
| 285 |
-
# Обмеження щоб ваги не зламались
|
| 286 |
new_weight = max(1, min(new_weight, 100))
|
| 287 |
new_edges.append({"from": u_name, "to": v_name, "weight": round(new_weight, 2)})
|
| 288 |
|
| 289 |
if old_weight != new_weight:
|
| 290 |
changes_log.append(f"{u_name}->{v_name}: {old_weight} -> {new_weight}")
|
| 291 |
|
| 292 |
-
# 5. Збереження "Розумного" файлу
|
| 293 |
learned_data = {"nodes": nodes, "edges": new_edges}
|
| 294 |
with open("sales_script_learned.json", "w", encoding="utf-8") as f:
|
| 295 |
json.dump(learned_data, f, ensure_ascii=False, indent=2)
|
|
@@ -300,62 +288,27 @@ def train_brain():
|
|
| 300 |
def draw_graph(graph_data, current_node, predicted_path):
|
| 301 |
nodes = graph_data[3]
|
| 302 |
edges = graph_data[4]
|
| 303 |
-
|
| 304 |
dot = graphviz.Digraph()
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
dot.attr(
|
| 308 |
-
rankdir='TB', # Зверху вниз
|
| 309 |
-
splines='ortho', # Ламані лінії (прямі кути)
|
| 310 |
-
nodesep='0.3', # Мінімальний відступ збоку
|
| 311 |
-
ranksep='0.4', # Мінімальний відступ знизу (робить граф коротшим)
|
| 312 |
-
bgcolor='transparent' # Прозорий фон, щоб зливався з темою
|
| 313 |
-
)
|
| 314 |
-
|
| 315 |
-
# --- СТИЛЬ БЛОКІВ (Wide & Slim) ---
|
| 316 |
-
# shape='note' виглядає як документ, або 'box' для суворості
|
| 317 |
-
# fixedsize='false' дозволяє блоку розтягуватись під текст, але ми задаємо мінімальну ширину
|
| 318 |
-
dot.attr('node',
|
| 319 |
-
shape='box',
|
| 320 |
-
style='rounded,filled',
|
| 321 |
-
fontname='Arial',
|
| 322 |
-
fontsize='11',
|
| 323 |
-
width='2.5', # Робимо їх широкими
|
| 324 |
-
height='0.5', # Робимо їх низькими
|
| 325 |
-
margin='0.1' # Менше полів всередині блоку
|
| 326 |
-
)
|
| 327 |
-
|
| 328 |
-
# --- СТИЛЬ ЛІНІЙ ---
|
| 329 |
dot.attr('edge', fontname='Arial', fontsize='9', arrowsize='0.6')
|
| 330 |
|
| 331 |
for n in nodes:
|
| 332 |
-
# Базовий стиль (Світло-сірий, непомітний)
|
| 333 |
fill = '#F7F9F9'; color = '#BDC3C7'; pen = '1'; font = '#424949'
|
| 334 |
-
|
| 335 |
-
# Поточний крок (Червоний акцент)
|
| 336 |
if n == current_node:
|
| 337 |
fill = '#FF4B4B'; color = '#922B21'; pen = '2'; font = 'white'
|
| 338 |
-
|
| 339 |
-
# Золотий шлях (Жовтий підсвіт)
|
| 340 |
elif n in predicted_path:
|
| 341 |
fill = '#FEF9E7'; color = '#F1C40F'; pen = '1'; font = 'black'
|
| 342 |
-
|
| 343 |
-
# Малюємо вузол
|
| 344 |
dot.node(n, label=n, fillcolor=fill, color=color, penwidth=pen, fontcolor=font)
|
| 345 |
|
| 346 |
for e in edges:
|
| 347 |
-
color = '#D5D8DC'; pen = '1'
|
| 348 |
-
|
| 349 |
-
# Підсвітка шляху
|
| 350 |
if e["from"] in predicted_path and e["to"] in predicted_path:
|
| 351 |
try:
|
| 352 |
-
# Перевіряємо послідовність
|
| 353 |
if predicted_path.index(e["to"]) == predicted_path.index(e["from"]) + 1:
|
| 354 |
-
color = '#F1C40F'; pen = '2.5'
|
| 355 |
except: pass
|
| 356 |
-
|
| 357 |
dot.edge(e["from"], e["to"], color=color, penwidth=pen)
|
| 358 |
-
|
| 359 |
return dot
|
| 360 |
|
| 361 |
# --- MAIN APP ---
|
|
@@ -363,21 +316,12 @@ st.sidebar.title("🛠️ SellMe Control")
|
|
| 363 |
mode = st.sidebar.radio("Mode", ["🤖 Sales Bot CRM", "🧪 Math Lab"])
|
| 364 |
|
| 365 |
if mode == "🤖 Sales Bot CRM":
|
| 366 |
-
# --- SALES BOT DEMO (Moved & Indented) ---
|
| 367 |
-
|
| 368 |
-
# --- API KEY SETUP (Robust) ---
|
| 369 |
api_key = None
|
| 370 |
try:
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
api_key = st.secrets["GOOGLE_API_KEY"]
|
| 374 |
-
except:
|
| 375 |
-
# If secrets file is missing (Local run), just ignore and pass
|
| 376 |
-
pass
|
| 377 |
|
| 378 |
-
|
| 379 |
-
if not api_key:
|
| 380 |
-
api_key = st.sidebar.text_input("Google API Key", type="password")
|
| 381 |
|
| 382 |
if st.sidebar.button("📊 Dashboard"): st.session_state.page = "dashboard"; st.rerun()
|
| 383 |
if st.sidebar.button("📞 New Call"): st.session_state.page = "setup"; st.rerun()
|
|
@@ -389,103 +333,84 @@ if mode == "🤖 Sales Bot CRM":
|
|
| 389 |
configure_genai(api_key)
|
| 390 |
model = genai.GenerativeModel(MODEL_NAME)
|
| 391 |
graph_data = load_graph_data()
|
| 392 |
-
graph,
|
|
|
|
| 393 |
|
| 394 |
-
# ---
|
| 395 |
if st.session_state.page == "dashboard":
|
| 396 |
st.title("📊 CRM & Analytics Hub")
|
| 397 |
-
|
| 398 |
-
# Кнопка для запуску навчання
|
| 399 |
if st.button("🧠 Train AI on History (RL)"):
|
| 400 |
-
with st.spinner("Analyzing patterns... Updating weights..."):
|
| 401 |
-
msg = train_brain()
|
| 402 |
st.success(msg)
|
| 403 |
|
| 404 |
data, stats = get_analytics()
|
| 405 |
-
|
| 406 |
if data is not None and not data.empty:
|
| 407 |
-
# Метрики
|
| 408 |
c1, c2, c3 = st.columns(3)
|
| 409 |
c1.metric("Total Calls", stats["total"])
|
| 410 |
c2.metric("Success Rate", f"{stats['success_rate']}%")
|
| 411 |
-
c3.metric("AI Learning Iterations", "v1.2")
|
| 412 |
-
|
| 413 |
st.divider()
|
| 414 |
|
| 415 |
-
# Вибір дзвінка для детального аналізу
|
| 416 |
st.subheader("🕵️ Call Inspector")
|
| 417 |
-
|
| 418 |
-
# Створюємо список для селектора: "Дата - Ім'я - Результат"
|
| 419 |
options = data.apply(lambda x: f"{x['Date']} | {x['Name']} ({x['Outcome']})", axis=1).tolist()
|
| 420 |
selected_option = st.selectbox("Select a call to review:", options)
|
| 421 |
-
|
| 422 |
if selected_option:
|
| 423 |
-
# Знаходимо вибраний рядок
|
| 424 |
selected_row = data.iloc[options.index(selected_option)]
|
| 425 |
-
|
| 426 |
with st.expander("📝 Full Transcript & Insights", expanded=True):
|
| 427 |
st.markdown(f"**Client:** {selected_row['Name']} ({selected_row['Type']})")
|
| 428 |
st.markdown(f"**Result:** {selected_row['Outcome']}")
|
| 429 |
st.text_area("Transcript", str(selected_row.get("Transcript", "No transcript available")), height=300)
|
| 430 |
-
|
| 431 |
if "AI Insights" in selected_row and selected_row["AI Insights"]:
|
| 432 |
st.info(f"💡 **AI Insight:** {selected_row['AI Insights']}")
|
| 433 |
-
else:
|
| 434 |
-
|
| 435 |
-
|
| 436 |
-
else:
|
| 437 |
-
st.info("Database is empty. Make some calls!")
|
| 438 |
|
| 439 |
-
# ---
|
| 440 |
elif st.session_state.page == "setup":
|
| 441 |
st.title("👤 Налаштування Дзвінка")
|
| 442 |
-
|
| 443 |
with st.form("lead_form"):
|
| 444 |
-
st.markdown("### 👨💼 Хто дзвонить?")
|
| 445 |
-
bot_name = st.text_input("Ваше ім'я (Менеджера)", "Олексій")
|
| 446 |
-
|
| 447 |
-
st.markdown("### 📞 Кому дзвонимо?")
|
| 448 |
c1, c2 = st.columns(2)
|
|
|
|
|
|
|
|
|
|
| 449 |
name = c1.text_input("Ім'я Клієнта", "Олександр")
|
| 450 |
-
company =
|
| 451 |
-
|
| 452 |
type_ = c1.selectbox("Тип бізнесу", ["B2B", "B2C"])
|
| 453 |
-
context =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 454 |
|
| 455 |
-
|
| 456 |
-
# Спробуємо знайти клієнта в базі (якщо підключено Google Sheets)
|
| 457 |
-
if st.checkbox("🔍 Перевірити в базі (за ім'ям)"):
|
| 458 |
try:
|
| 459 |
-
# Це працює, якщо у нас є leads_manager з Google Sheets
|
| 460 |
from leads_manager import connect_to_gsheet
|
| 461 |
sheet = connect_to_gsheet()
|
| 462 |
if sheet:
|
| 463 |
records = sheet.get_all_records()
|
| 464 |
found = [r for r in records if str(r['Name']).lower() == name.lower()]
|
| 465 |
-
|
| 466 |
if found:
|
| 467 |
-
|
| 468 |
-
st.info(f"📜
|
| 469 |
-
st.warning(f"Результат минулого разу: {last_interaction['Outcome']}")
|
| 470 |
-
st.caption(f"Нотатки: {last_interaction.get('Summary', '')}")
|
| 471 |
-
|
| 472 |
-
# Можна автоматично змінити контекст на "Повторний"
|
| 473 |
context = "Повторний дзвінок"
|
| 474 |
-
else:
|
| 475 |
-
|
| 476 |
-
except Exception as e:
|
| 477 |
-
st.error("База даних недоступна.")
|
| 478 |
-
|
| 479 |
-
submitted = st.form_submit_button("🚀 Почати розмову")
|
| 480 |
|
|
|
|
| 481 |
if submitted:
|
| 482 |
-
# Зберігаємо все, включаючи ім'я бота
|
| 483 |
st.session_state.lead_info = {
|
| 484 |
-
"bot_name": bot_name,
|
| 485 |
-
"
|
| 486 |
-
|
| 487 |
-
|
| 488 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 489 |
}
|
| 490 |
st.session_state.messages = []
|
| 491 |
st.session_state.current_node = "start"
|
|
@@ -494,170 +419,94 @@ if mode == "🤖 Sales Bot CRM":
|
|
| 494 |
st.session_state.visited_history = []
|
| 495 |
st.rerun()
|
| 496 |
|
| 497 |
-
# ---
|
| 498 |
elif st.session_state.page == "chat":
|
| 499 |
st.markdown(f"### Call with {st.session_state.lead_info['name']}")
|
| 500 |
-
|
| 501 |
col_chat, col_tools = st.columns([1.5, 1])
|
| 502 |
|
| 503 |
with col_tools:
|
| 504 |
-
st.markdown("#### 🎯
|
| 505 |
-
# Logic to auto-update checklist based on node
|
| 506 |
if "qualification" in st.session_state.current_node: st.session_state.checklist["Identify Customer"] = True
|
| 507 |
-
if "pain" in st.session_state.current_node
|
| 508 |
if "pitch" in st.session_state.current_node: st.session_state.checklist["Outline Advantages"] = True
|
|
|
|
| 509 |
|
| 510 |
-
|
| 511 |
-
icon = "✅" if done else "⬜"
|
| 512 |
-
st.write(f"{icon} {goal}")
|
| 513 |
-
|
| 514 |
-
# Display Client Profile (Real-time)
|
| 515 |
-
st.markdown("#### 🧠 Client Profile (Real-time)")
|
| 516 |
-
|
| 517 |
-
# Get current archetype from session
|
| 518 |
current_archetype = st.session_state.get("current_archetype", "Analyzing...")
|
| 519 |
-
|
| 520 |
-
# Visual cards for archetypes
|
| 521 |
cols = st.columns(4)
|
| 522 |
-
|
| 523 |
-
|
| 524 |
-
|
| 525 |
-
|
| 526 |
-
cols[
|
| 527 |
-
|
| 528 |
-
cols[2].markdown(f"<div style='opacity:{get_opacity('EXPRESSIVE')}; font-size:20px; text-align:center'>🟡<br>Fan</div>", unsafe_allow_html=True)
|
| 529 |
-
cols[3].markdown(f"<div style='opacity:{get_opacity('CONSERVATIVE')}; font-size:20px; text-align:center'>🟢<br>Safe</div>", unsafe_allow_html=True)
|
| 530 |
-
|
| 531 |
-
if st.session_state.reasoning:
|
| 532 |
-
st.caption(f"🤖 AI Insight: {st.session_state.reasoning}")
|
| 533 |
|
| 534 |
-
st.markdown("#### 📊
|
| 535 |
curr_id = node_to_id[st.session_state.current_node]
|
| 536 |
target_id = node_to_id["close_standard"]
|
| 537 |
path = get_predicted_path(graph, curr_id, target_id, id_to_node, node_to_id)
|
| 538 |
-
st.graphviz_chart(
|
| 539 |
-
draw_graph(graph_data, st.session_state.current_node, path),
|
| 540 |
-
use_container_width=True # Розтягує граф на всю ширину колонки
|
| 541 |
-
)
|
| 542 |
|
| 543 |
-
|
| 544 |
-
st.markdown("---")
|
| 545 |
-
with st.expander("🧮 Алгоритм Беллмана-Форда (Live Logs)", expanded=False):
|
| 546 |
-
st.markdown("""
|
| 547 |
-
**Математика прийняття рішень:**
|
| 548 |
-
Алгоритм шукає шлях $P$, де сума ваг $W$ є мінімальною:
|
| 549 |
-
$$ D[v] = \\min(D[v], D[u] + W(u, v)) $$
|
| 550 |
-
""")
|
| 551 |
-
|
| 552 |
-
# 1. Calculate real data
|
| 553 |
visited_ids = [node_to_id[n] for n in st.session_state.get('visited_history', []) if n in node_to_id]
|
| 554 |
client_type = st.session_state.lead_info.get('type', 'B2B')
|
| 555 |
current_sentiment = st.session_state.get("current_sentiment", 0.0)
|
|
|
|
| 556 |
|
| 557 |
-
# Call algorithm to get distance array
|
| 558 |
-
raw_dist = bellman_ford_list(
|
| 559 |
-
graph,
|
| 560 |
-
curr_id,
|
| 561 |
-
visited_nodes=visited_ids,
|
| 562 |
-
client_type=client_type,
|
| 563 |
-
sentiment_score=current_sentiment
|
| 564 |
-
)
|
| 565 |
-
|
| 566 |
-
# 2. Build beautiful table for humans
|
| 567 |
debug_data = []
|
| 568 |
-
target_path_set = set(path)
|
| 569 |
-
|
| 570 |
for i, d in enumerate(raw_dist):
|
| 571 |
node_name = id_to_node[i]
|
| 572 |
-
|
| 573 |
-
# Format infinity
|
| 574 |
-
cost_display = "∞" if d == float('inf') else round(d, 2)
|
| 575 |
-
|
| 576 |
-
# Node status
|
| 577 |
status = "⬜"
|
| 578 |
if node_name == st.session_state.current_node: status = "📍 Start"
|
| 579 |
elif node_name in target_path_set: status = "✨ Path"
|
| 580 |
elif d == float('inf'): status = "🚫 Unreachable"
|
| 581 |
-
|
| 582 |
-
debug_data.append({
|
| 583 |
-
"Node": node_name,
|
| 584 |
-
"Cost (Weight)": cost_display,
|
| 585 |
-
"Status": status
|
| 586 |
-
})
|
| 587 |
|
| 588 |
-
# Convert to DataFrame
|
| 589 |
df_log = pd.DataFrame(debug_data)
|
| 590 |
-
|
| 591 |
-
|
| 592 |
-
df_log["sort_key"] = df_log["Cost (Weight)"].apply(lambda x: 9999 if x == "∞" else float(x))
|
| 593 |
-
df_log = df_log.sort_values(by="sort_key").drop(columns=["sort_key"])
|
| 594 |
-
|
| 595 |
-
# Display
|
| 596 |
-
st.dataframe(
|
| 597 |
-
df_log,
|
| 598 |
-
use_container_width=True,
|
| 599 |
-
hide_index=True
|
| 600 |
-
)
|
| 601 |
-
|
| 602 |
-
# 3. Explanation "Why?"
|
| 603 |
-
st.info(f"""
|
| 604 |
-
**Фактори впливу:**
|
| 605 |
-
- 🎭 **Емоція:** {current_sentiment} (впливає на вартість агресивних кроків)
|
| 606 |
-
- 🏢 **Тип:** {client_type} (змінює пріоритет швидкості)
|
| 607 |
-
- 🔄 **Повтори:** Вузли, де ми вже були, мають штраф x50.
|
| 608 |
-
""")
|
| 609 |
|
| 610 |
with col_chat:
|
| 611 |
for msg in st.session_state.messages:
|
| 612 |
with st.chat_message(msg["role"]): st.write(msg["content"])
|
| 613 |
|
| 614 |
-
# --- ГЕНЕРАЦІЯ ПЕРШОГО ПОВІДОМЛЕННЯ ---
|
| 615 |
if not st.session_state.messages:
|
| 616 |
-
with st.spinner("AI
|
| 617 |
-
#
|
| 618 |
-
greeting = generate_greeting(model, nodes["start"], st.session_state.lead_info)
|
| 619 |
-
|
| 620 |
st.session_state.messages.append({"role": "assistant", "content": greeting})
|
| 621 |
st.rerun()
|
| 622 |
|
| 623 |
if user_input := st.chat_input("Reply..."):
|
| 624 |
st.session_state.messages.append({"role": "user", "content": user_input})
|
| 625 |
-
|
| 626 |
-
# Logic - Analyze with full context including archetype detection
|
| 627 |
current_text = nodes[st.session_state.current_node]
|
| 628 |
analysis = analyze_full_context(model, user_input, st.session_state.current_node, st.session_state.messages)
|
| 629 |
-
intent = analysis.get("intent", "STAY")
|
| 630 |
-
archetype = analysis.get("archetype", "UNKNOWN")
|
| 631 |
-
reasoning = analysis.get("reasoning", "")
|
| 632 |
-
|
| 633 |
-
# Store archetype and reasoning for display
|
| 634 |
st.session_state.current_archetype = archetype
|
| 635 |
-
st.session_state.reasoning = reasoning
|
| 636 |
|
| 637 |
if "EXIT" in intent:
|
| 638 |
outcome = "Success" if "close" in st.session_state.current_node else "Fail"
|
| 639 |
save_lead_to_db(st.session_state.lead_info, st.session_state.messages, outcome)
|
| 640 |
st.success("Call Saved!")
|
| 641 |
st.session_state.page = "dashboard"; st.rerun()
|
| 642 |
-
|
| 643 |
elif "STAY" in intent:
|
| 644 |
-
|
| 645 |
-
|
| 646 |
else: # MOVE
|
| 647 |
-
|
| 648 |
-
if st.session_state.current_node not in st.session_state.visited_history:
|
| 649 |
st.session_state.visited_history.append(st.session_state.current_node)
|
| 650 |
-
|
| 651 |
-
|
| 652 |
-
|
| 653 |
-
for n, w in graph.adj_list[curr_id]:
|
| 654 |
if w < min_w: min_w = w; best_next = n
|
| 655 |
-
|
| 656 |
-
if best_next is not None:
|
| 657 |
st.session_state.current_node = id_to_node[best_next]
|
| 658 |
new_text = nodes[st.session_state.current_node]
|
| 659 |
-
|
| 660 |
-
|
|
|
|
| 661 |
resp = "Call finished."
|
| 662 |
save_lead_to_db(st.session_state.lead_info, st.session_state.messages, "End of Script")
|
| 663 |
|
|
@@ -666,95 +515,61 @@ if mode == "🤖 Sales Bot CRM":
|
|
| 666 |
|
| 667 |
elif mode == "🧪 Math Lab":
|
| 668 |
st.title("🧪 Computational Math Lab")
|
| 669 |
-
|
| 670 |
st.markdown("### Section A: Graph Inspector")
|
| 671 |
col1, col2 = st.columns(2)
|
| 672 |
n_nodes = col1.slider("N (Vertices)", 5, 15, 10)
|
| 673 |
density = col2.slider("Density", 0.1, 1.0, 0.5)
|
| 674 |
|
| 675 |
if st.button("Generate Graph"):
|
| 676 |
-
# Generate graph using experiments module
|
| 677 |
graph = experiments.generate_erdos_renyi(n_nodes, density)
|
| 678 |
st.session_state.lab_graph = graph
|
| 679 |
|
| 680 |
-
# Display if graph exists in session state
|
| 681 |
if 'lab_graph' in st.session_state:
|
| 682 |
graph = st.session_state.lab_graph
|
| 683 |
-
|
| 684 |
tab1, tab2, tab3 = st.tabs(["Visual Graph", "Adjacency Matrix", "Adjacency List"])
|
| 685 |
|
| 686 |
with tab1:
|
| 687 |
st.subheader("Graphviz Visualization")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 688 |
dot = graphviz.Digraph()
|
| 689 |
-
# Basic Graphviz from graph.adj_list
|
| 690 |
for u, neighbors in graph.adj_list.items():
|
| 691 |
dot.node(str(u), label=str(u))
|
| 692 |
-
for v, w in neighbors:
|
| 693 |
-
dot.edge(str(u), str(v), label=str(w))
|
| 694 |
st.graphviz_chart(dot)
|
| 695 |
|
| 696 |
with tab2:
|
| 697 |
-
st.subheader("Adjacency Matrix")
|
| 698 |
-
# Reuse logic to show infinity as string
|
| 699 |
matrix = graph.to_adjacency_matrix()
|
| 700 |
-
|
| 701 |
-
|
|
|
|
| 702 |
|
| 703 |
with tab3:
|
| 704 |
st.subheader("Adjacency List")
|
| 705 |
st.write(graph.adj_list)
|
| 706 |
|
| 707 |
st.divider()
|
| 708 |
-
|
| 709 |
st.markdown("### Section B: Scientific Experiments")
|
| 710 |
st.markdown("Comparing Bellman-Ford implementations: **Adjacency List vs Adjacency Matrix**.")
|
| 711 |
-
|
| 712 |
-
# Preset parameters
|
| 713 |
-
sizes_preset = list(range(20, 220, 20)) # [20, 40, ..., 200]
|
| 714 |
densities_preset = [0.1, 0.3, 0.5, 0.7, 0.9]
|
| 715 |
|
| 716 |
if st.button("🚀 Run Scientific Benchmark"):
|
| 717 |
-
with st.spinner("Running
|
| 718 |
-
# Execute run_scientific_benchmark from experiments.py
|
| 719 |
results = experiments.run_scientific_benchmark(sizes_preset, densities_preset, num_runs=20)
|
| 720 |
-
|
| 721 |
-
# Show Results Table
|
| 722 |
df_results = pd.DataFrame(results)
|
| 723 |
-
st.subheader("Raw
|
| 724 |
st.dataframe(df_results)
|
| 725 |
-
|
| 726 |
-
# --- Visualization Logic ---
|
| 727 |
st.divider()
|
| 728 |
-
st.subheader("Performance Comparison Chart")
|
| 729 |
-
|
| 730 |
-
# Use columns for layout
|
| 731 |
-
col_chart, col_filter = st.columns([3, 1])
|
| 732 |
-
|
| 733 |
-
with col_filter:
|
| 734 |
-
# Filter by Density
|
| 735 |
-
selected_density = st.selectbox(
|
| 736 |
-
"Select Density to Visualize:",
|
| 737 |
-
densities_preset,
|
| 738 |
-
index=2 # Default to 0.5
|
| 739 |
-
)
|
| 740 |
-
|
| 741 |
-
# Analysis
|
| 742 |
-
st.info(f"""
|
| 743 |
-
**Analysis for Density {selected_density}:**
|
| 744 |
-
|
| 745 |
-
- 🟦 **Time_List**: Uses Bellman-Ford on Adjacency List. Complexity $O(V \\cdot E)$.
|
| 746 |
-
- 🟥 **Time_Matrix**: Uses Bellman-Ford on Adjacency Matrix. Complexity $O(V^3)$.
|
| 747 |
-
""")
|
| 748 |
|
| 749 |
-
|
| 750 |
-
|
| 751 |
-
|
| 752 |
-
|
| 753 |
-
|
| 754 |
-
|
| 755 |
-
|
| 756 |
-
|
| 757 |
-
# Plot
|
| 758 |
-
st.line_chart(chart_data)
|
| 759 |
-
|
| 760 |
-
st.success(f"Benchmarking complete! Notice how Matrix implementation performance stays constant or redundant for sparse graphs, while List implementation scales with edges.")
|
|
|
|
| 21 |
if "messages" not in st.session_state: st.session_state.messages = []
|
| 22 |
if "current_node" not in st.session_state: st.session_state.current_node = "start"
|
| 23 |
if "lead_info" not in st.session_state: st.session_state.lead_info = {}
|
| 24 |
+
if "product_info" not in st.session_state: st.session_state.product_info = {} # NEW: Product Context
|
| 25 |
+
if "visited_history" not in st.session_state: st.session_state.visited_history = []
|
| 26 |
if "current_archetype" not in st.session_state: st.session_state.current_archetype = "UNKNOWN"
|
| 27 |
if "reasoning" not in st.session_state: st.session_state.reasoning = ""
|
| 28 |
if "current_sentiment" not in st.session_state: st.session_state.current_sentiment = 0.0
|
|
|
|
| 29 |
if "checklist" not in st.session_state:
|
| 30 |
st.session_state.checklist = {
|
| 31 |
"Identify Customer": False,
|
| 32 |
"Determine Objectives": False,
|
| 33 |
"Outline Advantages": False,
|
| 34 |
+
"Keep it Brief": True,
|
| 35 |
"Experiment/Revise": False
|
| 36 |
}
|
| 37 |
|
|
|
|
| 46 |
|
| 47 |
def save_lead_to_db(lead_info, chat_history, outcome):
|
| 48 |
init_db()
|
|
|
|
| 49 |
model = genai.GenerativeModel(MODEL_NAME)
|
| 50 |
chat_text = "\n".join([f"{m['role']}: {m['content']}" for m in chat_history])
|
| 51 |
|
|
|
|
| 60 |
"""
|
| 61 |
try:
|
| 62 |
response = model.generate_content(prompt)
|
|
|
|
| 63 |
ai_data = response.text
|
| 64 |
except:
|
| 65 |
ai_data = "AI Extraction Failed"
|
|
|
|
| 70 |
"Company": lead_info.get("company"),
|
| 71 |
"Type": lead_info.get("type"),
|
| 72 |
"Context": lead_info.get("context"),
|
| 73 |
+
"Pain Point": "AI Analysis Pending",
|
| 74 |
"Budget": "Unknown",
|
| 75 |
"Outcome": outcome,
|
| 76 |
"Summary": f"Call with {len(chat_history)} messages. {outcome}"
|
|
|
|
| 101 |
return graph, node_to_id, id_to_node, nodes, edges
|
| 102 |
|
| 103 |
def get_predicted_path(graph, start_id, target_id, id_to_node, node_to_id):
|
|
|
|
| 104 |
visited_ids = [node_to_id[n] for n in st.session_state.get('visited_history', []) if n in node_to_id]
|
| 105 |
client_type = st.session_state.lead_info.get('type', 'B2B')
|
| 106 |
|
|
|
|
| 107 |
dist = bellman_ford_list(graph, start_id, visited_nodes=visited_ids, client_type=client_type)
|
| 108 |
if dist[target_id] == float('inf'): return []
|
| 109 |
path = [target_id]
|
|
|
|
| 121 |
return [id_to_node[i] for i in reversed(path)]
|
| 122 |
|
| 123 |
def analyze_full_context(model, user_input, current_node, chat_history):
|
|
|
|
|
|
|
|
|
|
| 124 |
history_text = "\n".join([f"{m['role']}: {m['content']}" for m in chat_history[-4:]])
|
| 125 |
|
| 126 |
prompt = f"""
|
|
|
|
| 134 |
|
| 135 |
CRITICAL RULES FOR INTENT:
|
| 136 |
1. **EXIT** triggers ONLY if user is HOSTILE or EXPLICITLY ends the call.
|
|
|
|
|
|
|
| 137 |
2. **STAY** (Objection Handling) triggers for ANY resistance.
|
|
|
|
|
|
|
|
|
|
| 138 |
3. **MOVE** triggers only if user agrees or answers a question positively.
|
| 139 |
|
| 140 |
OUTPUT JSON format:
|
|
|
|
| 149 |
clean_text = response.text.replace("```json", "").replace("```", "").strip()
|
| 150 |
return json.loads(clean_text)
|
| 151 |
except:
|
|
|
|
| 152 |
return {"archetype": "UNKNOWN", "intent": "STAY", "reasoning": "Fallback safety"}
|
| 153 |
|
| 154 |
+
def generate_response(model, instruction_text, user_input, intent, lead_info, archetype, product_info={}):
|
| 155 |
"""
|
| 156 |
+
Generates a generic or product-specific response.
|
| 157 |
"""
|
|
|
|
| 158 |
bot_name = lead_info.get('bot_name', 'Олексій')
|
| 159 |
client_name = lead_info.get('name', 'Клієнт')
|
| 160 |
company = lead_info.get('company', 'Компанія')
|
| 161 |
context = lead_info.get('context', 'Cold')
|
| 162 |
|
|
|
|
| 163 |
tone = "Professional, confident."
|
| 164 |
if archetype == "DRIVER": tone = "Direct, concise, results-oriented (Time is money)."
|
| 165 |
elif archetype == "ANALYST": tone = "Logical, factual, detailed."
|
| 166 |
elif archetype == "EXPRESSIVE": tone = "Energetic, inspiring, emotional."
|
| 167 |
elif archetype == "CONSERVATIVE": tone = "Calm, supportive, reassuring."
|
| 168 |
|
|
|
|
| 169 |
length_instruction = "Keep it concise."
|
| 170 |
if "Cold" in context: length_instruction = "Extremely short and punchy (Elevator Pitch)."
|
| 171 |
|
| 172 |
+
# NEW: Product Context Injection
|
| 173 |
+
product_context = ""
|
| 174 |
+
if product_info:
|
| 175 |
+
product_context = f"""
|
| 176 |
+
PRODUCT CONTEXT:
|
| 177 |
+
You are selling: {product_info.get('product_name', 'Our Solution')}
|
| 178 |
+
Value Proposition: {product_info.get('product_value', 'High Value')}
|
| 179 |
+
Pricing: {product_info.get('product_price', 'Custom Pricing')}
|
| 180 |
+
Competitive Edge: {product_info.get('competitor_diff', 'Best in class')}
|
| 181 |
+
|
| 182 |
+
CRITICAL INSTRUCTION:
|
| 183 |
+
Whenever the script graph says "Pitch", "Price", or "Objection", use the PRODUCT CONTEXT above. Do NOT invent fake features.
|
| 184 |
+
"""
|
| 185 |
+
|
| 186 |
prompt = f"""
|
| 187 |
ROLE: You are {bot_name}, a top-tier sales representative at SellMe AI.
|
| 188 |
CLIENT: {client_name} from {company}.
|
|
|
|
| 191 |
INTENT DETECTED: {intent}
|
| 192 |
ARCHETYPE: {archetype}
|
| 193 |
|
| 194 |
+
{product_context}
|
| 195 |
+
|
| 196 |
TASK: Generate the spoken response in Ukrainian.
|
| 197 |
|
| 198 |
CRITICAL RULES:
|
|
|
|
| 210 |
except Exception as e:
|
| 211 |
return f"[System Error: {e}]"
|
| 212 |
|
| 213 |
+
def generate_greeting(model, start_instruction, lead_info, product_info={}):
|
| 214 |
bot_name = lead_info.get('bot_name', 'Manager')
|
| 215 |
client_name = lead_info.get('name', 'Client')
|
| 216 |
context = lead_info.get('context', 'Cold')
|
| 217 |
|
| 218 |
+
# NEW: Product Context Injection
|
| 219 |
+
product_context = ""
|
| 220 |
+
if product_info:
|
| 221 |
+
product_context = f"""
|
| 222 |
+
PRODUCT CONTEXT:
|
| 223 |
+
You are selling: {product_info.get('product_name', 'Our Solution')}
|
| 224 |
+
"""
|
| 225 |
+
|
| 226 |
prompt = f"""
|
| 227 |
ROLE: Sales Rep {bot_name}.
|
| 228 |
CLIENT: {client_name}.
|
| 229 |
CONTEXT: {context} call.
|
| 230 |
INSTRUCTION: "{start_instruction}"
|
| 231 |
|
| 232 |
+
{product_context}
|
| 233 |
+
|
| 234 |
TASK: Generate the opening line.
|
| 235 |
- If Cold Call: Be brief, aggressive (pattern interrupt).
|
| 236 |
- If Warm Call: Be welcoming, reference the application.
|
|
|
|
| 243 |
|
| 244 |
|
| 245 |
def train_brain():
|
|
|
|
|
|
|
|
|
|
|
|
|
| 246 |
df, _ = get_analytics()
|
| 247 |
if df is None or df.empty or "Transcript" not in df.columns:
|
| 248 |
return "Недостатньо даних для навчання."
|
| 249 |
|
|
|
|
| 250 |
graph, node_to_id, id_to_node, nodes, edges = load_graph_data()
|
| 251 |
+
success_bonuses = {}
|
|
|
|
|
|
|
|
|
|
| 252 |
|
| 253 |
for index, row in df.iterrows():
|
| 254 |
is_success = (row["Outcome"] == "Success")
|
| 255 |
transcript = str(row["Transcript"])
|
|
|
|
|
|
|
|
|
|
| 256 |
for node_name, node_text in nodes.items():
|
|
|
|
|
|
|
| 257 |
snippet = node_text[:20]
|
| 258 |
if snippet in transcript:
|
| 259 |
if is_success:
|
|
|
|
| 261 |
else:
|
| 262 |
success_bonuses[node_name] = success_bonuses.get(node_name, 0) - 1
|
| 263 |
|
|
|
|
| 264 |
new_edges = []
|
| 265 |
changes_log = []
|
|
|
|
| 266 |
for edge in edges:
|
| 267 |
u_name, v_name = edge["from"], edge["to"]
|
| 268 |
old_weight = edge["weight"]
|
| 269 |
new_weight = old_weight
|
|
|
|
|
|
|
| 270 |
score = success_bonuses.get(v_name, 0)
|
| 271 |
|
| 272 |
+
if score > 0: new_weight *= 0.9
|
| 273 |
+
elif score < 0: new_weight *= 1.1
|
|
|
|
|
|
|
| 274 |
|
|
|
|
| 275 |
new_weight = max(1, min(new_weight, 100))
|
| 276 |
new_edges.append({"from": u_name, "to": v_name, "weight": round(new_weight, 2)})
|
| 277 |
|
| 278 |
if old_weight != new_weight:
|
| 279 |
changes_log.append(f"{u_name}->{v_name}: {old_weight} -> {new_weight}")
|
| 280 |
|
|
|
|
| 281 |
learned_data = {"nodes": nodes, "edges": new_edges}
|
| 282 |
with open("sales_script_learned.json", "w", encoding="utf-8") as f:
|
| 283 |
json.dump(learned_data, f, ensure_ascii=False, indent=2)
|
|
|
|
| 288 |
def draw_graph(graph_data, current_node, predicted_path):
    """
    Render the sales-script graph as a Graphviz Digraph for the Strategy panel.

    Args:
        graph_data: Tuple from load_graph_data(); index 3 is the node dict
            (name -> instruction text), index 4 is the edge list of
            {"from", "to", "weight"} dicts.
        current_node: Name of the node the call is currently at; highlighted red.
        predicted_path: Ordered list of node names on the predicted route to the
            target; these nodes and the edges between consecutive path nodes are
            highlighted yellow.

    Returns:
        A graphviz.Digraph ready for st.graphviz_chart.
    """
    nodes = graph_data[3]
    edges = graph_data[4]

    dot = graphviz.Digraph()
    dot.attr(rankdir='TB', splines='ortho', nodesep='0.3', ranksep='0.4', bgcolor='transparent')
    dot.attr('node', shape='box', style='rounded,filled', fontname='Arial', fontsize='11', width='2.5', height='0.5', margin='0.1')
    dot.attr('edge', fontname='Arial', fontsize='9', arrowsize='0.6')

    for n in nodes:
        # Default (neutral) styling; overridden below for current / path nodes.
        fill = '#F7F9F9'; color = '#BDC3C7'; pen = '1'; font = '#424949'
        if n == current_node:
            fill = '#FF4B4B'; color = '#922B21'; pen = '2'; font = 'white'
        elif n in predicted_path:
            fill = '#FEF9E7'; color = '#F1C40F'; pen = '1'; font = 'black'
        dot.node(n, label=n, fillcolor=fill, color=color, penwidth=pen, fontcolor=font)

    # Precompute the consecutive (from, to) pairs along the predicted path once,
    # instead of calling predicted_path.index() (O(n)) per edge inside a bare
    # try/except.  Shortest-path reconstruction yields no duplicate nodes, so
    # pairwise adjacency is equivalent to the index(+1) comparison.
    path_pairs = set(zip(predicted_path, predicted_path[1:]))

    for e in edges:
        color = '#D5D8DC'; pen = '1'
        if (e["from"], e["to"]) in path_pairs:
            color = '#F1C40F'; pen = '2.5'
        dot.edge(e["from"], e["to"], color=color, penwidth=pen)

    return dot
|
| 313 |
|
| 314 |
# --- MAIN APP ---
|
|
|
|
| 316 |
mode = st.sidebar.radio("Mode", ["🤖 Sales Bot CRM", "🧪 Math Lab"])
|
| 317 |
|
| 318 |
if mode == "🤖 Sales Bot CRM":
|
|
|
|
|
|
|
|
|
|
| 319 |
api_key = None
|
| 320 |
try:
|
| 321 |
+
if "GOOGLE_API_KEY" in st.secrets: api_key = st.secrets["GOOGLE_API_KEY"]
|
| 322 |
+
except: pass
|
|
|
|
|
|
|
|
|
|
|
|
|
| 323 |
|
| 324 |
+
if not api_key: api_key = st.sidebar.text_input("Google API Key", type="password")
|
|
|
|
|
|
|
| 325 |
|
| 326 |
if st.sidebar.button("📊 Dashboard"): st.session_state.page = "dashboard"; st.rerun()
|
| 327 |
if st.sidebar.button("📞 New Call"): st.session_state.page = "setup"; st.rerun()
|
|
|
|
| 333 |
configure_genai(api_key)
|
| 334 |
model = genai.GenerativeModel(MODEL_NAME)
|
| 335 |
graph_data = load_graph_data()
|
| 336 |
+
graph, nodes = graph_data[0], graph_data[3]
|
| 337 |
+
node_to_id, id_to_node = graph_data[1], graph_data[2]
|
| 338 |
|
| 339 |
+
# --- DASHBOARD ---
|
| 340 |
if st.session_state.page == "dashboard":
|
| 341 |
st.title("📊 CRM & Analytics Hub")
|
|
|
|
|
|
|
| 342 |
if st.button("🧠 Train AI on History (RL)"):
|
| 343 |
+
with st.spinner("Analyzing patterns... Updating weights..."): msg = train_brain()
|
|
|
|
| 344 |
st.success(msg)
|
| 345 |
|
| 346 |
data, stats = get_analytics()
|
|
|
|
| 347 |
if data is not None and not data.empty:
|
|
|
|
| 348 |
c1, c2, c3 = st.columns(3)
|
| 349 |
c1.metric("Total Calls", stats["total"])
|
| 350 |
c2.metric("Success Rate", f"{stats['success_rate']}%")
|
| 351 |
+
c3.metric("AI Learning Iterations", "v1.2")
|
|
|
|
| 352 |
st.divider()
|
| 353 |
|
|
|
|
| 354 |
st.subheader("🕵️ Call Inspector")
|
|
|
|
|
|
|
| 355 |
options = data.apply(lambda x: f"{x['Date']} | {x['Name']} ({x['Outcome']})", axis=1).tolist()
|
| 356 |
selected_option = st.selectbox("Select a call to review:", options)
|
|
|
|
| 357 |
if selected_option:
|
|
|
|
| 358 |
selected_row = data.iloc[options.index(selected_option)]
|
|
|
|
| 359 |
with st.expander("📝 Full Transcript & Insights", expanded=True):
|
| 360 |
st.markdown(f"**Client:** {selected_row['Name']} ({selected_row['Type']})")
|
| 361 |
st.markdown(f"**Result:** {selected_row['Outcome']}")
|
| 362 |
st.text_area("Transcript", str(selected_row.get("Transcript", "No transcript available")), height=300)
|
|
|
|
| 363 |
if "AI Insights" in selected_row and selected_row["AI Insights"]:
|
| 364 |
st.info(f"💡 **AI Insight:** {selected_row['AI Insights']}")
|
| 365 |
+
else: st.warning("No insights generated for this call.")
|
| 366 |
+
else: st.info("Database is empty. Make some calls!")
|
|
|
|
|
|
|
|
|
|
| 367 |
|
| 368 |
+
# --- SETUP WITH PRODUCT INFO ---
|
| 369 |
elif st.session_state.page == "setup":
|
| 370 |
st.title("👤 Налаштування Дзвінка")
|
|
|
|
| 371 |
with st.form("lead_form"):
|
|
|
|
|
|
|
|
|
|
|
|
|
| 372 |
c1, c2 = st.columns(2)
|
| 373 |
+
# Lead Info
|
| 374 |
+
c1.markdown("### 👨💼 Lead Info")
|
| 375 |
+
bot_name = c1.text_input("Ваше ім'я (Менеджера)", "Олексій")
|
| 376 |
name = c1.text_input("Ім'я Клієнта", "Олександр")
|
| 377 |
+
company = c1.text_input("Компанія", "SoftServe")
|
|
|
|
| 378 |
type_ = c1.selectbox("Тип бізнесу", ["B2B", "B2C"])
|
| 379 |
+
context = c1.selectbox("Контекст", ["Холодний дзвінок", "Теплий лід", "Повторний дзвінок"])
|
| 380 |
+
|
| 381 |
+
# NEW: Product Info
|
| 382 |
+
c2.markdown("### 📦 Product / Service Info")
|
| 383 |
+
p_name = c2.text_input("Product Name", "AI Sales Engine")
|
| 384 |
+
p_value = c2.text_input("Main Benefit (Value)", "Increases sales by 300%")
|
| 385 |
+
p_price = c2.text_input("Price / Pricing Model", "$100/month")
|
| 386 |
+
p_diff = c2.text_input("Competitive Edge (Why us?)", "Learns from every call")
|
| 387 |
|
| 388 |
+
if c1.checkbox("🔍 Перевірити в базі"):
|
|
|
|
|
|
|
| 389 |
try:
|
|
|
|
| 390 |
from leads_manager import connect_to_gsheet
|
| 391 |
sheet = connect_to_gsheet()
|
| 392 |
if sheet:
|
| 393 |
records = sheet.get_all_records()
|
| 394 |
found = [r for r in records if str(r['Name']).lower() == name.lower()]
|
|
|
|
| 395 |
if found:
|
| 396 |
+
last = found[-1]
|
| 397 |
+
st.info(f"📜 Contact Found: {last['Date']}")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 398 |
context = "Повторний дзвінок"
|
| 399 |
+
else: st.success("✨ New Client")
|
| 400 |
+
except: st.error("Database unavailable.")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 401 |
|
| 402 |
+
submitted = st.form_submit_button("🚀 Start Call")
|
| 403 |
if submitted:
|
|
|
|
| 404 |
st.session_state.lead_info = {
|
| 405 |
+
"bot_name": bot_name, "name": name,
|
| 406 |
+
"company": company, "type": type_, "context": context
|
| 407 |
+
}
|
| 408 |
+
# Store Product Info
|
| 409 |
+
st.session_state.product_info = {
|
| 410 |
+
"product_name": p_name,
|
| 411 |
+
"product_value": p_value,
|
| 412 |
+
"product_price": p_price,
|
| 413 |
+
"competitor_diff": p_diff
|
| 414 |
}
|
| 415 |
st.session_state.messages = []
|
| 416 |
st.session_state.current_node = "start"
|
|
|
|
| 419 |
st.session_state.visited_history = []
|
| 420 |
st.rerun()
|
| 421 |
|
| 422 |
+
# --- CHAT ---
|
| 423 |
elif st.session_state.page == "chat":
|
| 424 |
st.markdown(f"### Call with {st.session_state.lead_info['name']}")
|
|
|
|
| 425 |
col_chat, col_tools = st.columns([1.5, 1])
|
| 426 |
|
| 427 |
with col_tools:
|
| 428 |
+
st.markdown("#### 🎯 Objectives")
|
|
|
|
| 429 |
if "qualification" in st.session_state.current_node: st.session_state.checklist["Identify Customer"] = True
|
| 430 |
+
if "pain" in st.session_state.current_node: st.session_state.checklist["Determine Objectives"] = True
|
| 431 |
if "pitch" in st.session_state.current_node: st.session_state.checklist["Outline Advantages"] = True
|
| 432 |
+
for goal, done in st.session_state.checklist.items(): st.write(f"{'✅' if done else '⬜'} {goal}")
|
| 433 |
|
| 434 |
+
st.markdown("#### 🧠 Profile")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 435 |
current_archetype = st.session_state.get("current_archetype", "Analyzing...")
|
|
|
|
|
|
|
| 436 |
cols = st.columns(4)
|
| 437 |
+
def op(t): return "1.0" if current_archetype == t else "0.3"
|
| 438 |
+
cols[0].markdown(f"<div style='opacity:{op('DRIVER')};text-align:center'>🔴<br>Boss</div>", unsafe_allow_html=True)
|
| 439 |
+
cols[1].markdown(f"<div style='opacity:{op('ANALYST')};text-align:center'>🔵<br>Analyst</div>", unsafe_allow_html=True)
|
| 440 |
+
cols[2].markdown(f"<div style='opacity:{op('EXPRESSIVE')};text-align:center'>🟡<br>Fan</div>", unsafe_allow_html=True)
|
| 441 |
+
cols[3].markdown(f"<div style='opacity:{op('CONSERVATIVE')};text-align:center'>🟢<br>Safe</div>", unsafe_allow_html=True)
|
| 442 |
+
if st.session_state.reasoning: st.caption(f"🤖 {st.session_state.reasoning}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 443 |
|
| 444 |
+
st.markdown("#### 📊 Strategy")
|
| 445 |
curr_id = node_to_id[st.session_state.current_node]
|
| 446 |
target_id = node_to_id["close_standard"]
|
| 447 |
path = get_predicted_path(graph, curr_id, target_id, id_to_node, node_to_id)
|
| 448 |
+
st.graphviz_chart(draw_graph(graph_data, st.session_state.current_node, path), use_container_width=True)
|
|
|
|
|
|
|
|
|
|
| 449 |
|
| 450 |
+
with st.expander("🧮 Bellman-Ford Logs"):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 451 |
visited_ids = [node_to_id[n] for n in st.session_state.get('visited_history', []) if n in node_to_id]
|
| 452 |
client_type = st.session_state.lead_info.get('type', 'B2B')
|
| 453 |
current_sentiment = st.session_state.get("current_sentiment", 0.0)
|
| 454 |
+
raw_dist = bellman_ford_list(graph, curr_id, visited_nodes=visited_ids, client_type=client_type, sentiment_score=current_sentiment)
|
| 455 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 456 |
debug_data = []
|
| 457 |
+
target_path_set = set(path)
|
|
|
|
| 458 |
for i, d in enumerate(raw_dist):
|
| 459 |
node_name = id_to_node[i]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 460 |
status = "⬜"
|
| 461 |
if node_name == st.session_state.current_node: status = "📍 Start"
|
| 462 |
elif node_name in target_path_set: status = "✨ Path"
|
| 463 |
elif d == float('inf'): status = "🚫 Unreachable"
|
| 464 |
+
debug_data.append({"Node": node_name, "Cost": "∞" if d==float('inf') else round(d,2), "Status": status})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 465 |
|
|
|
|
| 466 |
df_log = pd.DataFrame(debug_data)
|
| 467 |
+
df_log["sort"] = df_log["Cost"].apply(lambda x: 9999 if x=="∞" else float(x))
|
| 468 |
+
st.dataframe(df_log.sort_values("sort").drop(columns=["sort"]), hide_index=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 469 |
|
| 470 |
with col_chat:
|
| 471 |
for msg in st.session_state.messages:
|
| 472 |
with st.chat_message(msg["role"]): st.write(msg["content"])
|
| 473 |
|
|
|
|
| 474 |
if not st.session_state.messages:
|
| 475 |
+
with st.spinner("AI warming up..."):
|
| 476 |
+
# Pass Product Info
|
| 477 |
+
greeting = generate_greeting(model, nodes["start"], st.session_state.lead_info, st.session_state.product_info)
|
|
|
|
| 478 |
st.session_state.messages.append({"role": "assistant", "content": greeting})
|
| 479 |
st.rerun()
|
| 480 |
|
| 481 |
if user_input := st.chat_input("Reply..."):
|
| 482 |
st.session_state.messages.append({"role": "user", "content": user_input})
|
|
|
|
|
|
|
| 483 |
current_text = nodes[st.session_state.current_node]
|
| 484 |
analysis = analyze_full_context(model, user_input, st.session_state.current_node, st.session_state.messages)
|
| 485 |
+
intent, archetype = analysis.get("intent", "STAY"), analysis.get("archetype", "UNKNOWN")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 486 |
st.session_state.current_archetype = archetype
|
| 487 |
+
st.session_state.reasoning = analysis.get("reasoning", "")
|
| 488 |
|
| 489 |
if "EXIT" in intent:
|
| 490 |
outcome = "Success" if "close" in st.session_state.current_node else "Fail"
|
| 491 |
save_lead_to_db(st.session_state.lead_info, st.session_state.messages, outcome)
|
| 492 |
st.success("Call Saved!")
|
| 493 |
st.session_state.page = "dashboard"; st.rerun()
|
|
|
|
| 494 |
elif "STAY" in intent:
|
| 495 |
+
# Pass Product Info
|
| 496 |
+
resp = generate_response(model, current_text, user_input, "STAY", st.session_state.lead_info, archetype, st.session_state.product_info)
|
| 497 |
else: # MOVE
|
| 498 |
+
if st.session_state.current_node not in st.session_state.visited_history:
|
|
|
|
| 499 |
st.session_state.visited_history.append(st.session_state.current_node)
|
| 500 |
+
curr_id = node_to_id[st.session_state.current_node]
|
| 501 |
+
best_next = None; min_w = float('inf')
|
| 502 |
+
for n, w in graph.adj_list[curr_id]:
|
|
|
|
| 503 |
if w < min_w: min_w = w; best_next = n
|
| 504 |
+
if best_next is not None:
|
|
|
|
| 505 |
st.session_state.current_node = id_to_node[best_next]
|
| 506 |
new_text = nodes[st.session_state.current_node]
|
| 507 |
+
# Pass Product Info
|
| 508 |
+
resp = generate_response(model, new_text, user_input, "MOVE", st.session_state.lead_info, archetype, st.session_state.product_info)
|
| 509 |
+
else:
|
| 510 |
resp = "Call finished."
|
| 511 |
save_lead_to_db(st.session_state.lead_info, st.session_state.messages, "End of Script")
|
| 512 |
|
|
|
|
| 515 |
|
| 516 |
elif mode == "🧪 Math Lab":
|
| 517 |
st.title("🧪 Computational Math Lab")
|
|
|
|
| 518 |
st.markdown("### Section A: Graph Inspector")
|
| 519 |
col1, col2 = st.columns(2)
|
| 520 |
n_nodes = col1.slider("N (Vertices)", 5, 15, 10)
|
| 521 |
density = col2.slider("Density", 0.1, 1.0, 0.5)
|
| 522 |
|
| 523 |
if st.button("Generate Graph"):
|
|
|
|
| 524 |
graph = experiments.generate_erdos_renyi(n_nodes, density)
|
| 525 |
st.session_state.lab_graph = graph
|
| 526 |
|
|
|
|
| 527 |
if 'lab_graph' in st.session_state:
|
| 528 |
graph = st.session_state.lab_graph
|
|
|
|
| 529 |
tab1, tab2, tab3 = st.tabs(["Visual Graph", "Adjacency Matrix", "Adjacency List"])
|
| 530 |
|
| 531 |
with tab1:
|
| 532 |
st.subheader("Graphviz Visualization")
|
| 533 |
+
is_connected, unreachable = graph.check_connectivity(0)
|
| 534 |
+
if is_connected: st.success("✅ Graph is Fully Connected (from Node 0)")
|
| 535 |
+
else: st.error(f"⚠️ Warning: Unreachable nodes: {unreachable}")
|
| 536 |
+
|
| 537 |
dot = graphviz.Digraph()
|
|
|
|
| 538 |
for u, neighbors in graph.adj_list.items():
|
| 539 |
dot.node(str(u), label=str(u))
|
| 540 |
+
for v, w in neighbors: dot.edge(str(u), str(v), label=str(w))
|
|
|
|
| 541 |
st.graphviz_chart(dot)
|
| 542 |
|
| 543 |
with tab2:
|
| 544 |
+
st.subheader("Adjacency Matrix (Heatmap)")
|
|
|
|
| 545 |
matrix = graph.to_adjacency_matrix()
|
| 546 |
+
df_matrix = pd.DataFrame(matrix)
|
| 547 |
+
df_heatmap = df_matrix.replace(float('inf'), None)
|
| 548 |
+
st.dataframe(df_heatmap.style.background_gradient(cmap="Blues", axis=None).format(formatter=lambda x: f"{x:.0f}" if pd.notnull(x) else "∞"))
|
| 549 |
|
| 550 |
with tab3:
|
| 551 |
st.subheader("Adjacency List")
|
| 552 |
st.write(graph.adj_list)
|
| 553 |
|
| 554 |
st.divider()
|
|
|
|
| 555 |
st.markdown("### Section B: Scientific Experiments")
|
| 556 |
st.markdown("Comparing Bellman-Ford implementations: **Adjacency List vs Adjacency Matrix**.")
|
| 557 |
+
sizes_preset = list(range(20, 220, 20))
|
|
|
|
|
|
|
| 558 |
densities_preset = [0.1, 0.3, 0.5, 0.7, 0.9]
|
| 559 |
|
| 560 |
if st.button("🚀 Run Scientific Benchmark"):
|
| 561 |
+
with st.spinner("Running benchmarks..."):
|
|
|
|
| 562 |
results = experiments.run_scientific_benchmark(sizes_preset, densities_preset, num_runs=20)
|
|
|
|
|
|
|
| 563 |
df_results = pd.DataFrame(results)
|
| 564 |
+
st.subheader("Raw Data")
|
| 565 |
st.dataframe(df_results)
|
|
|
|
|
|
|
| 566 |
st.divider()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 567 |
|
| 568 |
+
c_chart, c_filter = st.columns([3, 1])
|
| 569 |
+
with c_filter:
|
| 570 |
+
sel_density = st.selectbox("Density:", densities_preset, index=2)
|
| 571 |
+
st.info(f"**Analysis:** List O(E) vs Matrix O(V^3).")
|
| 572 |
+
with c_chart:
|
| 573 |
+
filtered_df = df_results[df_results["Density"] == sel_density].sort_values("Vertices (N)")
|
| 574 |
+
st.line_chart(filtered_df.set_index("Vertices (N)")[["Time_List", "Time_Matrix"]])
|
| 575 |
+
st.success("Benchmarking complete!")
|
|
|
|
|
|
|
|
|
|
|
|
graph_module.py
CHANGED
|
@@ -59,3 +59,28 @@ class Graph:
|
|
| 59 |
|
| 60 |
def get_list(self):
|
| 61 |
return self.adj_list
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
|
| 60 |
def get_list(self):
|
| 61 |
return self.adj_list
|
| 62 |
+
|
| 63 |
+
def check_connectivity(self, start_node_id):
    """
    Check whether every vertex is reachable from ``start_node_id`` via BFS.

    Args:
        start_node_id: Vertex id to start the traversal from.

    Returns:
        tuple: ``(is_connected, unreachable_nodes)`` where ``is_connected``
        is True iff all ``num_vertices`` vertices were reached, and
        ``unreachable_nodes`` is a sorted list of vertex ids that were not.
        An out-of-range ``start_node_id`` yields ``(False, [])``.
    """
    from collections import deque  # local import: module header is out of scope here

    # Reject invalid start vertices up front.
    if start_node_id < 0 or start_node_id >= self.num_vertices:
        return False, []

    visited = {start_node_id}
    # deque.popleft() is O(1); list.pop(0) shifts the whole list (O(n)).
    queue = deque([start_node_id])

    while queue:
        u = queue.popleft()
        for v, _weight in self.adj_list[u]:
            if v not in visited:
                visited.add(v)
                queue.append(v)

    # Anything outside the visited set is unreachable from the start node.
    unreachable = sorted(set(range(self.num_vertices)) - visited)
    return (len(unreachable) == 0), unreachable
|