Dipan04 commited on
Commit
4b7573c
·
1 Parent(s): 4251788

Backend upgrade complete: ModelRouter (Nova→Gemini fallback), notification API routes, and config updates.

Browse files
.gitignore CHANGED
@@ -1 +1,3 @@
1
  __pycache__/
 
 
 
1
  __pycache__/
2
+
3
+ .env
agent/__pycache__/intent_agent.cpython-312.pyc CHANGED
Binary files a/agent/__pycache__/intent_agent.cpython-312.pyc and b/agent/__pycache__/intent_agent.cpython-312.pyc differ
 
agent/extraction_agent.py CHANGED
@@ -1,70 +1,69 @@
1
  """
2
- Field extraction agent for Notiflow.
3
- """
 
 
 
 
4
 
5
- from __future__ import annotations
 
 
 
 
 
6
 
7
  import json
8
  import logging
9
  import re
10
  from pathlib import Path
11
 
12
- from botocore.exceptions import BotoCoreError, ClientError
13
-
14
- from app.bedrock_client import get_bedrock_client
15
- from app.config import MODEL_ID
16
-
17
  PROMPT_PATH = Path(__file__).parent.parent / "prompts" / "extraction_prompt.txt"
18
 
19
  INTENT_SCHEMA: dict[str, list[str]] = {
20
- "order": ["intent", "customer", "item", "quantity"],
21
- "payment": ["intent", "customer", "amount", "payment_type"],
22
- "credit": ["intent", "customer", "item", "quantity", "amount"],
23
- "return": ["intent", "customer", "item", "reason"],
24
  "preparation": ["intent", "item", "quantity"],
25
- "other": ["intent", "note"],
26
  }
27
-
28
  VALID_INTENTS = set(INTENT_SCHEMA.keys())
29
- logger = logging.getLogger(__name__)
30
 
 
31
 
32
- def _get_bedrock_client():
33
- """Return a cached Bedrock runtime client."""
34
- return get_bedrock_client()
35
 
 
 
 
36
 
37
  def _load_prompt(message: str, intent: str) -> str:
38
- """Load the extraction prompt template and inject the message and intent."""
39
  template = PROMPT_PATH.read_text(encoding="utf-8")
40
- prompt = template.replace("{message}", message.strip())
41
- return prompt.replace("{intent}", intent.strip().lower())
42
-
43
-
44
- def _call_nova(prompt: str) -> str:
45
- """Send a prompt to Amazon Nova 2 Lite via Bedrock Converse API."""
46
- client = _get_bedrock_client()
47
- request_body = {
48
- "messages": [{"role": "user", "content": [{"text": prompt}]}],
49
- "inferenceConfig": {
50
- "maxTokens": 256,
51
- "temperature": 0.0,
52
- "topP": 1.0,
53
- },
54
- }
55
 
56
- try:
57
- response = client.converse(modelId=MODEL_ID, **request_body)
58
- output_message = response["output"]["message"]
59
- text_parts = [block["text"] for block in output_message["content"] if "text" in block]
60
- return " ".join(text_parts).strip()
61
- except (BotoCoreError, ClientError) as exc:
62
- logger.error("Bedrock API error: %s", exc)
63
- raise RuntimeError(f"Failed to call Amazon Nova: {exc}") from exc
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
  def _parse_extraction_response(raw: str, intent: str) -> dict:
67
- """Parse model output into a schema-conformant dict."""
68
  cleaned = re.sub(r"```(?:json)?|```", "", raw).strip()
69
 
70
  try:
@@ -75,62 +74,70 @@ def _parse_extraction_response(raw: str, intent: str) -> dict:
75
  try:
76
  parsed = json.loads(match.group(0))
77
  except json.JSONDecodeError:
78
- logger.warning("Could not parse Nova response as JSON; returning nulls")
79
  parsed = {}
80
  else:
81
  parsed = {}
82
 
83
  schema_fields = INTENT_SCHEMA.get(intent, INTENT_SCHEMA["other"])
84
- result = {field: parsed.get(field, None) for field in schema_fields}
85
  result["intent"] = intent
86
 
87
  if "customer" in result and isinstance(result["customer"], str):
88
  result["customer"] = result["customer"].strip().title()
89
 
90
- if "amount" in result and result["amount"] is not None:
91
- try:
92
- result["amount"] = float(result["amount"])
93
- if result["amount"].is_integer():
94
- result["amount"] = int(result["amount"])
95
- except (ValueError, TypeError):
96
- result["amount"] = None
97
-
98
- if "quantity" in result and result["quantity"] is not None:
99
- try:
100
- result["quantity"] = float(result["quantity"])
101
- if result["quantity"].is_integer():
102
- result["quantity"] = int(result["quantity"])
103
- except (ValueError, TypeError):
104
- result["quantity"] = None
105
 
106
  return result
107
 
108
 
 
 
 
 
109
  def extract_fields(message: str, intent: str) -> dict:
110
- """Extract structured business fields from a Hinglish message."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
  if not message or not message.strip():
112
- logger.warning("Empty message received")
113
  return _null_result(intent)
114
 
115
  intent = intent.lower().strip()
116
  if intent not in VALID_INTENTS:
117
- raise ValueError(
118
- f"Unsupported intent: '{intent}'. Must be one of: {', '.join(sorted(VALID_INTENTS))}"
119
- )
120
 
121
- logger.info("Extracting fields | intent=%s | message=%r", intent, message)
122
  prompt = _load_prompt(message, intent)
123
- raw_response = _call_nova(prompt)
124
- logger.debug("Raw Nova response: %r", raw_response)
125
- result = _parse_extraction_response(raw_response, intent)
126
- logger.info("Extracted fields: %s", result)
127
  return result
128
 
129
 
130
  def _null_result(intent: str) -> dict:
131
- """Return a fully-null result for the given intent."""
132
- intent = intent.lower().strip() if intent in VALID_INTENTS else "other"
133
  schema_fields = INTENT_SCHEMA.get(intent, INTENT_SCHEMA["other"])
134
- result = {field: None for field in schema_fields}
135
  result["intent"] = intent
136
- return result
 
1
  """
2
+ extraction_agent.py
3
+ -------------------
4
+ Stage 3: Field Extraction Agent for Notiflow
5
+
6
+ Uses ModelRouter (Nova primary → Gemini fallback) to extract structured
7
+ business fields from Hinglish messages, given a pre-classified intent.
8
 
9
+ message → Intent Agent → intent → Extraction Agent → structured fields
10
+
11
+ Integration note (backend upgrade):
12
+ The private _call_model() function now delegates to agent/model_router.py.
13
+ All schema enforcement, parsing, and public API logic is unchanged.
14
+ """
15
 
16
  import json
17
  import logging
18
  import re
19
  from pathlib import Path
20
 
 
 
 
 
 
21
PROMPT_PATH = Path(__file__).parent.parent / "prompts" / "extraction_prompt.txt"

# Per-intent output schema: every result dict carries exactly these keys,
# with null for anything the model could not find.
INTENT_SCHEMA: dict[str, list[str]] = {
    "order": ["intent", "customer", "item", "quantity"],
    "payment": ["intent", "customer", "amount", "payment_type"],
    "credit": ["intent", "customer", "item", "quantity", "amount"],
    "return": ["intent", "customer", "item", "reason"],
    "preparation": ["intent", "item", "quantity"],
    "other": ["intent", "note"],
}

# Closed set of intents this agent accepts.
VALID_INTENTS = set(INTENT_SCHEMA.keys())

logger = logging.getLogger(__name__)


# ---------------------------------------------------------------------------
# Prompt loader
# ---------------------------------------------------------------------------

def _load_prompt(message: str, intent: str) -> str:
    """Render the extraction prompt, filling the {message} and {intent} slots."""
    rendered = PROMPT_PATH.read_text(encoding="utf-8")
    rendered = rendered.replace("{message}", message.strip())
    return rendered.replace("{intent}", intent.strip().lower())


# ---------------------------------------------------------------------------
# Model inference (now via ModelRouter — Nova primary, Gemini fallback)
# ---------------------------------------------------------------------------

def _call_model(prompt: str) -> str:
    """
    Route the prompt through ModelRouter and return the raw text reply.

    Which backend actually answered is logged for observability.
    """
    # Deferred import: the router is only needed at call time.
    from agent.model_router import route

    raw, model_used = route(prompt, max_tokens=256)
    logger.info("Extraction inference served by: %s", model_used)
    return raw
60
+
61
+
62
+ # ---------------------------------------------------------------------------
63
+ # Response parser & normaliser
64
+ # ---------------------------------------------------------------------------
65
 
66
  def _parse_extraction_response(raw: str, intent: str) -> dict:
 
67
  cleaned = re.sub(r"```(?:json)?|```", "", raw).strip()
68
 
69
  try:
 
74
  try:
75
  parsed = json.loads(match.group(0))
76
  except json.JSONDecodeError:
77
+ logger.warning("Could not parse model response as JSON; returning nulls")
78
  parsed = {}
79
  else:
80
  parsed = {}
81
 
82
  schema_fields = INTENT_SCHEMA.get(intent, INTENT_SCHEMA["other"])
83
+ result = {field: parsed.get(field, None) for field in schema_fields}
84
  result["intent"] = intent
85
 
86
  if "customer" in result and isinstance(result["customer"], str):
87
  result["customer"] = result["customer"].strip().title()
88
 
89
+ for num_field in ("amount", "quantity"):
90
+ if num_field in result and result[num_field] is not None:
91
+ try:
92
+ val = float(result[num_field])
93
+ result[num_field] = int(val) if val.is_integer() else val
94
+ except (ValueError, TypeError):
95
+ result[num_field] = None
 
 
 
 
 
 
 
 
96
 
97
  return result
98
 
99
 
100
# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------

def extract_fields(message: str, intent: str) -> dict:
    """
    Extract structured business fields from a Hinglish message.

    Args:
        message: Raw business message (Hinglish or English).
        intent: Intent string from the Intent Agent.

    Returns:
        Dict with "intent" plus the schema fields for that intent;
        fields the model could not find are null.

    Raises:
        ValueError: Unsupported intent.
        RuntimeError: Propagated when no model backend is available.

    Examples:
        >>> extract_fields("rahul ne 15000 bheja", "payment")
        {'intent': 'payment', 'customer': 'Rahul', 'amount': 15000, 'payment_type': None}
    """
    # Blank input short-circuits to an all-null result — no model call.
    if not message or not message.strip():
        return _null_result(intent)

    intent = intent.lower().strip()
    if intent not in VALID_INTENTS:
        raise ValueError(f"Unsupported intent: '{intent}'.")

    logger.info("Extracting | intent=%s | message=%r", intent, message)
    raw = _call_model(_load_prompt(message, intent))
    result = _parse_extraction_response(raw, intent)
    logger.info("Extracted: %s", result)
    return result
136
 
137
 
138
def _null_result(intent: str) -> dict:
    """
    Return an all-null field dict for *intent*.

    The intent is normalised (lowercased/stripped) BEFORE the validity
    check. Previously the raw string was tested against VALID_INTENTS,
    so inputs like "Payment" or " order " incorrectly collapsed to the
    "other" schema even though extract_fields() treats them as valid.
    Unknown or non-string intents still fall back to "other".
    """
    normalized = intent.lower().strip() if isinstance(intent, str) else ""
    if normalized not in VALID_INTENTS:
        normalized = "other"
    schema_fields = INTENT_SCHEMA.get(normalized, INTENT_SCHEMA["other"])
    result = {field: None for field in schema_fields}
    result["intent"] = normalized
    return result
agent/intent_agent.py CHANGED
@@ -1,90 +1,102 @@
1
  """
2
- Intent detection agent for Notiflow.
3
- """
 
 
 
 
4
 
5
- from __future__ import annotations
 
 
 
 
 
 
 
6
 
7
  import json
8
  import logging
9
  import re
10
  from pathlib import Path
11
 
12
- from botocore.exceptions import BotoCoreError, ClientError
13
-
14
- from app.bedrock_client import get_bedrock_client
15
- from app.config import MODEL_ID
16
-
17
  PROMPT_PATH = Path(__file__).parent.parent / "prompts" / "intent_prompt.txt"
18
  VALID_INTENTS = {"order", "payment", "credit", "return", "preparation", "other"}
19
 
20
  logger = logging.getLogger(__name__)
21
 
22
 
23
- def _get_bedrock_client():
24
- """Return a cached Bedrock runtime client."""
25
- return get_bedrock_client()
26
-
27
 
28
  def _load_prompt(message: str) -> str:
29
- """Load the intent prompt template and inject the user message."""
30
  template = PROMPT_PATH.read_text(encoding="utf-8")
31
  return template.replace("{message}", message.strip())
32
 
33
 
34
- def _call_nova(prompt: str) -> str:
35
- """Send a prompt to Amazon Nova 2 Lite via Bedrock Converse API."""
36
- client = _get_bedrock_client()
37
- request_body = {
38
- "messages": [{"role": "user", "content": [{"text": prompt}]}],
39
- "inferenceConfig": {
40
- "maxTokens": 64,
41
- "temperature": 0.0,
42
- "topP": 1.0,
43
- },
44
- }
 
 
45
 
46
- try:
47
- response = client.converse(modelId=MODEL_ID, **request_body)
48
- output_message = response["output"]["message"]
49
- text_parts = [
50
- block["text"]
51
- for block in output_message["content"]
52
- if block.get("type") == "text" or "text" in block
53
- ]
54
- return " ".join(text_parts).strip()
55
- except (BotoCoreError, ClientError) as exc:
56
- logger.error("Bedrock API error: %s", exc)
57
- raise RuntimeError(f"Failed to call Amazon Nova: {exc}") from exc
58
-
59
-
60
- def _parse_intent_response(raw: str) -> dict[str, str]:
61
- """Parse model output into a validated intent dict."""
62
- cleaned = re.sub(r"```(?:json)?|```", "", raw).strip()
63
 
 
 
 
 
 
 
64
  try:
65
  result = json.loads(cleaned)
66
  intent = result.get("intent", "other").lower().strip()
67
  except json.JSONDecodeError:
68
- match = re.search(r'"intent"\s*:\s*"(\w+)"', cleaned)
69
  intent = match.group(1).lower() if match else "other"
70
 
71
  if intent not in VALID_INTENTS:
72
- logger.warning("Model returned unknown intent '%s', defaulting to 'other'", intent)
73
  intent = "other"
74
 
75
  return {"intent": intent}
76
 
77
 
78
- def detect_intent(message: str) -> dict[str, str]:
79
- """Detect the business intent of a Hinglish message."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  if not message or not message.strip():
81
  logger.warning("Empty message received, returning 'other'")
82
  return {"intent": "other"}
83
 
84
- logger.info("Detecting intent for message: %r", message)
85
- prompt = _load_prompt(message)
86
- raw_response = _call_nova(prompt)
87
- logger.debug("Raw Nova response: %r", raw_response)
88
- result = _parse_intent_response(raw_response)
89
  logger.info("Detected intent: %s", result["intent"])
90
- return result
 
1
  """
2
+ intent_agent.py
3
+ ---------------
4
+ Stage 2: Intent Detection Agent for Notiflow
5
+
6
+ Uses ModelRouter (Nova primary → Gemini fallback) to classify
7
+ the business intent of Hinglish messages.
8
 
9
+ Supported intents:
10
+ order | payment | credit | return | preparation | other
11
+
12
+ Integration note (backend upgrade):
13
+ The private _call_model() function now delegates to agent/model_router.py
14
+ instead of calling bedrock_client directly. All parsing and public API
15
+ logic is unchanged.
16
+ """
17
 
18
  import json
19
  import logging
20
  import re
21
  from pathlib import Path
22
 
 
 
 
 
 
23
  PROMPT_PATH = Path(__file__).parent.parent / "prompts" / "intent_prompt.txt"
24
  VALID_INTENTS = {"order", "payment", "credit", "return", "preparation", "other"}
25
 
26
  logger = logging.getLogger(__name__)
27
 
28
 
29
+ # ---------------------------------------------------------------------------
30
+ # Prompt loader
31
+ # ---------------------------------------------------------------------------
 
32
 
33
  def _load_prompt(message: str) -> str:
 
34
  template = PROMPT_PATH.read_text(encoding="utf-8")
35
  return template.replace("{message}", message.strip())
36
 
37
 
38
+ # ---------------------------------------------------------------------------
39
+ # Model inference (now via ModelRouter Nova primary, Gemini fallback)
40
+ # ---------------------------------------------------------------------------
41
+
42
+ def _call_model(prompt: str) -> str:
43
+ """
44
+ Route the prompt through ModelRouter.
45
+ Returns raw text response from whichever model was available.
46
+ """
47
+ from agent.model_router import route
48
+ raw, model_used = route(prompt, max_tokens=64)
49
+ logger.info("Intent inference served by: %s", model_used)
50
+ return raw
51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
53
+ # ---------------------------------------------------------------------------
54
+ # Response parser
55
+ # ---------------------------------------------------------------------------
56
+
57
+ def _parse_intent_response(raw: str) -> dict:
58
+ cleaned = re.sub(r"```(?:json)?|```", "", raw).strip()
59
  try:
60
  result = json.loads(cleaned)
61
  intent = result.get("intent", "other").lower().strip()
62
  except json.JSONDecodeError:
63
+ match = re.search(r'"intent"\s*:\s*"(\w+)"', cleaned)
64
  intent = match.group(1).lower() if match else "other"
65
 
66
  if intent not in VALID_INTENTS:
67
+ logger.warning("Unknown intent '%s', defaulting to 'other'", intent)
68
  intent = "other"
69
 
70
  return {"intent": intent}
71
 
72
 
73
+ # ---------------------------------------------------------------------------
74
+ # Public API
75
+ # ---------------------------------------------------------------------------
76
+
77
+ def detect_intent(message: str) -> dict:
78
+ """
79
+ Detect the business intent of a Hinglish message.
80
+
81
+ Args:
82
+ message: Raw business message (Hinglish or English).
83
+
84
+ Returns:
85
+ {"intent": "<intent_string>"}
86
+
87
+ Examples:
88
+ >>> detect_intent("bhaiya 2 kilo bhej dena")
89
+ {'intent': 'order'}
90
+ >>> detect_intent("rahul ne 15000 bheja")
91
+ {'intent': 'payment'}
92
+ """
93
  if not message or not message.strip():
94
  logger.warning("Empty message received, returning 'other'")
95
  return {"intent": "other"}
96
 
97
+ logger.info("Detecting intent for: %r", message)
98
+ prompt = _load_prompt(message)
99
+ raw = _call_model(prompt)
100
+ result = _parse_intent_response(raw)
 
101
  logger.info("Detected intent: %s", result["intent"])
102
+ return result
agent/model_router.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ agent/model_router.py
3
+ ---------------------
4
+ ModelRouter for Notiflow.
5
+
6
+ Acts as the unified inference interface used by intent_agent and
7
+ extraction_agent instead of calling bedrock_client directly.
8
+
9
+ Routing strategy:
10
+ Primary → Amazon Nova Lite (app/bedrock_client.py :: call_nova)
11
+ Fallback → Google Gemini (models/gemini_client.py :: generate)
12
+
13
+ Fallback triggers:
14
+ - AWS credentials missing / not configured
15
+ - Bedrock API error (any exception from call_nova)
16
+ - Model invocation failure
17
+ - Timeout (boto3 raises an exception on timeout)
18
+
19
+ Both Nova and Gemini receive the SAME prompt. The prompts already
20
+ instruct the model to return JSON — the router returns the raw text
21
+ and lets the calling agent parse it.
22
+
23
+ Public API
24
+ ----------
25
+ route(prompt: str, max_tokens: int = 256) -> tuple[str, str]
26
+ Returns (raw_text_response, model_used)
27
+ model_used is "nova" or "gemini"
28
+ """
29
+
30
from __future__ import annotations

import logging

logger = logging.getLogger(__name__)


def route(prompt: str, max_tokens: int = 256) -> tuple[str, str]:
    """
    Send *prompt* to the best available model.

    Nova (Bedrock) is attempted first; any failure — missing credentials,
    API error, timeout — triggers the Gemini fallback. Both backends get
    the identical prompt; the caller is responsible for parsing the reply.

    Args:
        prompt: Fully rendered prompt string.
        max_tokens: Token cap forwarded to Nova (Gemini ignores it).

    Returns:
        (response_text, model_used) with model_used being "nova" or "gemini".

    Raises:
        RuntimeError: If BOTH Nova and Gemini fail.
    """
    nova_failure: Exception | None = None

    # ── Primary backend: Amazon Nova via Bedrock ────────────────────────────
    try:
        from app.bedrock_client import call_nova
        reply = call_nova(prompt, max_tokens=max_tokens)
    except Exception as nova_exc:
        # Keep a reference: `nova_exc` is unbound once the handler exits.
        nova_failure = nova_exc
        logger.warning(
            "Nova unavailable (%s) — falling back to Gemini.", nova_exc
        )
    else:
        logger.info("ModelRouter → nova (success)")
        return reply, "nova"

    # ── Secondary backend: Google Gemini ────────────────────────────────────
    try:
        from models.gemini_client import generate
        reply = generate(prompt)
    except Exception as gemini_exc:
        logger.error("Gemini fallback also failed: %s", gemini_exc)
        raise RuntimeError(
            f"Both Nova and Gemini are unavailable.\n"
            f" Nova error: {nova_failure}\n"
            f" Gemini error: {gemini_exc}"
        ) from gemini_exc
    else:
        logger.info("ModelRouter → gemini (fallback)")
        return reply, "gemini"
api/notification_routes.py ADDED
@@ -0,0 +1,254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ api/notification_routes.py
3
+ --------------------------
4
+ FastAPI router for Notiflow notification endpoints.
5
+
6
+ Endpoints
7
+ ---------
8
+ POST /api/notification
9
+ Receives a notification, runs the full agent pipeline,
10
+ returns the structured orchestrator result.
11
+
12
+ GET /api/notifications/generate
13
+ Calls Gemini to generate a batch of demo notifications.
14
+ Query param: n (default 5)
15
+
16
+ WebSocket /ws/notifications
17
+ Streams live notifications to connected clients.
18
+ Accepts both frontend-pushed and Gemini-generated events.
19
+ Broadcasts to all connected clients.
20
+ """
21
+
22
+ from __future__ import annotations
23
+
24
+ import asyncio
25
+ import json
26
+ import logging
27
+ from typing import Any
28
+
29
+ from fastapi import APIRouter, WebSocket, WebSocketDisconnect, Query, HTTPException
30
+ from pydantic import BaseModel
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+ router = APIRouter()
35
+
36
+
37
+ # ---------------------------------------------------------------------------
38
+ # Pydantic models
39
+ # ---------------------------------------------------------------------------
40
+
41
class NotificationRequest(BaseModel):
    """Incoming notification payload."""

    # Channel the notification came from,
    # e.g. "whatsapp", "amazon", "payment", "return".
    source: str
    # Raw Hinglish (or English) business message to run through the pipeline.
    message: str
45
+
46
+
47
class NotificationResponse(BaseModel):
    """
    Full orchestrator result — same shape as run_notiflow() output.
    Frontend reads fields from data{} and event{}.
    """

    message: str              # Original message that was processed
    intent: str               # Classified intent string
    data: dict[str, Any]      # Extracted structured fields
    event: dict[str, Any]     # Event record produced by the pipeline
    source: str               # Echo back the notification source
    model: str | None = None  # "nova" | "gemini" | "demo"
+
59
+
60
+ # ---------------------------------------------------------------------------
61
+ # WebSocket connection manager
62
+ # ---------------------------------------------------------------------------
63
+
64
class _ConnectionManager:
    """Tracks live WebSocket clients and fans JSON payloads out to them."""

    def __init__(self):
        self._active: list[WebSocket] = []

    async def connect(self, ws: WebSocket) -> None:
        """Accept the handshake and start tracking *ws*."""
        await ws.accept()
        self._active.append(ws)
        logger.info("WS client connected. Total: %d", len(self._active))

    def disconnect(self, ws: WebSocket) -> None:
        """Stop tracking *ws* (identity comparison, so equal-but-distinct
        sockets are unaffected)."""
        self._active = [client for client in self._active if client is not ws]
        logger.info("WS client disconnected. Total: %d", len(self._active))

    async def broadcast(self, payload: dict) -> None:
        """Send *payload* to every client; silently drop clients that error."""
        stale = []
        for client in self._active:
            try:
                await client.send_json(payload)
            except Exception:
                # Client likely went away mid-send; clean it up after the loop.
                stale.append(client)
        for client in stale:
            self.disconnect(client)
+
90
+
91
+ _manager = _ConnectionManager()
92
+
93
+
94
+ # ---------------------------------------------------------------------------
95
+ # POST /api/notification
96
+ # ---------------------------------------------------------------------------
97
+
98
@router.post("/api/notification", response_model=NotificationResponse)
async def process_notification(body: NotificationRequest):
    """
    Receive a business notification and run the full Notiflow pipeline.

    The endpoint calls run_notiflow(message) from app/main.py — it does
    not call agents or skills directly. After processing, the result is
    broadcast to all connected WebSocket clients so the live stream panel
    updates in real time.

    Args:
        body: {"source": "whatsapp", "message": "bhaiya 3 kurti bhej dena"}

    Returns:
        Full orchestrator result + source echo.

    Raises:
        HTTPException: 422 for an empty message, 500 on pipeline failure.
    """
    if not body.message or not body.message.strip():
        raise HTTPException(status_code=422, detail="Message cannot be empty.")

    logger.info("POST /api/notification | source=%s | msg=%r", body.source, body.message)

    try:
        from app.main import run_notiflow
        result = run_notiflow(body.message.strip())
    except Exception as exc:
        logger.error("Pipeline error: %s", exc)
        # Chain the original exception so the root cause survives in logs.
        raise HTTPException(status_code=500, detail=f"Pipeline error: {exc}") from exc

    # An event name ending in one of the pipeline's recorded-state suffixes
    # means the live path produced it; anything else is the demo/simulated path.
    event_name = result["event"].get("event", "")
    is_live = event_name.endswith(("_recorded", "_received", "_requested", "_queued"))

    response = NotificationResponse(
        message=result["message"],
        intent=result["intent"],
        data=result["data"],
        event=result["event"],
        source=body.source,
        # NOTE(review): NotificationResponse documents model as
        # "nova" | "gemini" | "demo", but this endpoint emits "live" —
        # confirm which values the frontend actually expects.
        model="live" if is_live else "demo",
    )

    # Push the processed result to every live-stream WebSocket client.
    await _manager.broadcast(response.model_dump())

    return response
144
+
145
+
146
+ # ---------------------------------------------------------------------------
147
+ # GET /api/notifications/generate
148
+ # ---------------------------------------------------------------------------
149
+
150
@router.get("/api/notifications/generate")
async def generate_demo_notifications(n: int = Query(default=5, ge=1, le=20)):
    """
    Generate n demo notifications using Gemini (or static fallback).

    Query params:
        n: number of notifications to generate (1-20, default 5)

    Returns:
        {"notifications": [{"source": str, "message": str}, ...]}
    """
    # Deferred import keeps the generator service optional at module load.
    from services.notification_generator import get_notifications

    return {"notifications": get_notifications(n)}
164
+
165
+
166
+ # ---------------------------------------------------------------------------
167
+ # WebSocket /ws/notifications
168
+ # ---------------------------------------------------------------------------
169
+
170
@router.websocket("/ws/notifications")
async def websocket_notification_stream(websocket: WebSocket):
    """
    WebSocket endpoint for real-time notification streaming.

    Connected clients receive frontend-pushed notifications, Gemini-generated
    ones, and results of processed notifications (broadcast from the POST
    endpoint). A client may also SEND a notification over the socket; the
    server runs it through the pipeline and broadcasts the result to ALL
    connected clients, not just the sender.

    Protocol:
        Client → Server: {"source": str, "message": str}
        Server → Client: NotificationResponse JSON

    NOTE(review): run_notiflow appears synchronous and is awaited nowhere —
    a slow pipeline call blocks the event loop; consider run_in_executor.
    """
    await _manager.connect(websocket)
    try:
        while True:
            raw = await websocket.receive_text()
            try:
                payload = json.loads(raw)
                source = payload.get("source", "websocket")
                message = payload.get("message", "").strip()

                if not message:
                    await websocket.send_json({"error": "Empty message"})
                    continue

                # Deferred import avoids a hard dependency at module load.
                from app.main import run_notiflow
                result = run_notiflow(message)

                await _manager.broadcast({
                    "message": result["message"],
                    "intent": result["intent"],
                    "data": result["data"],
                    "event": result["event"],
                    "source": source,
                })

            except json.JSONDecodeError:
                await websocket.send_json({"error": "Invalid JSON payload"})
            except Exception as exc:
                # Report pipeline failures back to the sender only.
                logger.error("WS pipeline error: %s", exc)
                await websocket.send_json({"error": str(exc)})

    except WebSocketDisconnect:
        _manager.disconnect(websocket)
223
+
224
+
225
+ # ---------------------------------------------------------------------------
226
+ # GET /api/stream/start
227
+ # ---------------------------------------------------------------------------
228
+
229
@router.get("/api/stream/start")
async def start_gemini_stream(
    n: int = Query(default=5, ge=1, le=20),
    delay: float = Query(default=2.0, ge=0.5, le=30.0),
):
    """
    Trigger Gemini to generate n notifications and stream them to all
    connected WebSocket clients with a delay between each.

    This runs in the background — the HTTP response returns immediately.

    Query params:
        n: number of notifications (default 5)
        delay: seconds between each broadcast (default 2.0)

    Returns:
        {"status": "streaming started", "n": n, "delay_seconds": delay}
    """
    async def _stream():
        # One broadcast per generated notification, paced by the service.
        from services.notification_generator import stream_notifications
        async for notification in stream_notifications(n=n, delay_seconds=delay):
            await _manager.broadcast({
                "type": "incoming_notification",
                "source": notification["source"],
                "message": notification["message"],
            })

    task = asyncio.create_task(_stream())
    # asyncio keeps only a weak reference to scheduled tasks: previously the
    # Task object was discarded, so the stream could be garbage-collected
    # mid-run. Stash a strong reference on the endpoint function and drop it
    # when the task completes.
    pending = getattr(start_gemini_stream, "_pending_tasks", None)
    if pending is None:
        pending = set()
        start_gemini_stream._pending_tasks = pending
    pending.add(task)
    task.add_done_callback(pending.discard)

    return {"status": "streaming started", "n": n, "delay_seconds": delay}
app/__pycache__/config.cpython-312.pyc CHANGED
Binary files a/app/__pycache__/config.cpython-312.pyc and b/app/__pycache__/config.cpython-312.pyc differ
 
app/__pycache__/main.cpython-312.pyc CHANGED
Binary files a/app/__pycache__/main.cpython-312.pyc and b/app/__pycache__/main.cpython-312.pyc differ
 
app/config.py CHANGED
@@ -31,7 +31,10 @@ REGISTRY_FILE = ROOT / "skills" / "skill_registry.json" # Skill registry
31
  # When True the dashboard and main.py simulate the pipeline locally.
32
  # Set to False (or override via env var) to use real Bedrock inference.
33
  import os
34
- DEMO_MODE: bool = os.getenv("NOTIFLOW_DEMO_MODE", "true").lower() != "false"
 
 
 
35
 
36
  # ---------------------------------------------------------------------------
37
  # Amazon Bedrock settings
@@ -44,4 +47,19 @@ BEDROCK_MODEL_ID = "amazon.nova-lite-v1:0"
44
  # Ensure data directory exists at import time
45
  # ---------------------------------------------------------------------------
46
 
47
- DATA_DIR.mkdir(parents=True, exist_ok=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  # When True the dashboard and main.py simulate the pipeline locally.
32
  # Set to False (or override via env var) to use real Bedrock inference.
33
  import os
34
+ NOTIFLOW_DEMO_MODE = os.getenv("NOTIFLOW_DEMO_MODE", "true").lower() == "true"
35
+
36
+ # Legacy alias for backward compatibility
37
+ DEMO_MODE = NOTIFLOW_DEMO_MODE
38
 
39
  # ---------------------------------------------------------------------------
40
  # Amazon Bedrock settings
 
47
  # Ensure data directory exists at import time
48
  # ---------------------------------------------------------------------------
49
 
50
+ DATA_DIR.mkdir(parents=True, exist_ok=True)
51
+
52
+ # ---------------------------------------------------------------------------
53
+ # Gemini settings (added: backend upgrade)
54
+ # ---------------------------------------------------------------------------
55
+
56
+ GEMINI_API_KEY: str | None = os.getenv("GEMINI_API_KEY")
57
+
58
+ # ---------------------------------------------------------------------------
59
+ # Excel sync path (added: backend upgrade)
60
+ # If EXCEL_FILE_PATH is set in .env, it overrides the default DATA_FILE.
61
+ # If not set, the existing DATA_FILE path is used as fallback.
62
+ # ---------------------------------------------------------------------------
63
+
64
+ _env_excel = os.getenv("EXCEL_FILE_PATH")
65
+ EXCEL_SYNC_FILE = Path(_env_excel) if _env_excel else DATA_FILE
backend/main.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ backend/main.py
3
+ ---------------
4
+ FastAPI application entry point for Notiflow.
5
+
6
+ Startup:
7
+ 1. Loads .env file from project root (python-dotenv)
8
+ 2. Configures logging
9
+ 3. Mounts the notification API router
10
+ 4. Exposes health check endpoint
11
+
12
+ Run:
13
+ uvicorn backend.main:app --reload
14
+
15
+ The server expects to be started from the project root so that
16
+ all relative imports (app/, agent/, services/, etc.) resolve correctly.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ import logging
22
+ import sys
23
+ from pathlib import Path
24
+
25
+ # ---------------------------------------------------------------------------
26
+ # .env loader — must happen BEFORE any app.config import
27
+ # ---------------------------------------------------------------------------
28
+
29
+ try:
30
+ from dotenv import load_dotenv
31
+ _env_path = Path(__file__).parent.parent / ".env"
32
+ load_dotenv(dotenv_path=_env_path, override=False)
33
+ except ImportError:
34
+ pass # python-dotenv not installed; env vars must be set externally
35
+
36
+ # ---------------------------------------------------------------------------
37
+ # Ensure project root is on sys.path when running via uvicorn
38
+ # ---------------------------------------------------------------------------
39
+
40
+ _PROJECT_ROOT = str(Path(__file__).parent.parent)
41
+ if _PROJECT_ROOT not in sys.path:
42
+ sys.path.insert(0, _PROJECT_ROOT)
43
+
44
+ # ---------------------------------------------------------------------------
45
+ # Logging
46
+ # ---------------------------------------------------------------------------
47
+
48
+ logging.basicConfig(
49
+ level=logging.INFO,
50
+ format="%(asctime)s %(levelname)-8s %(name)s — %(message)s",
51
+ datefmt="%H:%M:%S",
52
+ )
53
+ logger = logging.getLogger(__name__)
54
+
55
# ---------------------------------------------------------------------------
# FastAPI app
# ---------------------------------------------------------------------------

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from api.notification_routes import router as notification_router

# Public ASGI application object — referenced by uvicorn as ``backend.main:app``.
# Fix: removed spaces around ``=`` in keyword arguments (PEP 8, E251).
app = FastAPI(
    title="Notiflow API",
    description=(
        "AI operations assistant for small businesses.\n\n"
        "Converts informal Hinglish business notifications into "
        "structured operations — powered by Amazon Nova 2 Lite."
    ),
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
)

# ---------------------------------------------------------------------------
# CORS — allow all origins for hackathon demo
# NOTE(review): the CORS spec forbids a literal "*" origin together with
# credentials; Starlette works around it by echoing the request origin, but
# this should be restricted to known origins before production use.
# ---------------------------------------------------------------------------

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# ---------------------------------------------------------------------------
# Routers
# ---------------------------------------------------------------------------

app.include_router(notification_router)
93
+
94
# ---------------------------------------------------------------------------
# Health check
# ---------------------------------------------------------------------------

@app.get("/health", tags=["Meta"])
async def health():
    """Report service status together with a short configuration summary."""
    # Imported lazily so the .env loader at the top of this module runs first.
    from app.config import DEMO_MODE, BEDROCK_MODEL_ID, GEMINI_API_KEY, EXCEL_SYNC_FILE

    summary = {
        "status": "ok",
        "demo_mode": DEMO_MODE,
        "bedrock_model": BEDROCK_MODEL_ID,
        "gemini_enabled": bool(GEMINI_API_KEY),
        "excel_file": str(EXCEL_SYNC_FILE),
    }
    return summary
113
+
114
+
115
# ---------------------------------------------------------------------------
# Root redirect to docs
# ---------------------------------------------------------------------------

@app.get("/", include_in_schema=False)
async def root():
    """Redirect bare GET / to the Swagger UI (hidden from the OpenAPI schema)."""
    from fastapi.responses import RedirectResponse
    return RedirectResponse(url="/docs")
123
+
124
+
125
# ---------------------------------------------------------------------------
# Startup / shutdown events
# NOTE(review): @app.on_event is deprecated in recent FastAPI releases in
# favour of lifespan handlers — migrate when the app object is next touched.
# ---------------------------------------------------------------------------

@app.on_event("startup")
async def on_startup():
    """Log a configuration banner once the server has started."""
    # Imported lazily so the .env loader at the top of this module runs first.
    from app.config import DEMO_MODE, GEMINI_API_KEY, EXCEL_SYNC_FILE
    logger.info("=" * 55)
    logger.info(" Notiflow API starting up")
    logger.info(" Demo mode : %s", DEMO_MODE)
    logger.info(" Gemini : %s", "enabled" if GEMINI_API_KEY else "disabled (no API key)")
    logger.info(" Excel file : %s", EXCEL_SYNC_FILE)
    logger.info("=" * 55)
138
+
139
+
140
@app.on_event("shutdown")
async def on_shutdown():
    """Log a single line as the server stops."""
    logger.info("Notiflow API shutting down.")
backend/requirements.txt ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Notiflow — backend dependencies
2
+ # Install: pip install -r backend/requirements.txt
3
+
4
+ # FastAPI stack
5
+ fastapi>=0.111.0
6
+ uvicorn[standard]>=0.29.0
7
+ python-multipart>=0.0.9 # required for FastAPI form handling
8
+
9
+ # Environment
10
+ python-dotenv>=1.0.0
11
+
12
+ # Amazon Bedrock
13
+ boto3>=1.34.0
14
+
15
+ # Gemini
16
+ google-generativeai>=0.7.0
17
+
18
+ # Excel
19
+ pandas>=2.2.0
20
+ openpyxl>=3.1.0
21
+
22
+ # Pydantic (FastAPI uses v2)
23
+ pydantic>=2.7.0
dashboard/.env.local ADDED
@@ -0,0 +1 @@
 
 
1
+ VITE_API_URL=http://localhost:8000
dashboard/src/api.ts ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/**
 * API service for communicating with FastAPI backend
 * Running on http://localhost:8000
 */

// Base URL is injected at build time by Vite; falls back to the local dev server.
const API_BASE_URL = import.meta.env.VITE_API_URL || 'http://localhost:8000';

/** Request body for POST /api/notification. */
export interface NotificationPayload {
  source: string;  // e.g. "whatsapp", "gpay", "amazon seller"
  message: string; // raw notification text sent to the pipeline
}

/** Response shape returned by the backend notification pipeline. */
export interface ApiResponse {
  intent: string;               // detected intent label
  data: Record<string, any>;    // extracted entity fields
  event: Record<string, any>;   // event record created by the backend
  message: string;
}
19
+
20
+ class ApiClient {
21
+ /**
22
+ * Send notification to backend
23
+ * POST /api/notification
24
+ */
25
+ async sendNotification(payload: NotificationPayload): Promise<ApiResponse> {
26
+ try {
27
+ const response = await fetch(`${API_BASE_URL}/api/notification`, {
28
+ method: 'POST',
29
+ headers: {
30
+ 'Content-Type': 'application/json',
31
+ },
32
+ body: JSON.stringify(payload),
33
+ timeout: 30000,
34
+ });
35
+
36
+ if (!response.ok) {
37
+ throw new Error(`Backend error: ${response.statusText}`);
38
+ }
39
+
40
+ return await response.json();
41
+ } catch (error) {
42
+ console.error('API Error:', error);
43
+ throw error;
44
+ }
45
+ }
46
+
47
+ /**
48
+ * Health check endpoint
49
+ * GET /health
50
+ */
51
+ async healthCheck(): Promise<boolean> {
52
+ try {
53
+ const response = await fetch(`${API_BASE_URL}/health`, {
54
+ method: 'GET',
55
+ });
56
+ return response.ok;
57
+ } catch {
58
+ return false;
59
+ }
60
+ }
61
+ }
62
+
63
+ export const apiClient = new ApiClient();
dashboard/src/main.tsx CHANGED
@@ -1,10 +1,10 @@
1
- import { StrictMode } from 'react'
2
  import { createRoot } from 'react-dom/client'
3
  import './index.css'
4
  import App from './App.tsx'
5
 
 
 
 
6
  createRoot(document.getElementById('root')!).render(
7
- <StrictMode>
8
- <App />
9
- </StrictMode>,
10
  )
 
 
1
  import { createRoot } from 'react-dom/client'
2
  import './index.css'
3
  import App from './App.tsx'
4
 
5
+ // StrictMode removed: React 18 StrictMode intentionally remounts components
6
+ // in dev (mount → unmount → remount), which resets useRef values and
7
+ // double-fires effects — causing duplicate real API calls to the backend.
8
  createRoot(document.getElementById('root')!).render(
9
+ <App />
 
 
10
  )
dashboard/src/react.tsx CHANGED
@@ -1,4 +1,4 @@
1
- import React, { useState, useEffect, useCallback, useRef } from 'react';
2
  import {
3
  MessageSquare,
4
  ShoppingCart,
@@ -19,6 +19,7 @@ import {
19
  Server,
20
  Terminal
21
  } from 'lucide-react';
 
22
 
23
  interface Entity {
24
  item?: string;
@@ -48,10 +49,12 @@ interface SheetsState {
48
  }
49
 
50
  const VyaparFlow = () => {
51
- // --- Simulation State ---
52
  const [notifications, setNotifications] = useState<AppEvent[]>([]);
53
  const [activeEvent, setActiveEvent] = useState<AppEvent | null>(null);
54
- const [processingStep, setProcessingStep] = useState(0); // 0: idle, 1: API Request, 2: Reasoning, 3: Completed
 
 
55
  const [view, setView] = useState('dashboard');
56
  const [activeSheet, setActiveSheet] = useState('Orders');
57
 
@@ -74,107 +77,157 @@ const VyaparFlow = () => {
74
  ]
75
  });
76
 
77
- // Template for events
78
- const eventTemplates: AppEvent[] = [
79
- {
80
- source: 'WhatsApp',
81
- icon: <MessageSquare size={14} className="text-emerald-400" />,
82
- title: 'Harish (Customer)',
83
- message: 'bhaiya 3 kilo aata aur bhej dena kal tak',
84
- type: 'order',
85
- entity: { item: 'Aata', qty: '3kg', customer: 'Harish' }
86
- },
87
- {
88
- source: 'Amazon Seller',
89
- icon: <ShoppingCart size={14} className="text-orange-400" />,
90
- title: 'New Order Received',
91
- message: 'Product: Silk Kurti (Red). Qty: 1. Total: ₹1850',
92
- type: 'marketplace',
93
- entity: { item: 'Silk Kurti', qty: '1', customer: 'Amzn_User_44' }
94
- },
95
- {
96
- source: 'GPay',
97
- icon: <IndianRupee size={14} className="text-blue-400" />,
98
- title: 'Payment Confirmed',
99
- message: '₹4,500 received from Amit Kumar.',
100
- type: 'payment',
101
- entity: { amount: '₹4,500', customer: 'Amit Kumar' }
102
  }
103
- ];
104
 
105
- // --- Automated Logic ---
106
-
107
- const triggerBackendProcess = useCallback(async (event: AppEvent) => {
108
- if (isProcessingRef.current) return;
109
- isProcessingRef.current = true;
110
-
111
- setActiveEvent(event);
112
- setProcessingStep(1); // Stage 1: API Endpoint hit
113
 
114
- // Simulate Network Latency to "POST /api/v1/process-event"
115
- await new Promise(r => setTimeout(r, 1500));
116
- setProcessingStep(2); // Stage 2: Reasoning Phase (Nova 2 Lite)
117
 
118
- // Simulate AI Reasoning duration
119
- await new Promise(r => setTimeout(r, 2500));
120
- setProcessingStep(3); // Stage 3: Skill Execution / Record Completion
 
 
121
 
122
- // Update Sheets with the new data
123
- setSheets(prev => {
124
- const newSheets = { ...prev };
125
- const rowId = `RECORD-${Date.now()}`;
126
- if (event.type === 'payment') {
127
- newSheets.Ledger = [{
128
- id: rowId,
129
- customer: event.entity.customer || 'Unknown',
130
- type: 'Credit',
131
- amount: event.entity.amount || '0',
132
- date: 'Today',
133
- status: 'Settled'
134
- }, ...prev.Ledger].slice(0, 10);
135
- } else {
136
- newSheets.Orders = [{
137
- id: `ORD-${Math.floor(Math.random()*900 + 100)}`,
138
- source: event.source,
139
- customer: event.entity.customer || 'Unknown',
140
- item: event.entity.item || 'Item',
141
- qty: event.entity.qty || '1',
142
- status: 'Confirmed'
143
- }, ...prev.Orders].slice(0, 10);
144
- }
145
- return newSheets;
146
- });
147
-
148
- // Reset loop readiness
149
- setTimeout(() => {
150
- setProcessingStep(0);
151
- isProcessingRef.current = false;
152
- }, 4000);
153
- }, []);
154
-
155
- // Notification Ingestion Loop
156
- useEffect(() => {
157
- let index = 0;
158
- const interval = setInterval(() => {
159
- if (isProcessingRef.current) return;
160
 
161
- const template = eventTemplates[index];
162
- const uniqueId = `event-${Date.now()}-${Math.floor(Math.random() * 1000)}`;
 
163
 
164
- const newEvent = {
165
- ...template,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
166
  id: uniqueId,
 
 
 
 
 
 
167
  timestamp: new Date().toLocaleTimeString()
168
  };
169
-
 
170
  setNotifications(prev => [newEvent, ...prev].slice(0, 5));
171
- triggerBackendProcess(newEvent);
 
 
 
 
 
 
 
 
 
 
 
 
172
 
173
- index = (index + 1) % eventTemplates.length;
174
- }, 12000); // 12-second cycle for clear visualization
 
175
 
176
- return () => clearInterval(interval);
177
- }, [triggerBackendProcess]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
  return (
180
  <div className="flex h-screen bg-[#050505] text-slate-300 font-sans selection:bg-indigo-500/30 overflow-hidden">
@@ -307,7 +360,26 @@ const VyaparFlow = () => {
307
  </button>
308
  </nav>
309
 
310
- <div className="flex items-center gap-4">
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
311
  <div className="flex flex-col items-end">
312
  <div className="flex items-center gap-2 text-[10px] font-black text-indigo-400 uppercase tracking-widest">
313
  Nova-2-Lite <div className="w-1.5 h-1.5 bg-green-500 rounded-full animate-pulse"></div>
@@ -379,14 +451,14 @@ const VyaparFlow = () => {
379
  <div className="bg-black/60 p-5 rounded-2xl border border-white/5 space-y-4">
380
  <div>
381
  <div className="text-[9px] text-slate-500 uppercase font-black mb-1.5 tracking-wider">Detected Intent</div>
382
- <div className="text-xs text-indigo-400 font-bold capitalize">{activeEvent.type.replace('_', ' ')} Pattern Matched</div>
383
  </div>
384
  <div className="h-[1px] bg-white/5"></div>
385
  <div>
386
  <div className="text-[9px] text-slate-500 uppercase font-black mb-2 tracking-wider">Extracted Entities</div>
387
  <div className="flex flex-wrap gap-2">
388
- {Object.entries(activeEvent.entity).map(([k, v]) => (
389
- <span key={`${activeEvent.id}-entity-${k}`} className="text-[10px] px-2.5 py-1 rounded bg-indigo-500/10 text-indigo-300 border border-indigo-500/20 font-bold">{k}: {v}</span>
390
  ))}
391
  </div>
392
  </div>
 
1
+ import React, { useState, useRef } from 'react';
2
  import {
3
  MessageSquare,
4
  ShoppingCart,
 
19
  Server,
20
  Terminal
21
  } from 'lucide-react';
22
+ import { apiClient } from './api';
23
 
24
  interface Entity {
25
  item?: string;
 
49
  }
50
 
51
  const VyaparFlow = () => {
52
+ // --- State Management ---
53
  const [notifications, setNotifications] = useState<AppEvent[]>([]);
54
  const [activeEvent, setActiveEvent] = useState<AppEvent | null>(null);
55
+ const [backendResult, setBackendResult] = useState<any>(null);
56
+ const [processingStep, setProcessingStep] = useState(0); // 0: idle, 1: API Request, 2: Reasoning, 3: Completed, 4: Error
57
+ const [pipelineError, setPipelineError] = useState<string | null>(null);
58
  const [view, setView] = useState('dashboard');
59
  const [activeSheet, setActiveSheet] = useState('Orders');
60
 
 
77
  ]
78
  });
79
 
80
+ // --- Generate Random Notification ---
81
+
82
+ const products = ["Silk Kurti", "Cotton Saree", "Jeans", "T-shirt", "Leather Jacket"];
83
+ const groceryItems = ["aata", "chawal", "dal", "cheeni", "tel"];
84
+ const customers = ["Rahul", "Amit Kumar", "Suresh", "Pooja", "Neha"];
85
+ const sources = ["whatsapp", "gpay", "amazon seller"];
86
+
87
+ const generateNotification = () => {
88
+ const source = sources[Math.floor(Math.random() * sources.length)];
89
+
90
+ if (source === "whatsapp") {
91
+ const qty = Math.floor(Math.random() * 5) + 1;
92
+ const item = groceryItems[Math.floor(Math.random() * groceryItems.length)];
93
+ return {
94
+ source: "whatsapp",
95
+ message: `bhaiya ${qty} kilo ${item} bhej dena kal tak`
96
+ };
 
 
 
 
 
 
 
 
97
  }
 
98
 
99
+ if (source === "gpay") {
100
+ const amount = Math.floor(Math.random() * 15000) + 500;
101
+ const customer = customers[Math.floor(Math.random() * customers.length)];
102
+ return {
103
+ source: "gpay",
104
+ message: `₹${amount} received from ${customer}`
105
+ };
106
+ }
107
 
108
+ const product = products[Math.floor(Math.random() * products.length)];
109
+ const qty = Math.floor(Math.random() * 3) + 1;
110
+ const total = Math.floor(Math.random() * 3000) + 500;
111
 
112
+ return {
113
+ source: "amazon seller",
114
+ message: `Product: ${product}. Qty: ${qty}. Total: ₹${total}`
115
+ };
116
+ };
117
 
118
+ const handleGenerateNotification = async () => {
119
+ // Prevent double-click or multiple rapid requests
120
+ if (isProcessingRef.current) return;
121
+ isProcessingRef.current = true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
+ try {
124
+ // Step 1: Generate random notification
125
+ const notification = generateNotification();
126
 
127
+ // Step 2: Map source to UI properties
128
+ const sourceMap: { [key: string]: { icon: React.ReactNode; title: string } } = {
129
+ 'whatsapp': {
130
+ icon: <MessageSquare size={14} className="text-emerald-400" />,
131
+ title: 'WhatsApp Order'
132
+ },
133
+ 'gpay': {
134
+ icon: <IndianRupee size={14} className="text-blue-400" />,
135
+ title: 'GPay Payment'
136
+ },
137
+ 'amazon seller': {
138
+ icon: <ShoppingCart size={14} className="text-orange-400" />,
139
+ title: 'Amazon Order'
140
+ },
141
+ };
142
+
143
+ const sourceProps = sourceMap[notification.source] || {
144
+ icon: <MessageSquare size={14} className="text-slate-400" />,
145
+ title: 'New Notification'
146
+ };
147
+
148
+ // Step 3: Create AppEvent object
149
+ const uniqueId = `event-${Date.now()}-${Math.floor(Math.random() * 1000)}`;
150
+ const newEvent: AppEvent = {
151
  id: uniqueId,
152
+ source: notification.source.charAt(0).toUpperCase() + notification.source.slice(1),
153
+ icon: sourceProps.icon,
154
+ title: sourceProps.title,
155
+ message: notification.message,
156
+ type: notification.source === 'gpay' ? 'payment' : 'order',
157
+ entity: {},
158
  timestamp: new Date().toLocaleTimeString()
159
  };
160
+
161
+ // Step 4: Add to UI notifications list
162
  setNotifications(prev => [newEvent, ...prev].slice(0, 5));
163
+ setActiveEvent(newEvent);
164
+ setBackendResult(null);
165
+ setProcessingStep(1); // Stage 1: API Endpoint hit
166
+
167
+ // Step 5: SINGLE API CALL (the ONLY place where backend is triggered)
168
+ const response = await apiClient.sendNotification({
169
+ source: notification.source.toLowerCase(),
170
+ message: notification.message
171
+ });
172
+
173
+ // Step 6: Store backend response
174
+ setBackendResult(response);
175
+ setProcessingStep(2); // Stage 2: Reasoning Phase (Nova 2 Lite)
176
 
177
+ // Simulate slight delay for visual feedback
178
+ await new Promise(r => setTimeout(r, 1000));
179
+ setProcessingStep(3); // Stage 3: Skill Execution / Record Completion
180
 
181
+ // Step 7: Update Sheets with the response data
182
+ setSheets(prev => {
183
+ const newSheets = { ...prev };
184
+ const rowId = `RECORD-${Date.now()}`;
185
+
186
+ // Extract entity info from API response
187
+ const entityData = response.data || {};
188
+
189
+ if (response.intent === 'payment') {
190
+ newSheets.Ledger = [{
191
+ id: rowId,
192
+ customer: entityData.customer || newEvent.entity.customer || 'Unknown',
193
+ type: 'Credit',
194
+ amount: entityData.amount || newEvent.entity.amount || '₹0',
195
+ date: 'Today',
196
+ status: 'Settled'
197
+ }, ...prev.Ledger].slice(0, 10);
198
+ } else {
199
+ newSheets.Orders = [{
200
+ id: `ORD-${Math.floor(Math.random()*900 + 100)}`,
201
+ source: newEvent.source,
202
+ customer: entityData.customer || newEvent.entity.customer || 'Unknown',
203
+ item: entityData.item || newEvent.entity.item || 'Item',
204
+ qty: entityData.quantity || newEvent.entity.qty || '1',
205
+ status: 'Confirmed'
206
+ }, ...prev.Orders].slice(0, 10);
207
+ }
208
+ return newSheets;
209
+ });
210
+
211
+ } catch (error: any) {
212
+ console.error('Backend error:', error);
213
+ const msg = error?.message || 'Both models unavailable';
214
+ setPipelineError(
215
+ msg.includes('quota') || msg.includes('429')
216
+ ? 'Quota exceeded — wait ~1 min'
217
+ : msg.includes('unavailable') || msg.includes('500')
218
+ ? 'Models unavailable — retry shortly'
219
+ : 'Request failed'
220
+ );
221
+ setProcessingStep(4); // Error state — shows red button
222
+ } finally {
223
+ // Reset lock after 5s so user clearly sees the error before re-enabling
224
+ setTimeout(() => {
225
+ setProcessingStep(0);
226
+ setPipelineError(null);
227
+ isProcessingRef.current = false;
228
+ }, 5000);
229
+ }
230
+ };
231
 
232
  return (
233
  <div className="flex h-screen bg-[#050505] text-slate-300 font-sans selection:bg-indigo-500/30 overflow-hidden">
 
360
  </button>
361
  </nav>
362
 
363
+ <div className="flex items-center gap-6">
364
+ <button
365
+ onClick={handleGenerateNotification}
366
+ disabled={processingStep > 0}
367
+ className={`px-4 py-2 rounded-lg text-xs font-bold border transition-all ${
368
+ processingStep === 4
369
+ ? 'bg-red-900/30 text-red-400 border-red-500/40 cursor-not-allowed'
370
+ : processingStep > 0
371
+ ? 'bg-slate-800/50 text-slate-500 border-slate-700/50 cursor-not-allowed'
372
+ : 'bg-emerald-600/20 text-emerald-400 border-emerald-500/30 hover:bg-emerald-600/30 hover:border-emerald-500/50'
373
+ }`}
374
+ title={pipelineError || ''}
375
+ >
376
+ {processingStep === 4
377
+ ? `❌ ${pipelineError || 'Failed'}`
378
+ : processingStep > 0
379
+ ? '⏳ Processing...'
380
+ : '⚡ Generate Notification'}
381
+ </button>
382
+
383
  <div className="flex flex-col items-end">
384
  <div className="flex items-center gap-2 text-[10px] font-black text-indigo-400 uppercase tracking-widest">
385
  Nova-2-Lite <div className="w-1.5 h-1.5 bg-green-500 rounded-full animate-pulse"></div>
 
451
  <div className="bg-black/60 p-5 rounded-2xl border border-white/5 space-y-4">
452
  <div>
453
  <div className="text-[9px] text-slate-500 uppercase font-black mb-1.5 tracking-wider">Detected Intent</div>
454
+ <div className="text-xs text-indigo-400 font-bold capitalize">{(backendResult?.intent || activeEvent?.type || 'unknown').replace('_', ' ')} Pattern Matched</div>
455
  </div>
456
  <div className="h-[1px] bg-white/5"></div>
457
  <div>
458
  <div className="text-[9px] text-slate-500 uppercase font-black mb-2 tracking-wider">Extracted Entities</div>
459
  <div className="flex flex-wrap gap-2">
460
+ {Object.entries(backendResult?.data || activeEvent?.entity || {}).map(([k, v]) => (
461
+ <span key={`${activeEvent?.id}-entity-${k}`} className="text-[10px] px-2.5 py-1 rounded bg-indigo-500/10 text-indigo-300 border border-indigo-500/20 font-bold">{k}: {String(v)}</span>
462
  ))}
463
  </div>
464
  </div>
dashboard/streamlit_app.py DELETED
@@ -1,335 +0,0 @@
1
- """
2
- streamlit_app.py
3
- ----------------
4
- Stage 7 + FIX 3 + FIX 4: Streamlit Dashboard for Notiflow
5
-
6
- Changes from Stage 7:
7
- FIX 3 — Dashboard now calls run_notiflow(message) from app/main.py only.
8
- No direct imports of agents or orchestrator.
9
- FIX 4 — File paths and DEMO_MODE come from app/config.py. No hardcoded paths.
10
-
11
- Run:
12
- streamlit run dashboard/streamlit_app.py
13
- """
14
- from __future__ import annotations
15
-
16
- import sys
17
- from pathlib import Path
18
-
19
- # Add project root to Python path
20
- ROOT = Path(__file__).resolve().parents[1]
21
- sys.path.append(str(ROOT))
22
-
23
- from pathlib import Path
24
-
25
- import pandas as pd
26
- import streamlit as st
27
-
28
- # ── Page config (must be first Streamlit call) ───────────────────────────────
29
- st.set_page_config(
30
- page_title="Notiflow · AI Operations Dashboard",
31
- page_icon="⚡",
32
- layout="wide",
33
- initial_sidebar_state="expanded",
34
- )
35
-
36
- # ── Config (FIX 4) ────────────────────────────────────────────────────────────
37
- from app.config import DATA_FILE, DEMO_MODE as _CONFIG_DEMO_MODE
38
-
39
- # ── Backend entry point (FIX 3) ───────────────────────────────────────────────
40
- from app.main import run_notiflow
41
-
42
- # ---------------------------------------------------------------------------
43
- # Static data
44
- # ---------------------------------------------------------------------------
45
-
46
- SAMPLE_MESSAGES = {
47
- "— pick a sample message —": "",
48
- "💰 Payment — Rahul ₹15,000": "rahul ne 15000 bheja",
49
- "📦 Order — 3 kurties": "bhaiya 3 kurti bhej dena",
50
- "📦 Order — Priya, 2 kg atta": "priya ke liye 2 kilo aata bhej dena",
51
- "🔄 Return — size issue": "size chota hai exchange karna hai",
52
- "📒 Credit — simple udhar": "udhar me de dijiye",
53
- "📒 Credit — Suresh ₹500": "suresh ko 500 ka maal udhar dena",
54
- "🗂️ Prep — pack 3 kurties": "3 kurti ka set ready rakhna",
55
- "💰 Payment — Amit GPay ₹8,000": "amit bhai ka 8000 gpay se aaya",
56
- }
57
-
58
- INTENT_CONFIG = {
59
- "order": {"emoji": "📦", "color": "#1E88E5", "label": "Order", "bg": "#E3F2FD"},
60
- "payment": {"emoji": "💰", "color": "#43A047", "label": "Payment", "bg": "#E8F5E9"},
61
- "credit": {"emoji": "📒", "color": "#FB8C00", "label": "Credit", "bg": "#FFF3E0"},
62
- "return": {"emoji": "🔄", "color": "#E53935", "label": "Return", "bg": "#FFEBEE"},
63
- "preparation": {"emoji": "🗂️", "color": "#8E24AA", "label": "Preparation","bg": "#F3E5F5"},
64
- "other": {"emoji": "💬", "color": "#757575", "label": "Other", "bg": "#F5F5F5"},
65
- }
66
-
67
- SHEETS = ["Orders", "Ledger", "Returns", "Inventory", "Invoices"]
68
- SHEET_ICONS = {"Orders": "📦", "Ledger": "💰", "Returns": "🔄",
69
- "Inventory": "📊", "Invoices": "🧾"}
70
-
71
- # ---------------------------------------------------------------------------
72
- # Pipeline trace builder
73
- # Constructs the step-by-step trace from the flat result dict.
74
- # run_notiflow() returns {message, intent, data, event} — no per-step data
75
- # in demo mode, so we reconstruct a display trace from the final result.
76
- # ---------------------------------------------------------------------------
77
-
78
- def _build_trace(result: dict) -> list[dict]:
79
- return [
80
- {
81
- "icon": "🧠",
82
- "step": "Intent Agent",
83
- "label": f"Intent detected: **{result['intent']}**",
84
- "note": "Nova 2 Lite reads the Hinglish message and classifies its business intent.",
85
- "output": {"intent": result["intent"]},
86
- },
87
- {
88
- "icon": "🔍",
89
- "step": "Extraction + Validation",
90
- "label": "Structured fields extracted and validated",
91
- "note": "Nova 2 Lite extracts entities; validator normalises numbers, text and payment aliases.",
92
- "output": result["data"],
93
- },
94
- {
95
- "icon": "⚙️",
96
- "step": "Skill Router",
97
- "label": f"Skill executed: **{result['event'].get('event', '')}**",
98
- "note": "The router dispatches to the correct business skill, persists data to Excel.",
99
- "output": result["event"],
100
- },
101
- ]
102
-
103
- # ---------------------------------------------------------------------------
104
- # UI helpers
105
- # ---------------------------------------------------------------------------
106
-
107
- def _intent_cfg(intent: str) -> dict:
108
- return INTENT_CONFIG.get(intent, INTENT_CONFIG["other"])
109
-
110
-
111
- def _render_intent_badge(intent: str):
112
- cfg = _intent_cfg(intent)
113
- st.markdown(
114
- f"<div style='"
115
- f"display:inline-block; background:{cfg['bg']};"
116
- f"border-left:5px solid {cfg['color']}; border-radius:6px;"
117
- f"padding:8px 18px; font-weight:700; font-size:1.05rem;"
118
- f"color:{cfg['color']}; margin-bottom:10px; letter-spacing:.04em;"
119
- f"'>{cfg['emoji']}&nbsp;&nbsp;{cfg['label'].upper()}</div>",
120
- unsafe_allow_html=True,
121
- )
122
-
123
-
124
- def _render_trace(trace: list[dict]):
125
- st.markdown("#### 🔄 Pipeline Trace")
126
- for i, step in enumerate(trace):
127
- with st.expander(f"{step['icon']} Step {i+1} — {step['step']}", expanded=True):
128
- st.markdown(step["label"])
129
- st.caption(step["note"])
130
- st.json(step["output"], expanded=False)
131
-
132
-
133
- def _render_result(result: dict):
134
- intent = result.get("intent", "other")
135
- st.markdown("#### 📋 Business Event")
136
- _render_intent_badge(intent)
137
-
138
- col1, col2 = st.columns(2, gap="medium")
139
- with col1:
140
- st.markdown("**Extracted Fields**")
141
- data = result.get("data", {})
142
- if data:
143
- rows = [{"Field": k, "Value": str(v) if v is not None else "—"}
144
- for k, v in data.items()]
145
- st.dataframe(pd.DataFrame(rows), use_container_width=True, hide_index=True)
146
- else:
147
- st.info("No fields extracted.")
148
- with col2:
149
- st.markdown("**Skill Output**")
150
- st.json(result.get("event", {}), expanded=True)
151
-
152
-
153
- def _render_table(sheet_name: str):
154
- """Read sheet from Excel (FIX 4: path from config) and display it."""
155
- if not Path(DATA_FILE).exists():
156
- st.info(f"No data file found yet at `{DATA_FILE}`. Process a message to create it.", icon="📭")
157
- return
158
- try:
159
- df = pd.read_excel(DATA_FILE, sheet_name=sheet_name)
160
- except Exception:
161
- df = pd.DataFrame()
162
-
163
- if df.empty:
164
- st.info(f"No records yet in **{sheet_name}**. Process a message to populate this sheet.", icon="📭")
165
- return
166
-
167
- st.dataframe(df, use_container_width=True, hide_index=True)
168
- icon = SHEET_ICONS.get(sheet_name, "📄")
169
- st.caption(f"{icon} {len(df)} record{'s' if len(df) != 1 else ''} in **{sheet_name}**")
170
-
171
-
172
- def _render_sidebar() -> tuple[str, bool]:
173
- with st.sidebar:
174
- st.markdown(
175
- "<div style='text-align:center; padding:10px 0 4px'>"
176
- "<span style='font-size:2.4rem'>⚡</span><br>"
177
- "<span style='font-size:1.2rem; font-weight:700'>Notiflow</span><br>"
178
- "<span style='font-size:.78rem; color:#888'>AI Operations Assistant</span>"
179
- "</div>",
180
- unsafe_allow_html=True,
181
- )
182
- st.divider()
183
-
184
- # Demo toggle — default from config (FIX 4)
185
- demo_mode = st.toggle(
186
- "🧪 Demo Mode",
187
- value=st.session_state.get("demo_mode", _CONFIG_DEMO_MODE),
188
- help=(
189
- "ON → pipeline simulated locally, no AWS needed.\n"
190
- "OFF → calls Amazon Nova 2 Lite via Bedrock (AWS creds required)."
191
- ),
192
- )
193
- if demo_mode:
194
- st.success("Demo mode active — no AWS needed", icon="✅")
195
- else:
196
- st.warning("Live mode — AWS credentials required", icon="⚠️")
197
-
198
- st.divider()
199
- st.markdown("**📂 Business Data**")
200
- raw_choice = st.radio(
201
- "sheet",
202
- [f"{SHEET_ICONS[s]} {s}" for s in SHEETS],
203
- label_visibility="collapsed",
204
- )
205
- active_sheet = raw_choice.split(" ", 1)[1]
206
-
207
- st.divider()
208
- st.markdown(
209
- "<div style='font-size:.78rem; color:#999'>"
210
- "Built for the <b>Amazon Nova AI Hackathon</b>.<br>"
211
- "Powered by <b>Amazon Nova 2 Lite</b> via Bedrock."
212
- "</div>",
213
- unsafe_allow_html=True,
214
- )
215
-
216
- return active_sheet, demo_mode
217
-
218
- # ---------------------------------------------------------------------------
219
- # Main
220
- # ---------------------------------------------------------------------------
221
-
222
- def main():
223
- # Session state defaults
224
- for key, default in {
225
- "demo_mode": _CONFIG_DEMO_MODE,
226
- "last_result": None,
227
- "last_trace": None,
228
- "last_error": None,
229
- "msg_input": "",
230
- }.items():
231
- if key not in st.session_state:
232
- st.session_state[key] = default
233
-
234
- active_sheet, st.session_state.demo_mode = _render_sidebar()
235
-
236
- # Header
237
- st.markdown(
238
- "<h1 style='margin-bottom:2px'>⚡ Notiflow "
239
- "<span style='font-size:1rem; font-weight:400; color:#888'>"
240
- "AI Operations Dashboard</span></h1>"
241
- "<p style='color:#999; margin:0'>"
242
- "Convert informal Hinglish business messages into structured operations "
243
- "— powered by Amazon Nova&nbsp;2&nbsp;Lite</p>",
244
- unsafe_allow_html=True,
245
- )
246
- st.divider()
247
-
248
- left, right = st.columns([1, 1], gap="large")
249
-
250
- # ── Left — input ─────────────────────────────────────────────────────────
251
- with left:
252
- st.markdown("### 💬 Enter Business Message")
253
-
254
- sample_key = st.selectbox(
255
- "Quick samples",
256
- options=list(SAMPLE_MESSAGES.keys()),
257
- index=0,
258
- label_visibility="collapsed",
259
- )
260
- if SAMPLE_MESSAGES.get(sample_key):
261
- st.session_state.msg_input = SAMPLE_MESSAGES[sample_key]
262
-
263
- message = st.text_area(
264
- "Message",
265
- value=st.session_state.msg_input,
266
- height=110,
267
- placeholder='e.g. "rahul ne 15000 bheja"',
268
- label_visibility="collapsed",
269
- )
270
- st.session_state.msg_input = message
271
-
272
- btn_label = (
273
- "🧪 Run Demo Pipeline"
274
- if st.session_state.demo_mode
275
- else "🚀 Run AI Pipeline (Nova)"
276
- )
277
- clicked = st.button(
278
- btn_label,
279
- type="primary",
280
- use_container_width=True,
281
- disabled=not message.strip(),
282
- )
283
-
284
- if clicked and message.strip():
285
- st.session_state.last_result = None
286
- st.session_state.last_trace = None
287
- st.session_state.last_error = None
288
-
289
- with st.spinner("Running agent pipeline…"):
290
- try:
291
- # FIX 3: only call run_notiflow — no direct agent imports
292
- result = run_notiflow(
293
- message.strip(),
294
- demo_mode=st.session_state.demo_mode,
295
- )
296
- st.session_state.last_result = result
297
- st.session_state.last_trace = _build_trace(result)
298
- except Exception as exc:
299
- st.session_state.last_error = str(exc)
300
-
301
- if st.session_state.last_error:
302
- st.error(
303
- f"**Pipeline error:** {st.session_state.last_error}\n\n"
304
- "Tip: Enable **Demo Mode** in the sidebar to run without AWS credentials.",
305
- icon="🚨",
306
- )
307
-
308
- # ── Right — output ────────────────────────────────────────────────────────
309
- with right:
310
- if st.session_state.last_result and st.session_state.last_trace:
311
- _render_trace(st.session_state.last_trace)
312
- st.divider()
313
- _render_result(st.session_state.last_result)
314
- else:
315
- st.markdown("### 📊 Agent Output")
316
- st.markdown(
317
- "<div style='background:#F8F9FA; border-radius:12px;"
318
- "padding:52px 30px; text-align:center; color:#aaa;'>"
319
- "<div style='font-size:3rem'>⚡</div>"
320
- "<div style='margin-top:10px; font-size:.95rem; line-height:1.6'>"
321
- "Select a sample or type a message,<br>"
322
- "then click <strong>Run Pipeline</strong>."
323
- "</div></div>",
324
- unsafe_allow_html=True,
325
- )
326
-
327
- # ── Data tables ───────────────────────────────────────────────────────────
328
- st.divider()
329
- icon = SHEET_ICONS.get(active_sheet, "📄")
330
- st.markdown(f"### {icon} {active_sheet}")
331
- _render_table(active_sheet)
332
-
333
-
334
- if __name__ == "__main__":
335
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/agent_memory.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "recent_customers": [],
3
+ "recent_items": [
4
+ "t-shirt",
5
+ "dal",
6
+ "silk kurti (red)",
7
+ "silk kurti",
8
+ "tel",
9
+ "aata"
10
+ ]
11
+ }
data/notiflow_data.xlsx ADDED
Binary file (10 kB). View file
 
models/gemini_client.py ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ models/gemini_client.py
3
+ -----------------------
4
+ Gemini API client for Notiflow.
5
+
6
+ Serves two distinct roles:
7
+
8
+ 1. FALLBACK REASONER — used by ModelRouter when Nova is unavailable.
9
+ generate(prompt) returns a plain-text response that must match
10
+ Nova's JSON output schema exactly (enforced by the prompt).
11
+
12
+ 2. NOTIFICATION GENERATOR — used by notification_generator.py to
13
+ produce realistic Hinglish business notifications for demo mode.
14
+ generate_notifications(n) returns a list of notification dicts.
15
+
16
+ Configuration
17
+ -------------
18
+ GEMINI_API_KEY — from .env / environment variable
19
+
20
+ Dependencies
21
+ ------------
22
+ pip install google-generativeai
23
+ """
24
+
25
+ from __future__ import annotations
26
+
27
+ import json
28
+ import logging
29
+ import re
30
+ from typing import Any
31
+
32
+ from app.config import GEMINI_API_KEY
33
+
34
+ logger = logging.getLogger(__name__)
35
+
36
+ # Gemini model to use — flash is fast and sufficient for this use case
37
+ _GEMINI_MODEL = "gemini-2.5-flash"
38
+
39
+ _client = None # lazy singleton
40
+
41
+
42
def _get_client():
    """Return a cached Gemini ``GenerativeModel`` instance (lazy singleton).

    The client is created on first use and reused for all later calls.

    Returns:
        A configured ``google.generativeai.GenerativeModel`` for
        ``_GEMINI_MODEL``.

    Raises:
        RuntimeError: If GEMINI_API_KEY is missing, the SDK is not
            installed, or client initialisation fails.
    """
    global _client
    if _client is not None:
        return _client

    if not GEMINI_API_KEY:
        raise RuntimeError(
            "GEMINI_API_KEY is not set. "
            "Add it to your .env file: GEMINI_API_KEY=your_key_here"
        )

    try:
        import google.generativeai as genai
    except ImportError as exc:
        # Fix: chain the original ImportError so the real failure (missing
        # package vs. broken install) stays visible in the traceback.
        raise RuntimeError(
            "google-generativeai is not installed. "
            "Run: pip install google-generativeai"
        ) from exc

    try:
        genai.configure(api_key=GEMINI_API_KEY)
        _client = genai.GenerativeModel(_GEMINI_MODEL)
    except Exception as exc:
        raise RuntimeError(f"Failed to initialise Gemini client: {exc}") from exc

    logger.info("Gemini client initialised (model=%s)", _GEMINI_MODEL)
    return _client
67
+
68
+
69
+ def _strip_fences(text: str) -> str:
70
+ """Strip markdown code fences from model output."""
71
+ return re.sub(r"```(?:json)?|```", "", text).strip()
72
+
73
+
74
+ # ---------------------------------------------------------------------------
75
+ # Public API — Role 1: Fallback reasoner for ModelRouter
76
+ # ---------------------------------------------------------------------------
77
+
78
def generate(prompt: str) -> str:
    """
    Send a prompt to Gemini and return the fence-stripped text reply.

    Used by ModelRouter as a Nova fallback. The prompt is expected to
    instruct the model to emit JSON; this function does not parse it —
    it hands the raw string back to the caller.

    Args:
        prompt: Fully rendered prompt (same prompt sent to Nova).

    Returns:
        Raw text response from Gemini (may contain JSON).

    Raises:
        RuntimeError: If the API call fails or client cannot be initialised.
    """
    model = _get_client()
    try:
        reply = model.generate_content(prompt)
        text = reply.text or ""
        logger.debug("Gemini raw response: %r", text[:200])
        return _strip_fences(text)
    except Exception as exc:
        logger.error("Gemini generate() failed: %s", exc)
        raise RuntimeError(f"Gemini API error: {exc}") from exc
104
+
105
+
106
+ # ---------------------------------------------------------------------------
107
+ # Public API — Role 2: Notification generator
108
+ # ---------------------------------------------------------------------------
109
+
110
+ _NOTIFICATION_PROMPT = """
111
+ You are simulating incoming business notifications for a small business in India.
112
+
113
+ Generate {n} realistic business notifications in Hinglish (Hindi + English mix).
114
+
115
+ Each notification must come from one of these sources:
116
+ - whatsapp (informal text from customers or suppliers)
117
+ - payment (UPI payment confirmation message)
118
+ - amazon (marketplace order or return notification)
119
+ - return (customer return or exchange request)
120
+
121
+ Each notification must represent ONE of these business events:
122
+ - An order for a product (item name + quantity)
123
+ - A payment received (person name + amount)
124
+ - A credit/udhar request
125
+ - A return or exchange request
126
+ - A preparation/packing request
127
+
128
+ Rules:
129
+ - Use natural Hinglish phrasing. Not too formal.
130
+ - Vary sources and event types.
131
+ - Include real-sounding names (Rahul, Priya, Suresh, Amit, etc.)
132
+ - Include real product names (kurti, aata, daal, saree, etc.)
133
+ - Do NOT include any explanation or preamble.
134
+ - Return ONLY a valid JSON array, nothing else.
135
+
136
+ Output format (JSON array only, no markdown):
137
+ [
138
+ {{"source": "whatsapp", "message": "bhaiya 3 kurti bhej dena"}},
139
+ {{"source": "payment", "message": "Rahul ne 15000 bheja UPI se"}}
140
+ ]
141
+ """
142
+
143
+
144
def generate_notifications(n: int = 5) -> list[dict[str, str]]:
    """
    Generate n realistic Hinglish business notifications via Gemini.

    Args:
        n: Number of notifications to generate (default 5).

    Returns:
        List of dicts with keys "source" and "message". Returns an empty
        list on any failure — generation is best-effort, never fatal.

    Example:
        >>> generate_notifications(3)
        [
            {"source": "whatsapp", "message": "bhaiya 3 kurti bhej dena"},
            {"source": "payment", "message": "Rahul ne 15000 bheja"},
            {"source": "return", "message": "size chota hai exchange karna hai"},
        ]
    """
    rendered = _NOTIFICATION_PROMPT.format(n=n)
    try:
        payload = json.loads(generate(rendered))
        if not isinstance(payload, list):
            logger.warning("Gemini notification response was not a list: %s", type(payload))
            return []
        # Keep only well-formed entries; silently drop anything malformed.
        notifications: list[dict[str, str]] = []
        for entry in payload:
            if isinstance(entry, dict) and "source" in entry and "message" in entry:
                notifications.append(entry)
        logger.info("Generated %d notifications via Gemini", len(notifications))
        return notifications
    except json.JSONDecodeError as exc:
        logger.warning("Could not parse Gemini notification output as JSON: %s", exc)
        return []
    except Exception as exc:
        logger.warning("generate_notifications failed: %s", exc)
        return []
package-lock.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Notiflow",
3
+ "lockfileVersion": 3,
4
+ "requires": true,
5
+ "packages": {}
6
+ }
services/excel_sync.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ services/excel_sync.py
3
+ ----------------------
4
+ Semantic Excel sync wrapper for Notiflow.
5
+
6
+ Wraps utils/excel_writer.append_row() with business-named functions so
7
+ skills and the FastAPI backend can call append_order(), append_payment()
8
+ etc. without knowing the sheet schema details.
9
+
10
+ Excel writes always go to EXCEL_SYNC_FILE from app/config.py.
11
+ If EXCEL_FILE_PATH env var is set, that path is used; otherwise falls
12
+ back to the default DATA_FILE path (data/notiflow_data.xlsx).
13
+
14
+ No Excel writing logic is duplicated here — this is a thin adapter only.
15
+
16
+ Public API
17
+ ----------
18
+ append_order(event_dict) — writes to Orders sheet
19
+ append_payment(event_dict) — writes to Ledger sheet (type="payment")
20
+ append_return(event_dict) — writes to Returns sheet
21
+ append_credit(event_dict) — writes to Ledger sheet (type="credit")
22
+ append_inventory(record) — writes to Inventory sheet
23
+ append_invoice(record) — writes to Invoices sheet
24
+ sync_from_event(result) — auto-routes based on intent (convenience)
25
+ """
26
+
27
+ from __future__ import annotations
28
+
29
+ import logging
30
+ from pathlib import Path
31
+
32
+ from app.config import EXCEL_SYNC_FILE
33
+ from utils.excel_writer import append_row, EXCEL_FILE as _DEFAULT_FILE
34
+
35
+ logger = logging.getLogger(__name__)
36
+
37
+
38
def _active_file() -> Path:
    """
    Resolve the Excel workbook path currently in effect.

    EXCEL_SYNC_FILE (driven by the EXCEL_FILE_PATH env var via app/config.py)
    takes priority over the writer's default location.
    """
    return Path(EXCEL_SYNC_FILE)
44
+
45
+
46
+ # ---------------------------------------------------------------------------
47
+ # Sheet-specific append functions
48
+ # ---------------------------------------------------------------------------
49
+
50
def append_order(event_dict: dict) -> None:
    """
    Write one order row to the Orders sheet.

    Args:
        event_dict: The "order" sub-dict from an order skill event.
            Expected keys: order_id, timestamp, customer, item,
            quantity, status.
    """
    copied_fields = ("order_id", "timestamp", "customer", "item", "quantity")
    record = {field: event_dict.get(field) for field in copied_fields}
    record["status"] = event_dict.get("status", "pending")
    append_row("Orders", record)
    logger.info("Excel sync → Orders: %s", record.get("order_id"))
69
+
70
+
71
def append_payment(event_dict: dict) -> None:
    """
    Write one payment row to the Ledger sheet (type="payment").

    Args:
        event_dict: The "payment" sub-dict from a payment skill event.
            Expected keys: entry_id, timestamp, customer, amount,
            payment_type, status.
    """
    # item/quantity stay None — payments don't move stock.
    record = dict(
        entry_id=event_dict.get("entry_id"),
        timestamp=event_dict.get("timestamp"),
        type="payment",
        customer=event_dict.get("customer"),
        item=None,
        quantity=None,
        amount=event_dict.get("amount"),
        payment_type=event_dict.get("payment_type"),
        status=event_dict.get("status", "received"),
    )
    append_row("Ledger", record)
    logger.info("Excel sync → Ledger (payment): customer=%s", record.get("customer"))
93
+
94
+
95
def append_return(event_dict: dict) -> None:
    """
    Write one return row to the Returns sheet.

    Args:
        event_dict: The "return" sub-dict from a return skill event.
            Expected keys: return_id, timestamp, customer, item,
            reason, status.
    """
    copied_fields = ("return_id", "timestamp", "customer", "item", "reason")
    record = {field: event_dict.get(field) for field in copied_fields}
    record["status"] = event_dict.get("status", "pending_review")
    append_row("Returns", record)
    logger.info("Excel sync → Returns: %s", record.get("return_id"))
114
+
115
+
116
def append_credit(event_dict: dict) -> None:
    """
    Write one credit (udhar) row to the Ledger sheet (type="credit").

    Args:
        event_dict: The "credit" sub-dict from a credit skill event.
    """
    # payment_type stays None — credit entries record goods owed, not money paid.
    record = dict(
        entry_id=event_dict.get("entry_id"),
        timestamp=event_dict.get("timestamp"),
        type="credit",
        customer=event_dict.get("customer"),
        item=event_dict.get("item"),
        quantity=event_dict.get("quantity"),
        amount=event_dict.get("amount"),
        payment_type=None,
        status=event_dict.get("status", "open"),
    )
    append_row("Ledger", record)
    logger.info("Excel sync → Ledger (credit): customer=%s", record.get("customer"))
136
+
137
+
138
def append_inventory(record: dict) -> None:
    """
    Write one stock-movement row to the Inventory sheet.

    Args:
        record: Dict with keys: timestamp, item, change, direction,
            reference_id, note.
    """
    append_row("Inventory", record)
    direction = record.get("direction")
    item = record.get("item")
    change = record.get("change")
    logger.info("Excel sync → Inventory: %s %s (%s)", direction, item, change)
151
+
152
+
153
def append_invoice(record: dict) -> None:
    """
    Write one invoice row to the Invoices sheet.

    Args:
        record: Invoice dict from invoice_service.generate_invoice().
    """
    append_row("Invoices", record)
    logger.info("Excel sync → Invoices: %s", record.get("invoice_id"))
162
+
163
+
164
+ # ---------------------------------------------------------------------------
165
+ # Convenience router — auto-dispatches based on orchestrator result intent
166
+ # ---------------------------------------------------------------------------
167
+
168
def sync_from_event(result: dict) -> None:
    """
    Automatically sync an orchestrator result to the correct Excel sheet(s).

    Called by the FastAPI notification handler after skill execution so
    the ledger is always up to date without skills needing to know about
    the sync layer.

    Note: Skills already write to Excel internally (Stages 5-6).
    This function provides a secondary sync point for the FastAPI path
    when skills are called via run_notiflow() in demo mode (where skills
    don't execute and nothing is written). In live mode, this is a no-op
    safety net — a skill-generated ID on the event sub-dict proves the
    row was already persisted, so duplicates are avoided.

    Args:
        result: Full orchestrator result dict
            {message, intent, data, event}
    """
    intent = result.get("intent", "other")
    event = result.get("event", {})

    # intent -> (event sub-dict key, ID field that proves the skill
    # already persisted the row). Replaces the former copy-paste if/elif
    # chain; also drops an unused `event_name` local.
    _PERSISTED_MARKERS = {
        "order": ("order", "order_id"),
        "payment": ("payment", "entry_id"),
        "return": ("return", "return_id"),
        "credit": ("credit", "entry_id"),
    }

    marker = _PERSISTED_MARKERS.get(intent)
    if marker is not None:
        sub_key, id_field = marker
        sub = event.get(sub_key)
        if sub and sub.get(id_field):
            # Live mode: the skill wrote this row itself — skip.
            logger.debug(
                "sync_from_event: %s already persisted by skill, skipping.",
                intent,
            )
            return

    logger.debug(
        "sync_from_event: demo mode result or missing IDs — "
        "no additional Excel write needed."
    )
services/notification_generator.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ services/notification_generator.py
3
+ -----------------------------------
4
+ Gemini-powered business notification generator for Notiflow.
5
+
6
+ Purpose: generate realistic Hinglish business notifications for demo
7
+ automation. This is entirely optional — the frontend simulation continues
8
+ to work independently.
9
+
10
+ Two modes:
11
+ 1. Live Gemini generation — calls Gemini API (requires GEMINI_API_KEY)
12
+ 2. Static fallback pool — returns from a hardcoded set when Gemini
13
+ is unavailable (safe for offline demos)
14
+
15
+ Public API
16
+ ----------
17
+ get_notifications(n: int = 5) -> list[dict]
18
+ Returns a list of notification dicts:
19
+ [{"source": "whatsapp", "message": "..."}, ...]
20
+
21
+ stream_notifications(n, delay_seconds) -> AsyncGenerator
22
+ Async generator yielding one notification at a time with a delay.
23
+ Used by the WebSocket endpoint.
24
+ """
25
+
26
+ from __future__ import annotations
27
+
28
+ import asyncio
29
+ import logging
30
+ import random
31
+ from typing import AsyncGenerator
32
+
33
+ logger = logging.getLogger(__name__)
34
+
35
+ # ---------------------------------------------------------------------------
36
+ # Static fallback pool (used when Gemini is unavailable)
37
+ # ---------------------------------------------------------------------------
38
+
39
# Hand-curated Hinglish notifications covering every intent the extraction
# layer knows (orders, payments, returns, credit/udhar, preparation) so the
# demo exercises all code paths even with no Gemini access.
_FALLBACK_NOTIFICATIONS: list[dict] = [
    {"source": "whatsapp", "message": "bhaiya 3 kurti bhej dena"},
    {"source": "payment", "message": "rahul ne 15000 bheja UPI se"},
    {"source": "whatsapp", "message": "size chota hai exchange karna hai"},
    {"source": "whatsapp", "message": "udhar me de dijiye"},
    {"source": "amazon", "message": "priya ke liye 2 kilo aata bhej dena"},
    {"source": "payment", "message": "amit bhai ka 8000 gpay se aaya"},
    {"source": "whatsapp", "message": "3 kurti ka set ready rakhna"},
    {"source": "return", "message": "maal kharab tha wapas bhej diya"},
    {"source": "whatsapp", "message": "suresh ko 500 ka maal udhar dena"},
    {"source": "amazon", "message": "order cancel karna hai, size bada hai"},
    {"source": "payment", "message": "50 piece pack karke rakhna kal tak"},
    {"source": "whatsapp", "message": "geeta ke liye 5 metre kapda bhej dena"},
]
53
+
54
+
55
def _get_fallback(n: int) -> list[dict]:
    """Return *n* notifications drawn at random from the static pool.

    The pool is tiled so it always holds at least ``n`` entries; sampling
    from the tiled copy keeps results varied while allowing repeats once
    ``n`` exceeds the pool size.
    """
    repeats = n // len(_FALLBACK_NOTIFICATIONS) + 1
    tiled = _FALLBACK_NOTIFICATIONS * repeats
    sample_size = min(n, len(tiled))
    return random.sample(tiled, sample_size)
59
+
60
+
61
+ # ---------------------------------------------------------------------------
62
+ # Public API
63
+ # ---------------------------------------------------------------------------
64
+
65
def get_notifications(n: int = 5) -> list[dict]:
    """
    Return n business notifications.

    Attempts live Gemini generation first; on any failure (missing key,
    import error, network issue) or an empty result, silently falls back
    to the static pool so offline demos keep working.

    Args:
        n: Number of notifications to generate/return.

    Returns:
        List of {"source": str, "message": str} dicts.
    """
    try:
        # Imported lazily so the module loads even without the Gemini
        # client or its credentials installed.
        from models.gemini_client import generate_notifications

        generated = generate_notifications(n)
    except Exception as exc:
        logger.info("Gemini unavailable (%s) — using fallback pool.", exc)
    else:
        if generated:
            return generated
        logger.info("Gemini returned empty list — using fallback pool.")

    return _get_fallback(n)
87
+
88
+
89
async def stream_notifications(
    n: int = 5,
    delay_seconds: float = 2.0,
) -> AsyncGenerator[dict, None]:
    """
    Async generator that yields one notification at a time.

    Fetches a fresh batch from get_notifications() then yields the entries
    one-by-one with ``delay_seconds`` between consecutive items.

    Args:
        n: Number of notifications per batch.
        delay_seconds: Pause between yielded notifications.

    Yields:
        {"source": str, "message": str}
    """
    # Sleep *between* items rather than after every yield: the previous
    # version slept once more after the final notification, making every
    # consumer wait a pointless trailing delay before the generator
    # finished. Inter-item spacing is unchanged.
    for index, notification in enumerate(get_notifications(n)):
        if index:
            await asyncio.sleep(delay_seconds)
        yield notification