ZBro7 committed on
Commit
fb3d2df
Β·
verified Β·
1 Parent(s): 3a32636

Update router.py

Browse files
Files changed (1) hide show
  1. router.py +42 -11
router.py CHANGED
@@ -7,14 +7,20 @@ from llm_clients import (
7
 
8
  from memory import save_message, load_memory
9
  from search_tool import search_web
 
 
10
  import requests
11
 
12
 
13
- # Optional: image microservice URL
14
  IMAGE_SPACE_URL = "https://your-image-space.hf.space/generate"
15
 
 
 
 
16
 
17
  def build_messages(system_prompt, memory, user_prompt):
 
18
  messages = []
19
 
20
  if system_prompt:
@@ -27,6 +33,7 @@ def build_messages(system_prompt, memory, user_prompt):
27
 
28
 
29
  def call_image_microservice(prompt):
 
30
  try:
31
  response = requests.post(
32
  IMAGE_SPACE_URL,
@@ -40,51 +47,73 @@ def call_image_microservice(prompt):
40
 
41
  def route_request(prompt, user_id):
42
 
 
 
 
 
 
 
 
 
43
  # ==========================
44
- # Image Command
45
  # ==========================
46
  if prompt.startswith("/image"):
47
  clean_prompt = prompt.replace("/image", "").strip()
48
  return call_image_microservice(clean_prompt)
49
 
50
  # ==========================
51
- # Load Memory
 
 
 
 
 
 
 
 
 
52
  # ==========================
53
  memory = load_memory(user_id)
54
 
55
  # ==========================
56
- # AI Classification
57
  # ==========================
58
  classification = classify_prompt(prompt)
59
 
60
  intent = classification.get("intent", "chat")
61
  needs_search = classification.get("needs_search", False)
62
 
63
- system_prompt = "You are ZXAI, a helpful advanced AI assistant."
64
 
65
  # ==========================
66
- # Greeting (fast return)
67
  # ==========================
68
  if intent == "greeting":
69
  response = "Hello πŸ‘‹ I am ZXAI. How can I assist you today?"
 
70
  save_message(user_id, "user", prompt)
71
  save_message(user_id, "assistant", response)
 
 
72
  return {"response": response}
73
 
74
  # ==========================
75
- # Reasoning β†’ Gemini
76
  # ==========================
77
  if intent == "reasoning":
 
78
  messages = build_messages(system_prompt, memory, prompt)
79
  response = call_gemini(messages)
80
 
81
  save_message(user_id, "user", prompt)
82
  save_message(user_id, "assistant", response)
83
 
 
84
  return {"response": response}
85
 
86
  # ==========================
87
- # Live Data
88
  # ==========================
89
  if intent == "live_data" or needs_search:
90
 
@@ -97,7 +126,7 @@ User Question:
97
  Web Data:
98
  {web_data}
99
 
100
- Use the web data if relevant.
101
  """
102
 
103
  messages = build_messages(system_prompt, memory, enriched_prompt)
@@ -112,10 +141,11 @@ Use the web data if relevant.
112
  save_message(user_id, "user", prompt)
113
  save_message(user_id, "assistant", final_answer)
114
 
 
115
  return {"response": final_answer}
116
 
117
  # ==========================
118
- # Default Chat β†’ Llama
119
  # ==========================
120
  messages = build_messages(system_prompt, memory, prompt)
121
 
@@ -124,5 +154,6 @@ Use the web data if relevant.
124
  save_message(user_id, "user", prompt)
125
  save_message(user_id, "assistant", response)
126
 
 
 
127
  return {"response": response}
128
-
 
7
 
8
  from memory import save_message, load_memory
9
  from search_tool import search_web
10
+ from rag_engine import rag_response
11
+
12
  import requests
13
 
14
 
15
+ # πŸ”₯ Image microservice endpoint
16
  IMAGE_SPACE_URL = "https://your-image-space.hf.space/generate"
17
 
18
+ # πŸ”₯ Simple in-memory cache
19
+ response_cache = {}
20
+
21
 
22
  def build_messages(system_prompt, memory, user_prompt):
23
+
24
  messages = []
25
 
26
  if system_prompt:
 
33
 
34
 
35
  def call_image_microservice(prompt):
36
+
37
  try:
38
  response = requests.post(
39
  IMAGE_SPACE_URL,
 
47
 
48
  def route_request(prompt, user_id):
49
 
50
+ cache_key = f"{user_id}:{prompt}"
51
+
52
+ # ==========================
53
+ # CACHE CHECK
54
+ # ==========================
55
+ if cache_key in response_cache:
56
+ return {"response": response_cache[cache_key]}
57
+
58
  # ==========================
59
+ # IMAGE COMMAND
60
  # ==========================
61
  if prompt.startswith("/image"):
62
  clean_prompt = prompt.replace("/image", "").strip()
63
  return call_image_microservice(clean_prompt)
64
 
65
  # ==========================
66
+ # RAG QUICK RESPONSE
67
+ # ==========================
68
+ rag_answer = rag_response(prompt)
69
+
70
+ if rag_answer:
71
+ response_cache[cache_key] = rag_answer
72
+ return {"response": rag_answer}
73
+
74
+ # ==========================
75
+ # LOAD MEMORY
76
  # ==========================
77
  memory = load_memory(user_id)
78
 
79
  # ==========================
80
+ # AI CLASSIFICATION
81
  # ==========================
82
  classification = classify_prompt(prompt)
83
 
84
  intent = classification.get("intent", "chat")
85
  needs_search = classification.get("needs_search", False)
86
 
87
+ system_prompt = "You are ZXAI, a powerful advanced AI assistant."
88
 
89
  # ==========================
90
+ # GREETING FAST PATH
91
  # ==========================
92
  if intent == "greeting":
93
  response = "Hello πŸ‘‹ I am ZXAI. How can I assist you today?"
94
+
95
  save_message(user_id, "user", prompt)
96
  save_message(user_id, "assistant", response)
97
+
98
+ response_cache[cache_key] = response
99
  return {"response": response}
100
 
101
  # ==========================
102
+ # REASONING β†’ GEMINI
103
  # ==========================
104
  if intent == "reasoning":
105
+
106
  messages = build_messages(system_prompt, memory, prompt)
107
  response = call_gemini(messages)
108
 
109
  save_message(user_id, "user", prompt)
110
  save_message(user_id, "assistant", response)
111
 
112
+ response_cache[cache_key] = response
113
  return {"response": response}
114
 
115
  # ==========================
116
+ # LIVE DATA / SEARCH
117
  # ==========================
118
  if intent == "live_data" or needs_search:
119
 
 
126
  Web Data:
127
  {web_data}
128
 
129
+ Use web data if helpful.
130
  """
131
 
132
  messages = build_messages(system_prompt, memory, enriched_prompt)
 
141
  save_message(user_id, "user", prompt)
142
  save_message(user_id, "assistant", final_answer)
143
 
144
+ response_cache[cache_key] = final_answer
145
  return {"response": final_answer}
146
 
147
  # ==========================
148
+ # DEFAULT CHAT β†’ LLAMA
149
  # ==========================
150
  messages = build_messages(system_prompt, memory, prompt)
151
 
 
154
  save_message(user_id, "user", prompt)
155
  save_message(user_id, "assistant", response)
156
 
157
+ response_cache[cache_key] = response
158
+
159
  return {"response": response}