Commit
Β·
da8460e
1
Parent(s):
c9d2117
refactor(strategist): Simplify chat prompt for small LLMs
Browse files — core/strategist.py (+17 −37)
core/strategist.py
CHANGED
|
@@ -124,72 +124,52 @@ Summary:
|
|
| 124 |
|
| 125 |
def generate_chat_response(self, prompt: str, context: str) -> str:
|
| 126 |
"""
|
| 127 |
-
RAG-Enabled Chat Response
|
|
|
|
| 128 |
"""
|
| 129 |
print(f"--- Strategist Skill (Chat): Processing: '{prompt}'")
|
| 130 |
|
| 131 |
-
#
|
| 132 |
if SafetyGuard and not SafetyGuard.validate_input(prompt):
|
| 133 |
-
return "I cannot
|
| 134 |
|
| 135 |
-
#
|
| 136 |
retrieved_knowledge = ""
|
| 137 |
if self.store:
|
| 138 |
try:
|
| 139 |
print(" - π Searching knowledge base...")
|
| 140 |
-
# Search DB for relevant context
|
| 141 |
kb_docs = self.store.search(prompt, n_results=1)
|
| 142 |
-
if kb_docs:
|
| 143 |
-
retrieved_knowledge = f"\
|
| 144 |
except Exception as e:
|
| 145 |
print(f" - β οΈ RAG Search Warning: {e}")
|
| 146 |
|
| 147 |
-
|
| 148 |
-
[SYSTEM]
|
| 149 |
-
You are a
|
| 150 |
|
| 151 |
-
|
| 152 |
{context}
|
| 153 |
-
|
| 154 |
{retrieved_knowledge}
|
| 155 |
|
| 156 |
-
|
| 157 |
-
{prompt}
|
| 158 |
|
| 159 |
-
|
| 160 |
"""
|
| 161 |
try:
|
|
|
|
| 162 |
response = self.llm(
|
| 163 |
master_prompt,
|
| 164 |
max_tokens=500,
|
| 165 |
-
temperature=0.
|
| 166 |
-
stop=["
|
| 167 |
echo=False
|
| 168 |
)
|
| 169 |
return response['choices'][0]['text'].strip()
|
| 170 |
except Exception as e:
|
| 171 |
traceback.print_exc()
|
| 172 |
-
return "
|
| 173 |
-
|
| 174 |
-
def generate_dashboard_insights(self, kpis: Dict[str, Any]) -> str:
|
| 175 |
-
print(f"--- Strategist Skill (Insights): Received KPIs: {kpis}")
|
| 176 |
-
prompt = f"""
|
| 177 |
-
[SYSTEM]
|
| 178 |
-
You are a senior data analyst at Reachify...
|
| 179 |
|
| 180 |
-
[YOUR INSIGHTFUL BULLET POINTS]
|
| 181 |
-
- """
|
| 182 |
-
try:
|
| 183 |
-
response = self.llm(prompt, max_tokens=250, temperature=0.7, stop=["[SYSTEM]", "Human:", "\n\n"], echo=False)
|
| 184 |
-
insight_text = response['choices'][0]['text'].strip()
|
| 185 |
-
if not insight_text.startswith('-'):
|
| 186 |
-
insight_text = '- ' + insight_text
|
| 187 |
-
print("--- Strategist Skill (Insights): Successfully received response from LLM.")
|
| 188 |
-
return insight_text
|
| 189 |
-
except Exception as e:
|
| 190 |
-
print(f"--- Strategist Skill (Insights) ERROR: {e}")
|
| 191 |
-
traceback.print_exc()
|
| 192 |
-
return "- Could not generate AI insights due to an internal model error."
|
| 193 |
|
| 194 |
def generate_analytics_insights(self, analytics_data: dict) -> str:
|
| 195 |
"""
|
|
|
|
| 124 |
|
| 125 |
def generate_chat_response(self, prompt: str, context: str) -> str:
    """
    [SIMPLIFIED PROMPT VERSION] RAG-Enabled Chat Response for small models.

    Combines the dashboard context, any retrieved knowledge-base snippet,
    and the user's question into one plain, human-readable prompt — no
    [SYSTEM]/[CONTEXT] tag markup, which small LLMs tend to echo back.

    Args:
        prompt:  The user's raw question.
        context: Pre-formatted summary of the user's current data.

    Returns:
        The model's answer as a stripped string, or a fixed refusal /
        apology message on guardrail rejection or internal error.
    """
    print(f"--- Strategist Skill (Chat): Processing: '{prompt}'")

    # Safety guardrail: refuse before spending any model tokens.
    if SafetyGuard and not SafetyGuard.validate_input(prompt):
        return "I cannot respond to this query as it may contain restricted content."

    # RAG retrieval: best-effort — a search failure must never break chat.
    retrieved_knowledge = ""
    if self.store:
        try:
            print(" - 🔍 Searching knowledge base...")
            kb_docs = self.store.search(prompt, n_results=1)
            # NOTE(review): assumes search() returns a list of text
            # snippets (kb_docs[0] is interpolated directly) — confirm.
            if kb_docs and kb_docs[0]:
                retrieved_knowledge = f"\nHere is some additional information that might be relevant:\n{kb_docs[0]}"
        except Exception as e:
            print(f" - ⚠️ RAG Search Warning: {e}")

    # A much simpler, direct prompt for the small AI model:
    # no [SYSTEM] or [CONTEXT] tags — just plain, direct instructions.
    master_prompt = f"""You are a helpful AI marketing strategist. Below is some context about the user's current situation and their question. Answer the user's question directly and professionally.

Context about the user's data:
{context}
{retrieved_knowledge}

User's Question: "{prompt}"

Your Answer:
"""
    try:
        # Plain string (no placeholders) — an f-string here was redundant.
        print(" - 📞 Calling LLM with simplified prompt...")
        response = self.llm(
            master_prompt,
            max_tokens=500,
            temperature=0.6,
            # Stop sequences keep the model from role-playing the next turn.
            # NOTE(review): "\n\n" also truncates answers at the first
            # paragraph break — confirm this is intended for small models.
            stop=["User's Question:", "Context:", "\n\n"],
            echo=False,
        )
        return response['choices'][0]['text'].strip()
    except Exception:
        # The bound exception was unused; the full traceback is what we log.
        traceback.print_exc()
        return "I am sorry, but an internal error occurred while processing your request in the AI module."
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 172 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 173 |
|
| 174 |
def generate_analytics_insights(self, analytics_data: dict) -> str:
|
| 175 |
"""
|