# reachify-ai-service / core/strategist.py
# (amitbhatt6075 — feat: added dynamic config support for ai endpoints, commit cd0ab55)
# FILE: ai-service/core/strategist.py (REPLACE EVERYTHING IN YOUR FILE WITH THIS)
import traceback
from typing import Dict, Any, List
import json
import re
from llama_cpp import Llama
try:
from core.guardrails.safety import SafetyGuard
except ImportError:
SafetyGuard = None
print("⚠️ Safety module not found. Skipping checks.")
class AIStrategist:
def __init__(self, llm_instance: Llama, store=None):
if llm_instance is None:
raise ValueError("AIStrategist requires a valid Llama instance.")
self.llm = llm_instance
self.store = store
print("--- AIStrategist initialized successfully (RAG Ready). ---")
def generate_campaign_brief(self, brand_name: str, campaign_goal: str, target_audience: str, budget_range: str) -> Dict[str, Any]:
"""
[MOCK MODE] Generates a reliable, high-quality campaign brief.
Bypasses the unreliable small LLM to ensure a professional UI experience.
"""
print(f"--- Strategist Skill (MOCK MODE): Generating static campaign brief for brand '{brand_name}'.")
try:
# Create a dynamic but predictable title
title = f"{brand_name}'s {campaign_goal.split(' ')[-1].capitalize()} Campaign"
# Create a description based on the goal
description = f"A targeted campaign to {campaign_goal.lower()}, aimed at {target_audience}. The focus will be on authentic, engaging content that resonates with the core demographic."
# Determine a simple KPI
goal_kpi = "Engagement Rate"
if "sales" in campaign_goal.lower() or "conversions" in campaign_goal.lower():
goal_kpi = "Conversion Rate"
elif "awareness" in campaign_goal.lower() or "reach" in campaign_goal.lower():
goal_kpi = "Total Reach & Impressions"
# Static but high-quality content ideas
content_guidelines = [
f"Create a short-form video (e.g., Reel/TikTok) showcasing the product in a real-life scenario.",
f"Publish an authentic photo carousel with a caption that tells a personal story related to the product.",
f"Engage with the community by running an interactive poll or Q&A in Stories."
]
# Construct the final JSON object
json_response = {
"title": title,
"description": description,
"goal_kpi": goal_kpi,
"content_guidelines": content_guidelines
}
print("--- Strategist Skill (MOCK MODE): Successfully generated static brief.")
return json_response
except Exception as e:
print(f"--- Strategist Skill MOCK ERROR: {e}")
return {"error": "An internal error occurred in the mock data generator."}
def generate_strategy_from_prompt(self, user_prompt: str, config=None) -> str:
"""
Generates a strategy using Dynamic Config from Backend.
"""
print(f"--- Strategist Skill (General): Received prompt. Config: {config}")
# 1. Temperature Setup (Default 0.75 if config missing)
temp = config.temperature if config else 0.75
# 2. System Prompt Logic
final_prompt = user_prompt
# Agar admin ne koi 'System Prompt' set kiya hai (Jaise "Be concise"), to use jod do
if config and config.system_prompt:
# Llama model format: [SYSTEM] instruction [USER] input
final_prompt = f"[SYSTEM]\n{config.system_prompt}\n\n[USER]\n{user_prompt}"
try:
# 3. Call Llama with DYNAMIC Temp
response = self.llm(
final_prompt,
max_tokens=750,
temperature=temp, # ✅ Magic yahan hai!
stop=["User:", "Client:", "System:"],
)
generated_text = response['choices'][0]['text'].strip()
print("--- Strategist Skill (General): Response Generated.")
return generated_text
except Exception as e:
print(f"--- Strategist Skill (General) ERROR: {e}")
traceback.print_exc()
return "An error occurred in the AI model while generating the strategy."
def generate_weekly_summary(self, metrics: Dict[str, Any]) -> str:
"""
Generates a concise, human-readable weekly summary from structured metrics data.
"""
print(f"--- Strategist Skill (Summary): Received metrics for brand {metrics.get('brand_id')}")
prompt_template = f"""
You are an expert digital marketing analyst writing a weekly summary for a client. Your tone should be positive, encouraging, and easy to understand. Do not use jargon. Focus on the key results and what they mean.
Client's Performance Data for the week of {metrics.get('start_date')} to {metrics.get('end_date')}:
- Total Ad Spend: ${metrics.get('total_ad_spend', 0):.2f}
- Clicks from Ads: {metrics.get('total_clicks', 0)}
- New Social Media Followers: {metrics.get('new_followers', 0)}
- Top Performing Campaign this week: "{metrics.get('top_performing_campaign', 'N/A')}"
Based on this data, write a short summary (about 3-4 sentences). Start with a positive opening and end with an encouraging closing statement.
Summary:
"""
print("--- Strategist Skill (Summary): Sending composed prompt to LLM...")
try:
# === INVOKE FIX #1 ===
response = self.llm(
prompt_template,
max_tokens=250,
temperature=0.6,
stop=["Client:", "Data:"],
echo=False
)
summary_text = response['choices'][0]['text'].strip()
print("--- Strategist Skill (Summary): Received response from LLM.")
if not summary_text:
return "The AI model returned an empty summary."
return summary_text
except Exception as e:
print(f"--- Strategist Skill (Summary) ERROR: {e}")
traceback.print_exc()
return "An error occurred in the AI model while generating the weekly summary."
def generate_chat_response(self, prompt: str, context: str) -> str:
"""
[SIMPLIFIED PROMPT VERSION] RAG-Enabled Chat Response for small models.
Combines all info into a simple, human-readable prompt.
"""
print(f"--- Strategist Skill (Chat): Processing: '{prompt}'")
# Safety Guardrail remains the same
if SafetyGuard and not SafetyGuard.validate_input(prompt):
return "I cannot respond to this query as it may contain restricted content."
# RAG Retrieval remains the same
retrieved_knowledge = ""
if self.store:
try:
print(" - 🔍 Searching knowledge base...")
kb_docs = self.store.search(prompt, n_results=1)
if kb_docs and kb_docs[0]:
retrieved_knowledge = f"\nHere is some additional information that might be relevant:\n{kb_docs[0]}"
except Exception as e:
print(f" - ⚠️ RAG Search Warning: {e}")
# ✅ THE FIX IS HERE: A much simpler, direct prompt for the small AI model.
# Koi [SYSTEM] ya [CONTEXT] nahi, sirf seedhi baat.
master_prompt = f"""You are a helpful AI marketing strategist. Below is some context about the user's current situation and their question. Answer the user's question directly and professionally.
Context about the user's data:
{context}
{retrieved_knowledge}
User's Question: "{prompt}"
Your Answer:
"""
try:
print(f" - 📞 Calling LLM with simplified prompt...")
response = self.llm(
master_prompt,
max_tokens=500,
temperature=0.6,
stop=["User's Question:", "Context:", "\n\n"],
echo=False
)
return response['choices'][0]['text'].strip()
except Exception as e:
traceback.print_exc()
return "I am sorry, but an internal error occurred while processing your request in the AI module."
def generate_analytics_insights(self, analytics_data: dict) -> str:
"""
[BULLETPROOF "NON-AI" VERSION] This function no longer calls the LLM.
It generates a high-quality, dynamic summary using Python logic for 100% reliability.
"""
print(f"--- Strategist Skill (Analytics Insights): Generating summary with RELIABLE PYTHON logic.")
try:
reach = analytics_data.get('totalReach', 0)
engagement_rate = analytics_data.get('averageEngagementRate', 0.0)
top_influencer = analytics_data.get('topPerformingInfluencer', 'N/A')
insights = []
# Insight 1: Analyze Reach
if reach > 50000:
insights.append(f"- The campaign reached an impressive {reach:,} people, indicating strong initial visibility.")
elif reach > 0:
insights.append(f"- With a total reach of {reach:,}, the campaign is building a good foundation. Let's focus on amplifying this.")
else:
insights.append("- Campaign data is still being gathered. Initial reach numbers will appear here soon.")
# Insight 2: Analyze Engagement
if engagement_rate > 3.5:
insights.append(f"- The engagement rate is excellent at {engagement_rate:.2f}%. The content is clearly resonating with the audience.")
elif engagement_rate > 1.5:
insights.append(f"- The engagement rate of {engagement_rate:.2f}% is solid. A great next step is to encourage more comments and shares.")
else:
insights.append("- To boost the engagement rate, try creating more interactive content like polls or Q&A sessions.")
# Insight 3: Analyze Top Performer
if top_influencer and top_influencer != "N/A":
insights.append(f"- Analyze the content from '{top_influencer}'. Their successful strategy should be replicated with other creators.")
else:
insights.append("- It is crucial to identify top-performing influencers early. This helps in doubling down on successful strategies.")
final_summary = "\n".join(insights)
print(f" - ✅ Python-generated insights:\n{final_summary}")
return final_summary
except Exception as e:
print(f"--- Strategist Skill (Analytics Insights) PYTHON ERROR: {e}")
traceback.print_exc()
return "- Analytics summary is currently unavailable due to a system error."
def get_caption_assistance(self, caption: str, action: str, guidelines: str = None) -> str:
"""
Provides AI assistance for writing captions based on a specified action.
"""
print(f"--- Strategist Skill (Caption Assist): Received action: '{action}'")
system_prompt = "You are a helpful and creative social media marketing assistant for influencers. You are concise and direct."
if action == 'improve':
user_prompt = f"Make the following Instagram caption more engaging and impactful. Keep the core message but enhance the wording.\n\nOriginal:\n---\n{caption}\n\nImproved:"
elif action == 'hashtags':
user_prompt = f"Suggest a list of 7 relevant and trending hashtags for the following Instagram post. Provide ONLY the hashtags, starting with # and separated by spaces.\n\nPost Caption:\n---\n{caption}\n\nHashtags:"
elif action == 'check_guidelines' and guidelines:
user_prompt = f"Carefully check if the following caption meets ALL the rules in the provided guidelines. Be strict. First, respond with only 'YES' or 'NO'. Then, on a new line, explain which specific rules were broken, or confirm that all rules were followed.\n\nGuidelines:\n---\n{guidelines}\n\nCaption to Check:\n---\n{caption}\n\nAnalysis:"
else:
return "Invalid action or missing guidelines provided to the AI assistant."
full_prompt = f"[SYSTEM]\n{system_prompt}\n\n[USER]\n{user_prompt}\n\n[ASSISTANT]\n"
try:
print(f" - Calling LLM for caption assistance (action: {action})...")
response = self.llm(
full_prompt,
max_tokens=256,
temperature=0.7,
stop=["[USER]", "[SYSTEM]"],
echo=False
)
generated_text = response['choices'][0]['text'].strip()
print(f" - ✅ LLM generated response.")
return generated_text
except Exception as e:
print(f"--- Strategist Skill (Caption Assist) ERROR: {e}")
traceback.print_exc()
return "An error occurred while getting assistance from the AI."
def generate_influencer_analytics_summary(self, kpis: Dict[str, Any]) -> str:
"""
[BULLETPROOF "NON-AI" VERSION] This function no longer calls the LLM.
It generates a high-quality, dynamic summary using Python logic for 100% reliability.
"""
print(f"--- Strategist Skill (Influencer Analytics): Generating summary with RELIABLE PYTHON logic.")
try:
engagement_rate = kpis.get('avgEngagementRate', 0.0)
reach = kpis.get('totalReach', 0)
# --- DYNAMIC SENTENCE GENERATION LOGIC ---
# Part 1: Adjective for engagement
if engagement_rate > 5.0:
adjective = "excellent"
elif engagement_rate > 2.5:
adjective = "solid"
else:
adjective = "good"
# Part 2: Actionable tip based on the most impactful metric
if reach < 1000 and engagement_rate < 3.0:
actionable_tip = "try collaborating with another creator to cross-promote and grow your audience."
elif engagement_rate < 2.0:
actionable_tip = "try asking a question in your next caption to boost comments and engagement."
else:
actionable_tip = "try using a trending audio or format on your next Reel to expand your reach."
# Part 3: Combine everything into a perfect summary
summary = f"Your engagement rate is looking {adjective}, which is great for building community! To grow even further, {actionable_tip}"
print(f" - ✅ Python-generated summary: '{summary}'")
return summary
except Exception as e:
print(f"--- Strategist Skill (Influencer Analytics) PYTHON ERROR: {e}")
return "Analytics summary is currently unavailable due to a system error."
def generate_influencer_growth_plan(self, influencer_data: Dict[str, Any]) -> List[str]:
"""
Influencer ke live data ko analyze karke personalized growth tips deta hai. (CRASH-PROOF VERSION)
"""
print(f"--- Strategist Skill (Growth Plan): Influencer {influencer_data.get('fullName')} ke liye plan banaya ja raha hai.")
# --- FIX IS HERE: Hum pehle values ko aakhri mein badiya se handle kar rahe hain ---
# Pythonic tareeka: `get()` se value nikalo, agar 'None' hai to 'N/A' use karo.
best_caption = influencer_data.get('bestPostCaption') or 'N/A'
worst_caption = influencer_data.get('worstPostCaption') or 'N/A'
# --- END FIX ---
prompt = f"""
[INST] You are an expert social media coach. Analyze the following data for an influencer named {influencer_data.get('fullName')} and provide ONLY 3 short, actionable tips based on it. Start each tip on a new line.
- Niche: {influencer_data.get('category', 'Not specified')}
- Avg Engagement: {influencer_data.get('avgEngagementRate', 0.0):.2f}%
- Best Post was about: '{best_caption[:50]}'
- Worst Post was about: '{worst_caption[:50]}'
Your 3 tips:
[/INST]
"""
try:
print("--- Strategist Skill (Growth Plan): Simplified LLM ko call kiya jaa raha hai...")
response = self.llm(
prompt,
max_tokens=256,
temperature=0.7,
stop=["[INST]", "User:", "System:"],
echo=False
)
raw_text = response['choices'][0]['text'].strip()
tips = [tip.strip().lstrip('- ').lstrip('1. ').lstrip('2. ').lstrip('3. ') for tip in raw_text.split('\n') if tip.strip()]
print(f"--- Strategist Skill (Growth Plan): LLM se tips successfully generate ho gaye: {tips}")
return tips[:3]
except Exception as e:
print(f"--- Strategist Skill (Growth Plan) FATAL ERROR: {e}")
traceback.print_exc()
return ["AI Coach is currently unavailable due to a technical error."]
    def generate_service_blueprint(self, service_type: str, requirements: str) -> Dict[str, Any]:
        """
        Analyzes user requirements and generates a structured project blueprint using the LLM.
        (FINAL VERSION: Uses a "perfect example" in the prompt to force the AI into the correct summary format.)

        NOTE(review): this method is DEAD CODE — a second `generate_service_blueprint`
        is defined later in this class and shadows this one when the class body
        executes. Kept as-is pending a decision to delete or rename it.
        """
        # NOTE(review): redundant import; `re` is already imported at module level.
        import re
        print(f"--- Strategist Skill (Blueprint): Generating plan for '{service_type}' request.")
        # === THE DEFINITIVE PROMPT WITH A PERFECT EXAMPLE ===
        prompt = f"""
[SYSTEM]
You are an expert project planner for a top-tier digital agency.
Analyze the client's request below and generate a concise project blueprint.
YOU MUST FOLLOW THE FORMAT OF THE EXAMPLE BELOW EXACTLY.
- For DELIVERABLES, provide a list of 4-5 specific features separated by the "|" pipe character.
- For STACK, PRICE_EST, and TIMELINE, you MUST provide a single, summarized value. DO NOT provide a detailed itemized list for these.
[PERFECT EXAMPLE]
TITLE:: Modern E-Commerce Store for a Fashion Brand
DELIVERABLES:: Dynamic Product Catalog | Secure Shopping Cart & Checkout | User Account & Order History | Admin Dashboard for Managing Products
STACK:: Next.js & TailwindCSS (Frontend), Supabase (Backend)
PRICE_EST:: $8,000 - $12,000
TIMELINE:: 8-10 Weeks
[/PERFECT EXAMPLE]
[CLIENT REQUEST]
- Service Type: {service_type}
- Description: {requirements}
[YOUR BLUEPRINT]
TITLE:: """
        try:
            response_dict = self.llm(
                prompt,
                max_tokens=400,
                temperature=0.5,
                stop=["[CLIENT REQUEST]", "[SYSTEM]", "[/PERFECT EXAMPLE]"],
                echo=False
            )
            # The prompt ends with a "TITLE:: " stub, so re-attach it to rebuild
            # the full key::value document before parsing.
            raw_text = "TITLE:: " + response_dict['choices'][0]['text'].strip()
            print(f"--- Strategist Skill (Blueprint): Raw response from LLM:\n---\n{raw_text}\n---")
            # Initialize with default values
            blueprint = {
                'title': 'AI Generated Title',
                'deliverables': ['Analysis in progress...'],
                'stack': 'To be determined',
                'price_est': 'Pending',
                'timeline': 'Pending'
            }
            # Use regex to find all key::value pairs (value runs until the next
            # ALL-CAPS "KEY::" marker or end of text).
            pairs = re.findall(r'(\b[A-Z_]+\b)::(.*?)(?=\n\b[A-Z_]+\b::|$)', raw_text, re.DOTALL)
            for key, value in pairs:
                key = key.strip().upper()
                value = value.strip()
                if key == 'TITLE':
                    # Only take the first line for summary fields
                    blueprint['title'] = value.split('\n')[0].strip()
                elif key == 'STACK':
                    blueprint['stack'] = value.split('\n')[0].strip()
                elif key == 'PRICE_EST':
                    blueprint['price_est'] = value.split('\n')[0].strip()
                elif key == 'TIMELINE':
                    blueprint['timeline'] = value.split('\n')[0].strip()
                elif key == 'DELIVERABLES':
                    # Deliverables can be a list
                    deliverables_list = [d.strip() for d in value.split('|') if d.strip()]
                    if deliverables_list:
                        blueprint['deliverables'] = deliverables_list
            print(f"--- Strategist Skill (Blueprint): Successfully parsed with final parser. Result: {blueprint}")
            return blueprint
        except Exception as e:
            error_msg = f"A critical error occurred. Error: {e}"
            print(f"--- Strategist Skill FATAL ERROR: {error_msg}")
            return { 'title': 'Error Generating Plan', 'deliverables': ['AI model failed to respond.'], 'stack': 'N/A', 'price_est': 'N/A', 'timeline': 'N/A' }
def generate_service_blueprint(self, service_type: str, requirements: str) -> Dict[str, Any]:
"""
[MOCK MODE] Returns a static, well-formatted blueprint to ensure the UI works.
The AI model on the free tier is too weak to generate this reliably.
"""
print(f"--- Strategist Skill (MOCK MODE): Returning static blueprint for service type '{service_type}'.")
# Hum AI ko call hi nahi kar rahe. Hum ek manual, aacha sa jawaab bhej rahe hain.
if service_type == 'web-dev':
return {
'title': 'Custom Web Development Blueprint',
'deliverables': [
'Responsive UI/UX Design (Figma)',
'Frontend Development with Next.js',
'Backend API & Database Setup',
'User Authentication System',
'Deployment & Hosting Support'
],
'stack': 'Next.js, TailwindCSS, Supabase',
'price_est': '$5,000 - $8,000',
'timeline': '6-8 Weeks'
}
elif service_type == 'growth':
return {
'title': '3-Month Influencer Growth Plan',
'deliverables': [
'Weekly Content Strategy & Calendar',
'Proactive Brand Outreach (5 brands/month)',
'Monthly Performance Analytics Review',
'Post Optimization & SEO'
],
'stack': 'Notion, YouTube Studio, Analytics Tools',
'price_est': '$1,500 / month (Retainer)',
'timeline': '3-Month Initial Term'
}
else:
# Fallback agar koi aur service type aaye
return {
'title': 'Service Plan Not Found',
'deliverables': [f"The requested service '{service_type}' is currently not supported."],
'stack': 'N/A', 'price_est': 'N/A', 'timeline': 'N/A'
}
def _get_ai_response_and_parse(self, prompt: str) -> Dict[str, Any]:
"""
Internal helper to call the LLM and parse the key::value format robustly.
"""
try:
response_dict = self.llm(
prompt,
max_tokens=400,
temperature=0.5,
stop=["[CLIENT REQUEST]", "[SYSTEM]", "[/PERFECT EXAMPLE]", "\n\n", "**Part 2:**"],
echo=False
)
raw_text = "TITLE:: " + response_dict['choices'][0]['text'].strip()
print(f"--- AI Raw Response ---\n{raw_text}\n---")
blueprint = {
'title': 'AI Generated Plan',
'deliverables': ['Analysis in progress...'],
'stack': 'To be determined',
'price_est': 'Pending',
'timeline': 'Pending'
}
pairs = re.findall(r'(\b[A-Z_]+\b)::(.*?)(?=\n\b[A-Z_]+\b::|$)', raw_text, re.DOTALL)
for key, value in pairs:
key, value = key.strip().upper(), value.strip()
if not value: continue
if key == 'TITLE': blueprint['title'] = value.split('\n')[0].strip()
elif key == 'STACK': blueprint['stack'] = value.split('\n')[0].strip()
elif key == 'PRICE_EST': blueprint['price_est'] = value.split('\n')[0].strip()
elif key == 'TIMELINE': blueprint['timeline'] = value.split('\n')[0].strip()
elif key == 'DELIVERABLES':
deliverables_list = [d.strip() for d in value.split('|') if d.strip()]
if deliverables_list: blueprint['deliverables'] = deliverables_list
print(f"--- Parser Result ---: {blueprint}")
return blueprint
except Exception as e:
error_msg = f"A critical error occurred in the AI model or parser. Error: {e}"
print(f"--- AI FATAL ERROR: {error_msg}")
traceback.print_exc()
return {
'title': 'Error Generating Plan',
'deliverables': ['AI model failed to respond or there was a system error.'],
'stack': 'N/A', 'price_est': 'N/A', 'timeline': 'N/A'
}
def generate_weekly_content_plan(self, context: Dict[str, Any]) -> Dict[str, Any]:
"""
Generates 3 content options (MOCK MODE for Immediate Response).
Use this until server capacity is upgraded.
"""
print(f"--- Strategist Skill (Plan): Generating for '{context.get('niche')}'.")
niche = context.get("niche", "General")
trends = [t['name'] for t in context.get("active_trends", [])]
trend = trends[0] if trends else "Trending Audio"
# Simulate dynamic response based on inputs
return {
"options": [
{
"type": "Viral Bet",
"title": f"Reel: {trend} Challenge",
"platform": "Instagram",
"contentType": "Reel",
"instructions": f"Use the '{trend}' audio. Show a quick transition related to {niche}. Keep it under 15s.",
"reasoning": "High viral potential due to current trend momentum."
},
{
"type": "Community",
"title": "Story: Poll of the Day",
"platform": "Instagram",
"contentType": "Story",
"instructions": "Post a 'This or That' poll related to your niche. Engage with replies.",
"reasoning": "Boosts engagement rate by encouraging direct interaction."
},
{
"type": "Niche Authority",
"title": "Carousel: Top 3 Tips",
"platform": "Instagram",
"contentType": "Carousel",
"instructions": f"Share 3 lesser-known tips about {niche}. Use high-quality photos.",
"reasoning": "Establishes authority and saves value for followers."
}
]
}