import os
import re
import sys

from llama_cpp import Llama
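
# Make the project root importable so `core.rag.store` resolves even when
# this file is run directly rather than as part of the package.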
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)
from core.rag.store import VectorStore
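

# CreativeDirector pairs a preloaded llama_cpp.Llama model with a small
# vector store ("creative_mind") so chat replies can be grounded in
# retrieved notes (a lightweight RAG loop).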
class CreativeDirector:
    def __init__(self, llm_instance: Llama):
        if not llm_instance:
            raise ValueError("CreativeDirector received an invalid LLM instance.")
        print("🧠 Initializing AI Director with pre-loaded LLM...")
        self.llm = llm_instance
        self.memory = VectorStore(collection_name="creative_mind")
        print("✅ AI Director is Online.")

    def chat(self, user_message: str, history: list, task_context: str):
        """Main chat logic with RAG, optimized for speed."""
        print(" - 🧠 Thinking...")
        retrieved_docs = self.memory.search(user_message, n_results=1)
        expert_knowledge = retrieved_docs[0][:150] if retrieved_docs else "Be creative and direct."
        prompt = f"""Instruction: Act as a Viral Content Expert. Give 1 short tip for "{task_context}".
Context: {expert_knowledge}
User: {user_message}
Response:"""
        try:
            response = self.llm(
                prompt,
                max_tokens=50,
                stop=["Instruction:", "User:", "\n\n"],
                temperature=0.7,
                echo=False,
            )
            reply = response['choices'][0]['text'].strip()
            if not reply:
                return "Try showing a 'before vs after' comparison. It always works!"
            print(f" - 🗣️ Reply: {reply}")
            return reply
        except Exception as e:
            print(f" - ❌ AI Chat Error: {e}")
            return "My AI brain is a bit slow today. Please ask again!"

    def generate_final_plan(self, task_context: str, history: list):
        """Generate the final video plan, parsing plain text with safe fallbacks."""
        print(f" - 🎬 Generating final plan for: {task_context}")
        conversation_summary = "\n".join([f"- {msg['content']}" for msg in history[-3:]])
        prompt = f"""Instruction: Create a video script for "{task_context}".
Chat Summary: {conversation_summary}
Format your answer exactly like this:
HOOK: (Write hook here)
SCRIPT: (Write script here)
VISUALS: (Write visuals here)
TOOLS: (Write tools here)
Response:"""
        try:
            response = self.llm(
                prompt,
                max_tokens=300,
                stop=["Instruction:", "Response:"],
                temperature=0.6,
                echo=False,
            )
            raw_text = response['choices'][0]['text'].strip()
            print(f" - 🤖 Raw Text: {raw_text}")
            hook_match = re.search(r'HOOK:\s*(.*?)(?=\nSCRIPT:)', raw_text, re.DOTALL | re.IGNORECASE)
            script_match = re.search(r'SCRIPT:\s*(.*?)(?=\nVISUALS:)', raw_text, re.DOTALL | re.IGNORECASE)
            visuals_match = re.search(r'VISUALS:\s*(.*?)(?=\nTOOLS:)', raw_text, re.DOTALL | re.IGNORECASE)
            tools_match = re.search(r'TOOLS:\s*(.*)', raw_text, re.DOTALL | re.IGNORECASE)
            return {
                "hook": hook_match.group(1).strip() if hook_match else "Start with a bang!",
                "script": script_match.group(1).strip() if script_match else raw_text,
                "visuals": [visuals_match.group(1).strip()] if visuals_match else ["Talking Head"],
                "tools": [tools_match.group(1).strip()] if tools_match else ["CapCut"],
            }
        except Exception as e:
            print(f" - ❌ Final Plan Generation Error: {e}")
            return {
                "hook": "Error generating plan.",
                "script": "Please try again later.",
                "visuals": [],
                "tools": [],
            }
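

if __name__ == "__main__":
    # Minimal usage sketch. Assumptions: the GGUF model path below is a
    # placeholder, and the VectorStore "creative_mind" collection has been
    # populated elsewhere in the project.
    llm = Llama(model_path="models/your-model.gguf", n_ctx=2048, verbose=False)
    director = CreativeDirector(llm)
    tip = director.chat(
        "How do I hook viewers fast?",
        history=[],
        task_context="60-second cooking reel",
    )
    plan = director.generate_final_plan(
        "60-second cooking reel",
        history=[{"content": tip}],
    )
    print(plan)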