Penguindrum920 commited on
Commit
43d2d7a
·
verified ·
1 Parent(s): 8c6fb2d

Upload groq_client.py

Browse files
Files changed (1) hide show
  1. llm/groq_client.py +163 -0
llm/groq_client.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Groq LLM Client for AI Recommendations"""
2
+ import sys
3
+ from pathlib import Path
4
+ from typing import Optional
5
+
6
+ sys.path.insert(0, str(Path(__file__).parent.parent))
7
+ from config import GROQ_API_KEY, LLM_MODEL
8
+
9
+ try:
10
+ from groq import Groq
11
+ except ImportError:
12
+ Groq = None
13
+
14
+
15
# System prompt sent as the first message of every chat() call. It defines
# the assistant persona, recommendation rules, response format, and the
# action-verification protocol (the "=== ACTIONS EXECUTED ===" contract).
# NOTE: runtime text — edits here change model behavior, not just docs.
SYSTEM_PROMPT = """You are AniVerse AI, an expert anime and manga recommendation assistant.

## YOUR CORE MISSION
Provide HIGHLY RELEVANT, PRECISE recommendations. Quality over quantity. Every suggestion must directly address what the user is looking for.

## RECOMMENDATION RULES
1. **Match the Query Exactly**: If user asks for "dark fantasy", recommend dark fantasy - not action comedy.
2. **Use Context Wisely**: Reference the "Relevant Anime/Manga" data provided. These are semantically matched to the query.
3. **Explain Your Picks**: For EACH recommendation, give 1-2 sentences on WHY it fits the request.
4. **Limit Recommendations**: Suggest 2-4 titles max per response. Be selective.
5. **Format Clearly**: Use bold for titles, include scores and genres inline.

## PERSONALIZATION (When User Profile Available)
- Reference their high-rated titles: "Since you gave Attack on Titan a 9..."
- Avoid genres from low-rated shows
- Connect new suggestions to their favorites

## RESPONSE FORMAT
When recommending, use this structure:
**[Title]** (★ score/10) - [Brief reason why this matches their request]

## ACTION HANDLING
1. **Verifying Actions**: You verify if an action (like adding to list) succeeded by checking the "=== ACTIONS EXECUTED ===" section in the context.
2. **Success**: If an action is listed there, confirm it enthusiastically (e.g., "Done! Added X to your list").
3. **Failure/No Action**: If the user asked for an action but it is NOT in the "=== ACTIONS EXECUTED ===" block, DO NOT say you did it. Instead, assume the backend failed to understand the command.
- Ask the user to try again with valid syntax: "Add [Title] to [completed/watching/planned]"
- Or "Rate [Title] [Score]"
4. **Ambiguity**: If the user says "Add to list" without specifying which list, ask them to specify (Completed, Watching, Planned).

Context about relevant titles will be provided below."""
45
+
46
+
47
class GroqClient:
    """Groq LLM client for AI-powered recommendations.

    Thin wrapper around the Groq chat-completions API that injects the
    AniVerse system prompt, optional retrieval context, and conversation
    history into every request.
    """

    def __init__(self):
        """Initialize the underlying Groq SDK client.

        Raises:
            ImportError: if the optional `groq` package is not installed.
            ValueError: if GROQ_API_KEY is not configured.
        """
        if not Groq:
            raise ImportError("groq package not installed. Run: pip install groq")

        if not GROQ_API_KEY:
            raise ValueError("GROQ_API_KEY not set. Add it to your .env file")

        self.client = Groq(api_key=GROQ_API_KEY)
        self.model = LLM_MODEL

    def chat(
        self,
        user_message: str,
        context: str = "",
        # Fixed annotation: was `list[dict] = None`, contradicting the default.
        history: Optional[list[dict]] = None,
        max_tokens: int = 1024,
        # Generalized: previously hard-coded inline; default preserves old behavior.
        temperature: float = 0.7
    ) -> str:
        """Send a chat message and return the assistant's reply text.

        Args:
            user_message: The user's current message.
            context: Optional retrieval context, injected as a second
                system message when non-empty.
            history: Prior turns as {"role": ..., "content": ...} dicts.
            max_tokens: Cap on response length.
            temperature: Sampling temperature.
        """
        messages = [{"role": "system", "content": SYSTEM_PROMPT}]

        # Add context if provided
        if context:
            messages.append({
                "role": "system",
                "content": f"Here is relevant anime data from our database:\n\n{context}"
            })

        # Add conversation history
        if history:
            messages.extend(history)

        # Add current user message
        messages.append({"role": "user", "content": user_message})

        # Call Groq API
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
        )

        return response.choices[0].message.content

    @staticmethod
    def _strip_code_fences(text: str) -> str:
        """Remove a surrounding Markdown code fence (``` or ```json) if present."""
        text = text.strip()
        if text.startswith("```"):
            lines = text.splitlines()
            lines = lines[1:]  # drop the opening fence line (may read ```json)
            if lines and lines[-1].strip() == "```":
                lines = lines[:-1]  # drop the closing fence
            text = "\n".join(lines).strip()
        return text

    def summarize_reviews(
        self,
        reviews: list[str],
        anime_title: str
    ) -> dict:
        """Summarize multiple reviews into sentiment, pros/cons, and aspect scores.

        Args:
            reviews: Raw review texts; only the first 10 are analyzed.
            anime_title: Title the reviews refer to (used in the prompt).

        Returns:
            A dict with keys "sentiment", "pros", "cons", "summary",
            "aspects". If the model reply is not valid JSON, a fallback
            dict is returned with the raw reply placed in "summary".
        """
        reviews_text = "\n---\n".join(reviews[:10])  # Limit to 10 reviews

        prompt = f"""Analyze these reviews for "{anime_title}" and provide:
1. Overall sentiment (positive/negative/mixed)
2. Top 3 pros (things reviewers loved)
3. Top 3 cons (things reviewers criticized)
4. A 2-3 sentence summary
5. Aspect scores (1-10) for: story, animation, characters, music, enjoyment

Reviews:
{reviews_text}

Respond in JSON format:
{{
"sentiment": "positive|negative|mixed",
"pros": ["pro1", "pro2", "pro3"],
"cons": ["con1", "con2", "con3"],
"summary": "...",
"aspects": {{"story": 8, "animation": 9, ...}}
}}"""

        response = self.chat(prompt, max_tokens=512)

        # Parse JSON response (with fallback). Models often wrap JSON in
        # Markdown code fences, so strip those before parsing.
        import json
        try:
            return json.loads(self._strip_code_fences(response))
        except json.JSONDecodeError:
            return {
                "sentiment": "mixed",
                "pros": [],
                "cons": [],
                "summary": response,
                "aspects": {}
            }

    def generate_recommendation_reason(
        self,
        user_query: str,
        anime_data: dict
    ) -> str:
        """Generate a personalized reason why an anime matches the user's request.

        Args:
            user_query: The user's original request text.
            anime_data: Dict with optional "title", "genres", "score" keys;
                missing keys fall back to "Unknown"/"N/A" in the prompt.

        Returns:
            A 1-2 sentence explanation produced by the model.
        """
        prompt = f"""The user asked: "{user_query}"

This anime was matched:
- Title: {anime_data.get('title', 'Unknown')}
- Genres: {anime_data.get('genres', 'Unknown')}
- Score: {anime_data.get('score', 'N/A')}

In 1-2 sentences, explain why this anime matches what the user is looking for. Be specific about the connection."""

        return self.chat(prompt, max_tokens=150)
+
153
+
154
# Module-level singleton holder for the shared client instance.
_client: Optional[GroqClient] = None


def get_llm_client() -> GroqClient:
    """Return the process-wide GroqClient, constructing it on first use."""
    global _client
    if _client is not None:
        return _client
    _client = GroqClient()
    return _client