nice-bill committed on
Commit
c05af84
·
1 Parent(s): b96e9c6

witty explainability added

Browse files
Files changed (1) hide show
  1. src/llm.py +78 -0
src/llm.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import time
from typing import Dict, Optional
from groq import Groq
from huggingface_hub import InferenceClient
from dotenv import load_dotenv

# Pull API keys (GROQ_API_KEY, HF_API_TOKEN) from a local .env file into the
# process environment at import time, so os.getenv sees them below.
load_dotenv()
8
+
9
class PersonaExplainer:
    """Turns a wallet persona + usage stats into a short LLM-written blurb.

    Tries the Groq API first (fast), falls back to the Hugging Face
    Inference API, and finally degrades to a static message when neither
    backend is configured or both calls fail.
    """

    def __init__(self):
        # Keys come from the environment; a missing key simply disables
        # that backend rather than raising at construction time.
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        self.hf_api_token = os.getenv("HF_API_TOKEN")

        self.groq_client = Groq(api_key=self.groq_api_key) if self.groq_api_key else None
        self.hf_client = InferenceClient(token=self.hf_api_token) if self.hf_api_token else None

    def generate_explanation(self, persona: str, stats: Dict[str, float]) -> str:
        """
        Generates a humorous/insightful explanation of the wallet's persona.
        """
        prompt = self._construct_prompt(persona, stats)

        # Backend 1: Groq (fastest). Any failure is logged and we move on.
        if self.groq_client is not None:
            try:
                return self._call_groq(prompt)
            except Exception as err:
                print(f"Groq API failed: {err}. Falling back...")

        # Backend 2: Hugging Face Inference API.
        if self.hf_client is not None:
            try:
                return self._call_hf(prompt)
            except Exception as err:
                print(f"HF API failed: {err}.")

        # Neither backend configured/available: degrade gracefully.
        return "Analysis unavailable (AI models busy)."

    def _construct_prompt(self, persona: str, stats: Dict) -> str:
        # Condense the raw stats into a small, pre-formatted dict so the
        # prompt stays short (avoids token bloat).
        summary = {
            "Transactions": int(stats.get('tx_count', 0)),
            "NFT Volume (USD)": f"${stats.get('total_nft_volume_usd', 0):,.2f}",
            "Gas Spent (ETH)": f"{stats.get('total_gas_spent', 0):.4f}",
            "Active Days": int(stats.get('active_days', 0)),
            "DEX Trades": int(stats.get('dex_trades', 0))
        }

        # Assemble the prompt from named pieces; the final text is the
        # persona + stats context followed by the task instructions.
        role = (
            "You are a crypto analytics bot with a witty, slightly roasting personality. "
            "Analyze this wallet:\n"
        )
        context = f"Persona: {persona}\nStats: {summary}\n\n"
        task = (
            "Task: Write a 2-3 sentence 'Roast' or 'Insight' about this user. "
            "Explain WHY they fit this persona based on the stats. "
            "Be specific but concise."
        )
        return role + context + task

    def _call_groq(self, prompt: str) -> str:
        """Ask Groq's llama-3.1-8b-instant for the blurb; returns message text."""
        messages = [
            {"role": "system", "content": "You are a witty crypto analyst."},
            {"role": "user", "content": prompt},
        ]
        response = self.groq_client.chat.completions.create(
            messages=messages,
            model="llama-3.1-8b-instant",
            temperature=0.7,
            max_tokens=200,
        )
        return response.choices[0].message.content

    def _call_hf(self, prompt: str) -> str:
        """Fallback path: Mistral-7B-Instruct via the HF Inference API."""
        return self.hf_client.text_generation(
            prompt,
            model="mistralai/Mistral-7B-Instruct-v0.2",
            max_new_tokens=200,
            temperature=0.7
        )