AIencoder committed on
Commit
b5dae1c
Β·
verified Β·
1 Parent(s): e51b0a6

Update src/chimera_core.py

Browse files
Files changed (1) hide show
  1. src/chimera_core.py +92 -77
src/chimera_core.py CHANGED
@@ -1,103 +1,118 @@
1
  import os
 
2
  from google import genai
3
  from groq import Groq
 
 
 
4
 
5
  class Chimera:
6
  def __init__(self):
7
- # 1. GEMINI (Google) - Router & Planner
8
  self.gemini_key = os.getenv("GEMINI_API_KEY")
9
- self.gemini_client = None
10
  if self.gemini_key:
11
- try:
12
- self.gemini_client = genai.Client(api_key=self.gemini_key)
13
- self.gemini_model = "gemini-2.5-flash"
14
- print(f"🦁 Gemini: ONLINE")
15
- except: pass
16
 
17
- # 2. GROQ (The Factory)
18
  self.groq_key = os.getenv("GROQ_API_KEY")
19
- self.groq_client = None
20
  if self.groq_key:
21
- try:
22
- self.groq_client = Groq(api_key=self.groq_key)
23
- print(f"⚑ Groq (Llama 3.3 + Qwen 3): ONLINE")
24
- except: pass
25
 
26
- def _call_model(self, client, model, prompt, system_msg):
27
- """ Generic handler for any model """
28
- if not client: return None
 
 
 
 
 
 
29
  try:
30
- if "gemini" in model:
31
- res = client.models.generate_content(
32
- model=model, contents=f"System: {system_msg}\nUser: {prompt}"
33
- )
34
- return res.text
35
- else:
36
- # Groq / OpenAI style
37
- res = client.chat.completions.create(
38
- model=model,
39
- messages=[
40
- {"role": "system", "content": system_msg},
41
- {"role": "user", "content": prompt}
42
- ]
43
- )
44
- return res.choices[0].message.content
45
  except Exception as e:
46
- print(f"⚠️ Error calling {model}: {e}")
47
- return None
48
 
49
- def _trinity_pipeline(self, user_prompt):
50
- """
51
- THE QWEN TRINITY (UPDATED FOR 2026):
52
- 1. Gemini: Plan (Strategy)
53
- 2. Qwen 3: Code (Draft) - NEW MODEL ID
54
- 3. Llama 3.3: Refine (Polish)
55
- """
56
-
57
- # --- PHASE 1: STRATEGY (Gemini) ---
58
- print(" ↳ Phase 1: Gemini Planning...")
59
- strategy = self._call_model(self.gemini_client, "gemini-2.5-flash", user_prompt, "You are a Software Architect. Outline the logic for this code task.")
60
- if not strategy: strategy = user_prompt
61
 
62
- # --- PHASE 2: DRAFTING (Qwen 3 - The Coder) ---
63
- print(" ↳ Phase 2: Qwen 3 Coding...")
64
- # UPDATED MODEL ID: qwen/qwen3-32b
65
- qwen_code = self._call_model(self.groq_client, "qwen/qwen3-32b", f"Plan: {strategy}\n\nTask: {user_prompt}", "You are an expert Python Developer. Write the code.")
66
-
67
- # Fallback if Qwen 3 fails -> Try Llama 3.3 directly
68
- if not qwen_code:
69
- print(" ⚠️ Qwen 3 failed. Falling back to Llama 3.3")
70
- qwen_code = self._call_model(self.groq_client, "llama-3.3-70b-versatile", user_prompt, "You are an expert Python Developer.")
 
 
 
71
 
72
- # --- PHASE 3: REFINEMENT (Llama 3.3 - The Critic) ---
73
- print(" ↳ Phase 3: Llama Refining...")
74
- final_code = self._call_model(self.groq_client, "llama-3.3-70b-versatile", f"Refine this code:\n{qwen_code}", "You are a Senior Engineer. Fix bugs, optimize, and add comments.")
75
-
76
- if final_code:
77
- return final_code, "ASM (Qwen 3 + Llama)"
78
- else:
79
- return qwen_code, "ASM (Qwen 3 Draft)"
80
 
81
- def process_request(self, user_message, history, manual_role="Auto"):
82
- # Router logic
83
  role = manual_role
84
  if role == "Auto":
85
- if any(x in user_message.lower() for x in ["code", "python", "function", "script", "app"]):
 
 
 
 
 
86
  role = "ASM"
87
  else:
88
  role = "CHAT"
89
 
90
  print(f"πŸ‘‰ Routing to: [{role}]")
91
 
92
- # Execute
93
- if role == "ASM" and self.groq_client:
94
- return self._trinity_pipeline(user_message)
95
-
96
- elif role == "SFE":
97
- return self._call_model(self.groq_client, "llama-3.3-70b-versatile", user_message, "You are a Scientist."), "SFE (Llama)"
98
-
99
- elif role == "CSM":
100
- return self._call_model(self.gemini_client, "gemini-2.5-flash", user_message, "You are a Creative Writer."), "CSM (Gemini)"
101
-
102
- else:
103
- return self._call_model(self.gemini_client, "gemini-2.5-flash", user_message, "You are Chimera."), "CHAT"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
+ import random
3
  from google import genai
4
  from groq import Groq
5
+ from duckduckgo_search import DDGS
6
+ from huggingface_hub import InferenceClient
7
+ from PIL import Image
8
 
9
class Chimera:
    """Multi-model hub: routes requests to Gemini (vision/chat), Groq (code),
    and a Hugging Face Flux endpoint (image generation)."""

    def __init__(self):
        """Read API keys from the environment and build whichever clients
        are configured. Clients for missing keys stay ``None``.

        Fix: every client attribute is initialized to None up front.
        Previously they were only assigned inside the ``if key:`` branches,
        so a missing key left the attribute nonexistent and any later
        ``self.hf_client`` / ``self.groq_client`` access raised AttributeError.
        """
        self.gemini_client = None
        self.groq_client = None
        self.hf_client = None

        # 1. SETUP GEMINI (Router + Vision)
        self.gemini_key = os.getenv("GEMINI_API_KEY")
        if self.gemini_key:
            self.gemini_client = genai.Client(api_key=self.gemini_key)
            print("πŸ‘οΈ VIM (Gemini Vision): ONLINE")

        # 2. SETUP GROQ (Coding + Reasoning)
        self.groq_key = os.getenv("GROQ_API_KEY")
        if self.groq_key:
            self.groq_client = Groq(api_key=self.groq_key)
            print("⚑ ASM (Qwen/Llama): ONLINE")

        # 3. SETUP HUGGING FACE (Image Generation)
        self.hf_token = os.getenv("HF_TOKEN")
        if self.hf_token:
            self.hf_client = InferenceClient("black-forest-labs/FLUX.1-schnell", token=self.hf_token)
            print("🎨 IGM (Flux Art): ONLINE")
28
+
29
+ # --- MODULE 1: NET (Web Search) ---
30
+ def _web_search(self, query):
31
+ print(f" ↳ 🌐 Searching web for: {query}")
32
  try:
33
+ results = DDGS().text(query, max_results=3)
34
+ summary = "\n".join([f"- {r['title']}: {r['body']}" for r in results])
35
+ return summary
 
 
 
 
 
 
 
 
 
 
 
 
36
  except Exception as e:
37
+ return f"Search Error: {e}"
 
38
 
39
+ # --- MODULE 2: IGM (Image Generation) ---
40
+ def _generate_image(self, prompt):
41
+ print(f" ↳ 🎨 Generating image for: {prompt}")
42
+ if not self.hf_client: return None, "❌ HF_TOKEN missing."
43
+ try:
44
+ image = self.hf_client.text_to_image(prompt)
45
+ # Save to a temporary path so Gradio can display it
46
+ path = f"/tmp/gen_{random.randint(0,9999)}.png"
47
+ image.save(path)
48
+ return path, f"![Generated Image](/file={path})"
49
+ except Exception as e:
50
+ return None, f"Generation Error: {e}"
51
 
52
+ # --- MODULE 3: VIM (Vision Analysis) ---
53
+ def _analyze_image(self, text, image_path):
54
+ print(" ↳ πŸ‘οΈ Analyzing Image...")
55
+ try:
56
+ pil_image = Image.open(image_path)
57
+ response = self.gemini_client.models.generate_content(
58
+ model="gemini-2.5-flash",
59
+ contents=[text, pil_image]
60
+ )
61
+ return response.text
62
+ except Exception as e:
63
+ return f"Vision Error: {e}"
64
 
65
+ def process_request(self, message, history, manual_role, image_input=None):
66
+ # 1. PRIORITY: VIM (If image is uploaded)
67
+ if image_input:
68
+ return self._analyze_image(message or "Describe this.", image_input), "VIM (Vision)"
 
 
 
 
69
 
70
+ # 2. DETECT INTENT (Router)
 
71
  role = manual_role
72
  if role == "Auto":
73
+ msg_lower = message.lower()
74
+ if any(x in msg_lower for x in ["generate", "draw", "create image", "paint"]):
75
+ role = "IGM"
76
+ elif any(x in msg_lower for x in ["search", "news", "price", "latest"]):
77
+ role = "NET"
78
+ elif any(x in msg_lower for x in ["code", "python", "script"]):
79
  role = "ASM"
80
  else:
81
  role = "CHAT"
82
 
83
  print(f"πŸ‘‰ Routing to: [{role}]")
84
 
85
+ # 3. EXECUTE MODULES
86
+ if role == "IGM":
87
+ # Image Gen Mode
88
+ path, markdown = self._generate_image(message)
89
+ return markdown, "IGM (Flux)"
90
+
91
+ elif role == "NET":
92
+ # Web Search Mode
93
+ search_data = self._web_search(message)
94
+ # Synthesize answer with Gemini
95
+ prompt = f"User Question: {message}\n\nSearch Results:\n{search_data}\n\nAnswer the user based on these results."
96
+ response = self.gemini_client.models.generate_content(model="gemini-2.5-flash", contents=prompt)
97
+ return response.text, "NET (Web)"
98
+
99
+ elif role == "ASM":
100
+ # Coding Mode (Qwen/Llama) - Simplified for stability
101
+ if self.groq_client:
102
+ try:
103
+ res = self.groq_client.chat.completions.create(
104
+ model="qwen-2.5-32b", # Or qwen/qwen3-32b
105
+ messages=[{"role":"user", "content": message}]
106
+ )
107
+ return res.choices[0].message.content, "ASM (Qwen)"
108
+ except:
109
+ pass # Fallback to Gemini
110
+
111
+ # Default Fallback (Gemini)
112
+ try:
113
+ res = self.gemini_client.models.generate_content(
114
+ model="gemini-2.5-flash", contents=message
115
+ )
116
+ return res.text, f"{role} (Gemini)"
117
+ except:
118
+ return "System Error.", "ERR"