AIencoder committed on
Commit
e2b4a55
·
verified ·
1 Parent(s): dbf3196

Update src/chimera_core.py

Browse files
Files changed (1) hide show
  1. src/chimera_core.py +16 -11
src/chimera_core.py CHANGED
@@ -2,7 +2,7 @@ import os
2
  import random
3
  from google import genai
4
  from groq import Groq
5
- from ddgs import DDGS # Changed from duckduckgo_search
6
  from huggingface_hub import InferenceClient
7
  from PIL import Image
8
 
@@ -40,13 +40,15 @@ class Chimera:
40
  # --- MODULE 2: IGM (Image Generation) ---
41
  def _generate_image(self, prompt):
42
  print(f" ↳ 🎨 Generating image for: {prompt}")
43
- if not self.hf_client: return None, "❌ HF_TOKEN missing."
 
44
  try:
45
  image = self.hf_client.text_to_image(prompt)
46
  # Save to a temporary path so Gradio can display it
47
  path = f"/tmp/gen_{random.randint(0,9999)}.png"
48
  image.save(path)
49
- return path, f"Generated image saved!\n\n![Generated Image](file={path})"
 
50
  except Exception as e:
51
  return None, f"Generation Error: {e}"
52
 
@@ -66,17 +68,17 @@ class Chimera:
66
  def process_request(self, message, history, manual_role, image_input=None):
67
  # 1. PRIORITY: VIM (If image is uploaded)
68
  if image_input:
69
- return self._analyze_image(message or "Describe this.", image_input), "VIM"
70
 
71
  # 2. DETECT INTENT (Router)
72
  role = manual_role
73
  if role == "Auto":
74
  msg_lower = message.lower()
75
- if any(x in msg_lower for x in ["generate", "draw", "create image", "paint", "make an image"]):
76
  role = "IGM"
77
- elif any(x in msg_lower for x in ["search", "news", "price", "latest", "find"]):
78
  role = "NET"
79
- elif any(x in msg_lower for x in ["code", "python", "script", "function", "debug"]):
80
  role = "ASM"
81
  else:
82
  role = "CHAT"
@@ -95,19 +97,22 @@ class Chimera:
95
  # 3. EXECUTE MODULES
96
  if role == "IGM" or role == "IGM (Generate Image)":
97
  # Image Gen Mode
98
- path, markdown = self._generate_image(message)
99
- return markdown, "IGM"
 
 
 
100
 
101
  elif role == "NET" or role == "NET (Search)":
102
  # Web Search Mode
103
  search_data = self._web_search(message)
104
  # Synthesize answer with Gemini
105
- prompt = f"User Question: {message}\n\nSearch Results:\n{search_data}\n\nAnswer the user based on these results."
106
  response = self.gemini_client.models.generate_content(model="gemini-2.5-flash", contents=prompt)
107
  return response.text, "NET"
108
 
109
  elif role == "ASM" or role == "ASM (Code)":
110
- # Coding Mode (Qwen/Llama) - Simplified for stability
111
  if self.groq_client:
112
  try:
113
  res = self.groq_client.chat.completions.create(
 
2
  import random
3
  from google import genai
4
  from groq import Groq
5
+ from ddgs import DDGS
6
  from huggingface_hub import InferenceClient
7
  from PIL import Image
8
 
 
40
  # --- MODULE 2: IGM (Image Generation) ---
41
  def _generate_image(self, prompt):
42
  print(f" ↳ 🎨 Generating image for: {prompt}")
43
+ if not self.hf_client:
44
+ return None, "❌ HF_TOKEN missing."
45
  try:
46
  image = self.hf_client.text_to_image(prompt)
47
  # Save to a temporary path so Gradio can display it
48
  path = f"/tmp/gen_{random.randint(0,9999)}.png"
49
  image.save(path)
50
+ # Return the path - Gradio will handle displaying it
51
+ return path, None
52
  except Exception as e:
53
  return None, f"Generation Error: {e}"
54
 
 
68
  def process_request(self, message, history, manual_role, image_input=None):
69
  # 1. PRIORITY: VIM (If image is uploaded)
70
  if image_input:
71
+ return self._analyze_image(message or "Describe this image in detail.", image_input), "VIM"
72
 
73
  # 2. DETECT INTENT (Router)
74
  role = manual_role
75
  if role == "Auto":
76
  msg_lower = message.lower()
77
+ if any(x in msg_lower for x in ["generate", "draw", "create image", "paint", "make an image", "picture of"]):
78
  role = "IGM"
79
+ elif any(x in msg_lower for x in ["search", "news", "price", "latest", "find", "look up"]):
80
  role = "NET"
81
+ elif any(x in msg_lower for x in ["code", "python", "script", "function", "debug", "program"]):
82
  role = "ASM"
83
  else:
84
  role = "CHAT"
 
97
  # 3. EXECUTE MODULES
98
  if role == "IGM" or role == "IGM (Generate Image)":
99
  # Image Gen Mode
100
+ image_path, error_msg = self._generate_image(message)
101
+ if error_msg:
102
+ return error_msg, "IGM"
103
+ # Return both text and image path
104
+ return ("✨ Image generated successfully!", image_path), "IGM"
105
 
106
  elif role == "NET" or role == "NET (Search)":
107
  # Web Search Mode
108
  search_data = self._web_search(message)
109
  # Synthesize answer with Gemini
110
+ prompt = f"User Question: {message}\n\nSearch Results:\n{search_data}\n\nProvide a clear, concise answer based on these results."
111
  response = self.gemini_client.models.generate_content(model="gemini-2.5-flash", contents=prompt)
112
  return response.text, "NET"
113
 
114
  elif role == "ASM" or role == "ASM (Code)":
115
+ # Coding Mode (Qwen/Llama)
116
  if self.groq_client:
117
  try:
118
  res = self.groq_client.chat.completions.create(