# Hugging Face Space — status: Running
import os
import random
from google import genai
from groq import Groq
from ddgs import DDGS
from huggingface_hub import InferenceClient
from PIL import Image
class Chimera:
    """Multi-backend assistant that routes each request to the best model.

    Modules:
      NET  — DuckDuckGo web search, summarized by Gemini when available.
      IGM  — image generation via the HF Inference API (FLUX.1-schnell).
      VIM  — image understanding via Gemini vision.
      ASM  — coding/reasoning via Groq, with Gemini fallback.
      CHAT — plain Gemini chat (default fallback for everything else).
    """

    def __init__(self):
        # Default every client to None so the `if self.xxx_client` guards
        # below never raise AttributeError when an API key is absent
        # (previously these attributes were simply never assigned).
        self.gemini_client = None
        self.groq_client = None
        self.hf_client = None

        # 1. SETUP GEMINI (Router + Vision)
        self.gemini_key = os.getenv("GEMINI_API_KEY")
        if self.gemini_key:
            self.gemini_client = genai.Client(api_key=self.gemini_key)
            print("👁️ VIM (Gemini Vision): ONLINE")

        # 2. SETUP GROQ (Coding + Reasoning)
        self.groq_key = os.getenv("GROQ_API_KEY")
        if self.groq_key:
            self.groq_client = Groq(api_key=self.groq_key)
            print("⚡ ASM (Qwen/Llama): ONLINE")

        # 3. SETUP HUGGING FACE (Image Generation)
        self.hf_token = os.getenv("HF_TOKEN")
        if self.hf_token:
            self.hf_client = InferenceClient(
                "black-forest-labs/FLUX.1-schnell", token=self.hf_token
            )
            print("🎨 IGM (Flux Art): ONLINE")

    # --- MODULE 1: NET (Web Search) ---
    def _web_search(self, query):
        """Return up to 3 DuckDuckGo text results as a '- title: body' list.

        Never raises: any failure is returned as an 'Search Error: ...' string.
        """
        print(f"  ⏳ 🔍 Searching web for: {query}")
        try:
            with DDGS() as ddgs:
                results = list(ddgs.text(query, max_results=3))
            return "\n".join(f"- {r['title']}: {r['body']}" for r in results)
        except Exception as e:
            return f"Search Error: {e}"

    # --- MODULE 2: IGM (Image Generation) ---
    def _generate_image(self, prompt):
        """Generate an image for ``prompt``.

        Returns ``(path, None)`` on success or ``(None, error_message)`` on
        failure, so callers can branch without try/except.
        """
        print(f"  ⏳ 🎨 Generating image for: {prompt}")
        if not self.hf_client:
            return None, "❌ HF_TOKEN missing."
        try:
            import tempfile  # local import keeps module-level deps unchanged

            image = self.hf_client.text_to_image(prompt)
            # mkstemp yields a unique, collision-free, portable path
            # (the old /tmp/gen_<rand>.png could collide and breaks on Windows).
            fd, path = tempfile.mkstemp(prefix="gen_", suffix=".png")
            os.close(fd)
            image.save(path)
            # Return the path — Gradio will handle displaying it.
            return path, None
        except Exception as e:
            return None, f"Generation Error: {e}"

    # --- MODULE 3: VIM (Vision Analysis) ---
    def _analyze_image(self, text, image_path):
        """Ask Gemini vision ``text`` about the image at ``image_path``.

        Returns the model's text, or a 'Vision Error: ...' string on failure
        (including a missing Gemini client — AttributeError is caught below).
        """
        print("  ⏳ 👁️ Analyzing Image...")
        try:
            pil_image = Image.open(image_path)
            response = self.gemini_client.models.generate_content(
                model="gemini-2.5-flash",
                contents=[text, pil_image],
            )
            return response.text
        except Exception as e:
            return f"Vision Error: {e}"

    def process_request(self, message, history, manual_role, image_input=None):
        """Route one request and return a ``(reply, module_tag)`` tuple.

        ``manual_role`` is either "Auto" or a dropdown label such as
        "IGM (Generate Image)". ``history`` is accepted for Gradio
        compatibility but unused here.
        """
        # 1. PRIORITY: VIM — an uploaded image always wins.
        if image_input:
            return (
                self._analyze_image(
                    message or "Describe this image in detail.", image_input
                ),
                "VIM",
            )

        # 2. DETECT INTENT (Router): keyword heuristics in Auto mode,
        #    otherwise normalize the dropdown label to an internal code.
        role = manual_role
        if role == "Auto":
            msg_lower = message.lower()
            if any(x in msg_lower for x in ["generate", "draw", "create image", "paint", "make an image", "picture of"]):
                role = "IGM"
            elif any(x in msg_lower for x in ["search", "news", "price", "latest", "find", "look up"]):
                role = "NET"
            elif any(x in msg_lower for x in ["code", "python", "script", "function", "debug", "program"]):
                role = "ASM"
            else:
                role = "CHAT"
        else:
            role_map = {
                "ASM (Code)": "ASM",
                "IGM (Generate Image)": "IGM",
                "NET (Search)": "NET",
                "VIM (Vision)": "VIM",
            }
            role = role_map.get(manual_role, role)

        print(f"🧠 Routing to: [{role}]")

        # 3. EXECUTE MODULES
        if role == "IGM":
            image_path, error_msg = self._generate_image(message)
            if error_msg:
                return error_msg, "IGM"
            # (text, image_path) tuple — the UI shows both.
            return ("✨ Image generated successfully!", image_path), "IGM"

        if role == "NET":
            search_data = self._web_search(message)
            if not self.gemini_client:
                # Graceful degradation: no Gemini key → show the raw search
                # results instead of crashing with AttributeError.
                return search_data, "NET"
            # Synthesize an answer from the raw results with Gemini.
            prompt = f"User Question: {message}\n\nSearch Results:\n{search_data}\n\nProvide a clear, concise answer based on these results."
            response = self.gemini_client.models.generate_content(
                model="gemini-2.5-flash", contents=prompt
            )
            return response.text, "NET"

        if role == "ASM" and self.groq_client:
            try:
                res = self.groq_client.chat.completions.create(
                    model="qwen2.5-coder-32b-instruct",
                    messages=[{"role": "user", "content": message}],
                )
                return res.choices[0].message.content, "ASM"
            except Exception as e:
                print(f"Groq Error: {e}, falling back to Gemini")

        # Default fallback: CHAT, or any module whose backend is unavailable.
        try:
            res = self.gemini_client.models.generate_content(
                model="gemini-2.5-flash", contents=message
            )
            return res.text, f"{role}"
        except Exception as e:
            # Also covers a missing Gemini client (AttributeError on None).
            return f"System Error: {e}", "ERR"