# Hugging Face Spaces page residue (Space status: Sleeping) — not code.
| import os | |
| import uuid | |
| import requests | |
| import uvicorn | |
| import edge_tts | |
| from fastapi import FastAPI, Request | |
| from fastapi.responses import FileResponse | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from duckduckgo_search import DDGS | |
| import google.generativeai as genai # π’ NEW: Gemini Import | |
# 1. SETUP & CONFIGURATION
# The Gemini API key replaces the earlier HF_TOKEN; only configure the
# SDK when the key is actually present in the environment.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if GEMINI_API_KEY:
    genai.configure(api_key=GEMINI_API_KEY)

app = FastAPI()

# Open CORS: any origin/method/header, so the browser front-end can call us.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# Root health-check endpoint.
# NOTE(review): the @app.get decorator is absent in the pasted source;
# without it FastAPI never registers this handler — restored here.
@app.get("/")
async def home():
    """Health check: report that the service is up and ready."""
    return {"status": "Avia AI (Gemini Powered Stable Version) is Ready! π"}
# 2. INTERNET SEARCH (real-time data)
def search_web(query):
    """Fetch up to 3 DuckDuckGo results for *query* and format a context blurb.

    Returns a string ready to splice into the system prompt, or "" when the
    search fails or yields nothing. Search is best-effort by design: a
    network/parse failure must not break the chat flow, so errors are
    logged and swallowed instead of raised.
    """
    try:
        with DDGS() as ddgs:
            # list() materializes the generator; no need for a copy-comprehension.
            results = list(ddgs.text(query, region='in-en', max_results=3))
        if results:
            summary = "\n".join(f"β’ {r['title']}: {r['body']}" for r in results)
            return f"\n[INTERNET DATA - Use this if relevant]:\n{summary}\n"
    except Exception as e:
        # Was a bare `except: pass` — keep the best-effort contract but
        # surface the failure in the logs instead of hiding it entirely.
        print(f"Search Error: {e}")
    return ""
# 3. SYSTEM PROMPT (the persona)
def get_avia_prompt(search_context):
    """Build Avia's system instruction, embedding any live web context.

    The prompt fixes the persona (Hinglish, polite, created by Tanveer Ali)
    and teaches the model the [IMAGE_PROMPT: ...] tag protocol that the
    chat endpoint later parses.
    """
    persona = f"""
You are Avia, a smart and friendly AI Assistant created by Tanveer Ali.
YOUR RULES:
1. Language: Answer in a natural mix of Hindi and English (Hinglish).
2. Personality: Helpful, intelligent, and polite.
3. Creator: Tanveer Ali.
CAPABILITIES:
1. IMAGE GENERATION:
- If the user asks to "generate", "create", "draw", "make" an image or photo:
- You MUST output a special tag: [IMAGE_PROMPT: <detailed English description>].
- Example: User: "Ek futuristic car banao" -> Response: "Sure! [IMAGE_PROMPT: A futuristic sports car, neon lights, cyberpunk city background, 8k resolution]"
2. INTERNET:
- You have access to real-time information. Use the data below if needed.
{search_context}
"""
    return persona
# 4. CHAT API (the main logic)
def _pollinations_url(prompt: str) -> str:
    """Build a Pollinations image URL for *prompt* with a small random seed."""
    seed = uuid.uuid4().int % 1000
    return f"https://image.pollinations.ai/prompt/{requests.utils.quote(prompt)}?nologo=true&seed={seed}"

# NOTE(review): the @app.post decorator is absent in the pasted source;
# without it FastAPI never registers this handler — restored here.
@app.post("/chat")
async def chat(request: Request):
    """Main chat endpoint.

    Pipeline: optional web search -> Gemini with the Avia persona ->
    post-process the reply for the [IMAGE_PROMPT: ...] tag (or keyword
    backup) and return either plain text or an IMAGE_URL: reply.
    Expects JSON body: {"message": str, "history": [{"role", "content"}]}.
    """
    try:
        data = await request.json()
        user_msg = data.get("message", "")
        history = data.get("history", [])

        # --- Step A: Internet search check ---
        search_context = ""
        triggers = ["news", "price", "who is", "weather", "today", "search", "score", "match", "latest"]
        if "?" in user_msg or any(w in user_msg.lower() for w in triggers):
            web_data = search_web(user_msg)
            if web_data:
                search_context = web_data

        # --- Step B: Initialize Gemini brain with the persona ---
        system_instruction = get_avia_prompt(search_context)
        model = genai.GenerativeModel(
            model_name="gemini-1.5-flash",
            system_instruction=system_instruction,
        )

        # Convert the last 4 history messages to Gemini's format.
        # .get('role') instead of ['role']: a malformed history entry must
        # not crash the whole request with a KeyError.
        gemini_history = []
        for h in history[-4:]:
            role = "model" if h.get('role') in ("assistant", "model") else "user"
            content = h.get('content', '')
            if content:
                gemini_history.append({"role": role, "parts": [content]})

        # --- Step C: Call the internal brain (Gemini API) ---
        try:
            chat_session = model.start_chat(history=gemini_history)
            response = chat_session.send_message(user_msg)
            ai_reply = response.text

            # --- Step D: Image generation logic (Pollinations) ---
            # 1. Explicit [IMAGE_PROMPT: ...] tag emitted by the model.
            tag = "[IMAGE_PROMPT:"
            if tag in ai_reply:
                start = ai_reply.find(tag) + len(tag)
                end = ai_reply.find("]", start)
                if end != -1:
                    prompt = ai_reply[start:end].strip()
                    return {"reply": f"IMAGE_URL:{_pollinations_url(prompt)}"}

            # 2. Backup heuristic: the user's own wording asks for an image.
            keywords = ["generate", "draw", "create", "banao", "tasveer", "photo", "image"]
            lowered = user_msg.lower()
            if any(w in lowered for w in keywords) and any(
                w in lowered for w in ("image", "photo", "tasveer")
            ):
                return {"reply": f"IMAGE_URL:{_pollinations_url(user_msg)}"}

            return {"reply": ai_reply}
        except Exception as e:
            # Gemini call failed (quota, network, safety block): friendly retry message.
            print(f"Brain Error: {e}")
            return {"reply": "Sorry Tanveer, mera server abhi thoda busy hai. Please try again in 5 seconds."}
    except Exception as e:
        # Bad request body or any other unexpected failure.
        return {"reply": f"Error: {str(e)}"}
# 5. TTS API (voice)
# NOTE(review): the @app.get decorator is absent in the pasted source;
# without it FastAPI never registers this handler — restored here.
@app.get("/tts")
async def tts(text: str):
    """Synthesize *text* to an MP3 with edge-tts and return the file.

    Picks a Hindi voice when the text contains Devanagari characters
    (Unicode block U+0900..U+097F), otherwise an English voice.
    """
    try:
        is_hindi = any('\u0900' <= c <= '\u097f' for c in text)
        voice = "hi-IN-SwaraNeural" if is_hindi else "en-US-AriaNeural"
        communicate = edge_tts.Communicate(text, voice)
        filename = f"voice_{uuid.uuid4()}.mp3"
        await communicate.save(filename)
        # NOTE(review): the generated file is never deleted, so files
        # accumulate on disk — consider a background task that removes
        # it after the response is sent.
        return FileResponse(filename, media_type="audio/mpeg")
    except Exception as e:
        # Was a bare `except:` — keep the graceful JSON error but log the cause.
        print(f"TTS Error: {e}")
        return {"error": "TTS Error"}
# Entry point: run the dev server directly (Hugging Face Spaces expects 7860).
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)