import gradio as gr
from huggingface_hub import InferenceClient
import json
import re
from duckduckgo_search import DDGS

# Use FREE Hugging Face model - no API key needed!
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# AJ System Prompt - This makes the AI act like YOUR assistant
SYSTEM_PROMPT = """You are AJ, a personal AI assistant. You are loyal, helpful, witty, and playful.
You call your owner "boss" or their nickname. You are NOT a generic assistant - you are THEIR personal AI.

PERSONALITY:
- Friendly but professional
- Casual and fun, but gets things done
- Has a sense of humor
- Protective of owner's privacy
- Remember previous conversations

CAPABILITIES (return JSON action when user wants to DO something):
- Phone: call, sms, open_app, close_app
- Files: search_files, delete_file, create_folder
- Camera: capture_photo, record_video, capture_selfie
- Location: get_location, navigate, nearby_search
- Smart Home: smart_home (device, state/action)
- Media: play_music, pause_music, youtube_play
- Productivity: set_alarm, set_reminder, read_calendar
- Device: toggle_flashlight, screenshot, get_battery
- Web: web_search, open_url

RESPONSE FORMAT (ALWAYS use this JSON format):
{
  "text": "What you say to the user",
  "action": {"type": "action_name", "params": {...}} or null,
  "emotion": "happy/neutral/concerned/excited/playful"
}

RULES:
- Be concise but helpful
- If user asks something you don't know, use web search
- For dangerous actions (delete, etc), ask for confirmation
- Be creative and engaging
- You can answer ANY question - use the internet if needed
- ALWAYS respond in valid JSON format"""


def search_web(query: str, max_results: int = 3) -> str:
    """Search the web using DuckDuckGo.

    Returns a newline-joined "- title: body" list, "No results found."
    when the search comes back empty, or a "Search error: ..." string on
    any failure (best-effort: a search outage must not crash the chat).
    """
    try:
        with DDGS() as ddgs:
            results = list(ddgs.text(query, max_results=max_results))
        if results:
            formatted = [f"- {r['title']}: {r['body']}" for r in results]
            return "\n".join(formatted)
        return "No results found."
    except Exception as e:
        # Deliberate broad catch: any DDG/network error is reported inline
        # instead of propagating into the chat handler.
        return f"Search error: {str(e)}"


def needs_search(message: str) -> bool:
    """Heuristically decide whether a message warrants a web search.

    True when the lowercased message contains any of the question-like
    trigger substrings below.
    """
    search_triggers = [
        "what is", "who is", "where is", "when is", "how to",
        "tell me about", "explain", "define", "meaning of",
        "weather", "news", "latest", "current", "today",
        "price of", "cost of", "how much", "recipe",
        "what happened", "why is", "what are",
    ]
    lower_msg = message.lower()
    return any(trigger in lower_msg for trigger in search_triggers)


def format_response(text: str, action=None, emotion: str = "neutral") -> str:
    """Serialize a reply into the JSON envelope the mobile app expects.

    Keys: "text" (spoken reply), "action" (dict or None), "emotion".
    """
    response = {
        "text": text,
        "action": action,
        "emotion": emotion,
    }
    return json.dumps(response)


def chat(message: str, history: list) -> str:
    """Main chat function: build context, optionally search, call the model.

    Args:
        message: The user's new message.
        history: Gradio-style list of (user, assistant) pairs; only the
            last 10 pairs are kept for context.

    Returns:
        A JSON string in the SYSTEM_PROMPT envelope. If the model's
        output is not valid JSON it is wrapped via format_response();
        on any error a "concerned" apology envelope is returned.
    """
    # Build conversation context
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]

    # Add history
    for h in history[-10:]:  # Last 10 messages for context
        if h[0]:
            messages.append({"role": "user", "content": h[0]})
        if h[1]:
            messages.append({"role": "assistant", "content": h[1]})

    # Check if we need to search the web
    search_context = ""
    if needs_search(message):
        search_results = search_web(message)
        search_context = (
            f"\n\n[Web Search Results for context]\n{search_results}\n\n"
            "Use this information to answer the user's question."
        )

    # Add current message with search context
    messages.append({
        "role": "user",
        "content": message + search_context,
    })

    # Get AI response
    try:
        response_text = ""
        for token in client.chat_completion(
            messages,
            max_tokens=500,
            temperature=0.7,
            stream=True,
        ):
            # delta.content may be None on some stream chunks
            response_text += token.choices[0].delta.content or ""

        response_text = response_text.strip()

        # Extract JSON if the model wrapped it in other text
        json_match = re.search(r'\{[\s\S]*\}', response_text)
        if json_match:
            response_text = json_match.group()

        # Validate JSON and backfill required fields
        try:
            parsed = json.loads(response_text)
            if "text" not in parsed:
                parsed["text"] = response_text
            if "action" not in parsed:
                parsed["action"] = None
            if "emotion" not in parsed:
                parsed["emotion"] = "neutral"
            return json.dumps(parsed)
        except json.JSONDecodeError:
            # Model produced plain text: wrap it in the envelope
            return format_response(response_text)

    except Exception as e:
        return format_response(
            f"Sorry boss, I had a hiccup: {str(e)}", emotion="concerned"
        )


def api_chat(message: str, history=None) -> dict:
    """API endpoint for the mobile app.

    FIX: the original signature used a mutable default (history=[]),
    which is shared across calls and would leak conversation state
    between requests. Defaulting to None is backward-compatible.
    """
    result = chat(message, history if history is not None else [])
    return {"response": result}


def api_search(query: str) -> dict:
    """API endpoint for web search (top 5 results)."""
    results = search_web(query, max_results=5)
    return {"result": results}


def respond(message, chat_history):
    """Gradio handler: run chat(), render the reply, append to history.

    Returns ("", updated_history) so the textbox is cleared.
    """
    response = chat(message, chat_history)

    # FIX: narrowed the original bare `except:` (which would also swallow
    # KeyboardInterrupt/SystemExit) to the errors this parse can raise:
    # invalid JSON, or JSON whose top level is not a dict (no .get()).
    try:
        parsed = json.loads(response)
        display_response = parsed.get("text", response)
        # Add action info if present
        if parsed.get("action"):
            display_response += f"\n\nšŸ“± Action: `{json.dumps(parsed['action'])}`"
    except (json.JSONDecodeError, AttributeError):
        display_response = response

    chat_history.append((message, display_response))
    return "", chat_history


# ============================================
# GRADIO 6.0+ COMPATIBLE CODE (FIXED!)
# ============================================

# Create the interface WITHOUT theme in Blocks()
with gr.Blocks(title="AJ AI Backend") as demo:
    gr.Markdown("""
    # šŸ¤– AJ AI Backend Server

    This is the AI brain for your AJ Personal Assistant!

    **Status:** 🟢 Online and ready!
    """)

    chatbot = gr.Chatbot(height=400, label="Test Chat")
    msg = gr.Textbox(
        placeholder="Test your AI here... (e.g., 'Hey AJ, what can you do?')",
        label="Message",
    )

    with gr.Row():
        submit = gr.Button("Send", variant="primary")
        clear = gr.Button("Clear")

    # Both Enter and the Send button route through respond();
    # Clear resets the chatbot component without queueing.
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    submit.click(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

    gr.Markdown("""
    ---
    ### šŸ“” API Endpoints

    **Chat:** `POST /api/chat`
    ```json
    {"message": "your message", "history": []}
    ```

    **Search:** `POST /api/search`
    ```json
    {"query": "search term"}
    ```

    ---
    *Your own AI server - No API keys needed!*
    """)

# Add API routes using FastAPI
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Enable CORS so your app can connect
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Your app can connect from anywhere
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.post("/api/chat")
async def chat_endpoint(data: dict):
    """JSON body: {"message": str, "history": list} -> {"response": str}."""
    message = data.get("message", "")
    history = data.get("history", [])
    return api_chat(message, history)


@app.post("/api/search")
async def search_endpoint(data: dict):
    """JSON body: {"query": str} -> {"result": str}."""
    query = data.get("query", "")
    return api_search(query)


# Mount Gradio app - theme goes in launch() for Gradio 6.0+
app = gr.mount_gradio_app(app, demo, path="/")

# For running locally (optional)
# NOTE(review): demo.launch() serves only the Gradio UI; the /api/* FastAPI
# routes require running `app` under an ASGI server (e.g. uvicorn) — confirm
# the intended local workflow before relying on the endpoints locally.
if __name__ == "__main__":
    demo.launch()