import gradio as gr
from huggingface_hub import InferenceClient
import json
import re
from duckduckgo_search import DDGS
# Free, hosted Hugging Face Inference API model — no API key needed.
# NOTE(review): the public serverless endpoint is rate-limited and its
# availability is not guaranteed; confirm before production use.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# AJ system prompt: persona + capability catalog + mandatory JSON reply
# contract. chat() prepends this to every conversation; the RESPONSE FORMAT
# section must stay in sync with the JSON parsing in chat() and respond().
SYSTEM_PROMPT = """You are AJ, a personal AI assistant. You are loyal, helpful, witty, and playful.
You call your owner "boss" or their nickname.
You are NOT a generic assistant - you are THEIR personal AI.
PERSONALITY:
- Friendly but professional
- Casual and fun, but gets things done
- Has a sense of humor
- Protective of owner's privacy
- Remember previous conversations
CAPABILITIES (return JSON action when user wants to DO something):
- Phone: call, sms, open_app, close_app
- Files: search_files, delete_file, create_folder
- Camera: capture_photo, record_video, capture_selfie
- Location: get_location, navigate, nearby_search
- Smart Home: smart_home (device, state/action)
- Media: play_music, pause_music, youtube_play
- Productivity: set_alarm, set_reminder, read_calendar
- Device: toggle_flashlight, screenshot, get_battery
- Web: web_search, open_url
RESPONSE FORMAT (ALWAYS use this JSON format):
{
"text": "What you say to the user",
"action": {"type": "action_name", "params": {...}} or null,
"emotion": "happy/neutral/concerned/excited/playful"
}
RULES:
- Be concise but helpful
- If user asks something you don't know, use web search
- For dangerous actions (delete, etc), ask for confirmation
- Be creative and engaging
- You can answer ANY question - use the internet if needed
- ALWAYS respond in valid JSON format"""
def search_web(query: str, max_results: int = 3) -> str:
    """Run a DuckDuckGo text search and return bullet-formatted results.

    Returns one "- title: body" line per hit, "No results found." when the
    search comes back empty, or a "Search error: ..." string on failure
    (this function never raises).
    """
    try:
        with DDGS() as ddgs:
            hits = list(ddgs.text(query, max_results=max_results))
        if not hits:
            return "No results found."
        return "\n".join(f"- {hit['title']}: {hit['body']}" for hit in hits)
    except Exception as e:
        return f"Search error: {str(e)}"
def needs_search(message: str) -> bool:
    """Heuristic: does *message* look like a factual query needing the web?

    Case-insensitive substring match against a fixed set of question-like
    trigger phrases ("what is", "weather", "price of", ...).
    """
    triggers = (
        "what is", "who is", "where is", "when is", "how to",
        "tell me about", "explain", "define", "meaning of",
        "weather", "news", "latest", "current", "today",
        "price of", "cost of", "how much", "recipe",
        "what happened", "why is", "what are",
    )
    text = message.lower()
    for phrase in triggers:
        if phrase in text:
            return True
    return False
def format_response(text: str, action=None, emotion="neutral") -> str:
    """Serialize a reply into the AJ JSON envelope.

    The envelope always carries the three keys "text", "action" (a dict or
    None), and "emotion", matching the contract in SYSTEM_PROMPT.
    """
    return json.dumps({"text": text, "action": action, "emotion": emotion})
def chat(message: str, history: list) -> str:
    """Generate AJ's reply for *message* given prior (user, bot) pairs.

    Builds the model context from SYSTEM_PROMPT plus the last 10 history
    exchanges, optionally injects live web-search results, then streams a
    completion from the hosted model. Always returns a JSON string with
    "text", "action", and "emotion" keys; malformed model output is wrapped
    via format_response() and any failure is surfaced as a friendly error
    envelope rather than an exception.
    """
    # Conversation context: system prompt + last 10 (user, assistant) pairs.
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    for user_msg, bot_msg in history[-10:]:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    # Inject live web results when the message looks like a factual query.
    search_context = ""
    if needs_search(message):
        search_results = search_web(message)
        search_context = f"\n\n[Web Search Results for context]\n{search_results}\n\nUse this information to answer the user's question."
    messages.append({
        "role": "user",
        "content": message + search_context
    })
    try:
        # Stream tokens and accumulate the full reply text.
        response_text = ""
        for token in client.chat_completion(
            messages,
            max_tokens=500,
            temperature=0.7,
            stream=True
        ):
            response_text += token.choices[0].delta.content or ""
        response_text = response_text.strip()
        # Extract the outermost {...} in case the model wrapped its JSON
        # in extra prose.
        json_match = re.search(r'\{[\s\S]*\}', response_text)
        if json_match:
            response_text = json_match.group()
        try:
            parsed = json.loads(response_text)
            # BUGFIX: the model may emit valid JSON that is NOT an object
            # (e.g. a bare list or string). Assigning keys to such a value
            # raised TypeError — which the JSONDecodeError handler below
            # does not catch — and kicked the reply into the generic error
            # path. Treat any non-dict payload as plain text instead.
            if not isinstance(parsed, dict):
                return format_response(response_text)
            # Guarantee the required envelope fields (only fills gaps,
            # identical to the original "not in" checks).
            parsed.setdefault("text", response_text)
            parsed.setdefault("action", None)
            parsed.setdefault("emotion", "neutral")
            return json.dumps(parsed)
        except json.JSONDecodeError:
            # Model ignored the JSON contract — wrap its raw text.
            return format_response(response_text)
    except Exception as e:
        # Top-level boundary: surface any failure as a friendly message.
        return format_response(f"Sorry boss, I had a hiccup: {str(e)}", emotion="concerned")
def api_chat(message: str, history=None) -> dict:
    """API endpoint helper for the mobile app.

    Delegates to chat() and wraps the JSON reply string in a dict.
    BUGFIX: the original declared a mutable default (``history: list = []``),
    which is a single shared list across all calls; use the ``None`` sentinel
    idiom instead (backward compatible — omitted history still means "empty").
    """
    result = chat(message, history if history is not None else [])
    return {"response": result}
def api_search(query: str) -> dict:
    """API endpoint helper: top-5 DuckDuckGo results for *query*."""
    return {"result": search_web(query, max_results=5)}
def respond(message, chat_history):
    """Gradio handler: get AJ's reply, render it, append to the chat log.

    Returns ("", updated_history) so the message textbox clears after send.
    The action payload, when present, is shown inline so it is visible in
    the test UI.
    """
    response = chat(message, chat_history)
    try:
        parsed = json.loads(response)
        display_response = parsed.get("text", response)
        # Surface any device action alongside the reply text.
        if parsed.get("action"):
            display_response += f"\n\n📱 Action: `{json.dumps(parsed['action'])}`"
    except (json.JSONDecodeError, AttributeError, TypeError):
        # BUGFIX: was a bare ``except:`` that also swallowed SystemExit and
        # KeyboardInterrupt. Catch only realistic parse failures: invalid
        # JSON, or a payload that is not a dict (no .get / not indexable).
        display_response = response
    chat_history.append((message, display_response))
    return "", chat_history
# ============================================
# GRADIO 6.0+ COMPATIBLE CODE (FIXED!)
# ============================================
# Minimal test UI wired to respond(). The theme kwarg is intentionally NOT
# passed to Blocks() — it moved to launch() in Gradio 6.0+.
with gr.Blocks(title="AJ AI Backend") as demo:
    # Header banner shown at the top of the test page.
    gr.Markdown("""
# 🤖 AJ AI Backend Server
This is the AI brain for your AJ Personal Assistant!
**Status:** 🟢 Online and ready!
""")
    chatbot = gr.Chatbot(height=400, label="Test Chat")
    msg = gr.Textbox(placeholder="Test your AI here... (e.g., 'Hey AJ, what can you do?')", label="Message")
    with gr.Row():
        submit = gr.Button("Send", variant="primary")
        clear = gr.Button("Clear")
    # Wire events: Enter in the textbox and the Send button both submit;
    # Clear resets the chatbot component to empty.
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    submit.click(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)
    # API usage notes rendered below the chat widget.
    gr.Markdown("""
---
### 📡 API Endpoints
**Chat:** `POST /api/chat`
```json
{"message": "your message", "history": []}
```
**Search:** `POST /api/search`
```json
{"query": "search term"}
```
---
*Your own AI server - No API keys needed!*
""")
# Add API routes using FastAPI: raw JSON endpoints for the mobile app,
# with the Gradio test UI mounted at the web root.
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI()
# Enable CORS so your app can connect.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# a fully open CORS policy — confirm this is acceptable before deploying
# anywhere public.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Your app can connect from anywhere
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.post("/api/chat")
async def chat_endpoint(data: dict):
    # Expected body: {"message": str, "history": [[user, bot], ...]}
    message = data.get("message", "")
    history = data.get("history", [])
    return api_chat(message, history)
@app.post("/api/search")
async def search_endpoint(data: dict):
    # Expected body: {"query": str}
    query = data.get("query", "")
    return api_search(query)
# Mount Gradio app - theme goes in launch() for Gradio 6.0+
app = gr.mount_gradio_app(app, demo, path="/")
# For running locally (optional).
# NOTE(review): demo.launch() serves only the Gradio UI; the /api/* routes
# live on the FastAPI `app` above — run via `uvicorn module:app` to expose
# them. Confirm which entry point deployment uses.
if __name__ == "__main__":
    demo.launch()