yukee1992 commited on
Commit
7f141ed
·
verified ·
1 Parent(s): 5e9c503

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +354 -185
app.py CHANGED
@@ -1,262 +1,431 @@
1
- # app.py - General AI Chat API Gateway
2
  from fastapi import FastAPI, HTTPException
3
  from pydantic import BaseModel
4
- from typing import Optional, Dict, Any
5
  import time
6
  import json
7
  import os
 
 
 
 
 
 
8
 
9
  app = FastAPI(
10
- title="General AI Chat API",
11
- description="Simple API gateway - accepts any prompt and returns as-is. All logic handled by n8n.",
12
- version="1.0.0"
13
  )
14
 
15
- # Request models
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  class ChatRequest(BaseModel):
17
- """General chat request - accepts any structure"""
18
- messages: Optional[list] = None
 
 
 
19
  prompt: Optional[str] = None
20
- instruction: Optional[str] = None
21
  content: Optional[str] = None
22
- parameters: Optional[Dict[str, Any]] = None
 
 
 
 
 
 
23
  metadata: Optional[Dict[str, Any]] = None
24
 
25
- # For compatibility with different formats
26
  text: Optional[str] = None
27
  query: Optional[str] = None
28
- data: Optional[Any] = None
29
 
30
  class ChatResponse(BaseModel):
 
31
  success: bool
32
  response: str
33
- received_at: str
 
34
  processing_time_ms: float
35
- echo: Optional[Dict[str, Any]] = None # Echo back what you sent
 
 
36
 
37
  class HealthResponse(BaseModel):
38
  status: str
39
  service: str
 
40
  timestamp: str
41
- uptime: float
42
 
43
- # Startup time
44
- startup_time = time.time()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
  @app.get("/")
47
  async def root():
48
  return {
49
  "status": "online",
50
- "service": "General AI Chat API",
51
- "description": "Accepts any prompt/instruction from n8n. All logic handled by n8n workflow.",
 
52
  "endpoints": {
53
- "/chat": "POST - General chat endpoint",
54
- "/process": "POST - Process endpoint (legacy)",
55
- "/webhook": "POST - Webhook endpoint",
56
  "/health": "GET - Health check",
57
- "/echo": "POST - Echo test endpoint"
58
  },
59
- "usage": "Send any JSON payload. It will be processed by your n8n workflow logic."
60
  }
61
 
62
  @app.get("/health", response_model=HealthResponse)
63
  async def health():
64
  return HealthResponse(
65
  status="healthy",
66
- service="AI Chat Gateway",
67
- timestamp=time.strftime("%Y-%m-%d %H:%M:%S"),
68
- uptime=time.time() - startup_time
69
  )
70
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  @app.post("/chat", response_model=ChatResponse)
72
  async def chat(request: ChatRequest):
 
73
  start_time = time.time()
74
 
75
  try:
76
- # DEBUG: Log what we received
77
- print(f"📨 Received request:")
78
- print(f" Prompt: {request.prompt}")
79
- print(f" Content: {request.content}")
80
- print(f" Content length: {len(request.content) if request.content else 0}")
81
-
82
- # FIXED: Properly extract content
83
- text_to_process = ""
84
-
85
- # Priority 1: Use content field
86
- if request.content:
87
- text_to_process = f"{request.prompt or ''}\n\n{request.content}"
88
- # Priority 2: Use prompt if no content
89
- elif request.prompt:
90
- text_to_process = request.prompt
91
- # Priority 3: Use other fields
92
- elif request.text:
93
- text_to_process = request.text
94
- elif request.query:
95
- text_to_process = request.query
96
- elif request.instruction:
97
- text_to_process = request.instruction
98
- elif request.messages:
99
- text_to_process = str(request.messages)
100
- elif request.data:
101
- text_to_process = str(request.data)
102
- else:
103
- text_to_process = "No content provided"
104
-
105
- print(f"📤 Text to process: {text_to_process[:100]}...")
106
 
107
- # For messages array (OpenAI format)
108
- if request.messages:
109
- # Extract last user message
110
- user_messages = [msg for msg in request.messages if msg.get("role") == "user"]
111
- if user_messages:
112
- text_to_process = user_messages[-1].get("content", text_to_process)
113
 
114
- # Simple processing - just echo/acknowledge
115
- # In reality, n8n will replace this with actual AI processing
116
- response_text = f"✅ Received at {time.strftime('%H:%M:%S')}:\n\n{text_to_process[:500]}"
117
-
118
- # Add note about processing
119
- if len(text_to_process) > 500:
120
- response_text += f"\n\n... (truncated, total {len(text_to_process)} characters)"
 
121
 
122
- processing_time = (time.time() - start_time) * 1000 # Convert to ms
123
 
124
  return ChatResponse(
125
- success=True,
126
- response=response_text,
127
- received_at=time.strftime("%Y-%m-%d %H:%M:%S"),
 
128
  processing_time_ms=processing_time,
129
- echo={
130
- "original_length": len(text_to_process),
131
- "fields_received": list(request.model_dump(exclude_none=True).keys()),
132
- "timestamp": time.time()
133
- }
134
  )
135
 
136
  except Exception as e:
137
  processing_time = (time.time() - start_time) * 1000
 
 
138
  return ChatResponse(
139
  success=False,
140
- response=f"Error: {str(e)}",
141
- received_at=time.strftime("%Y-%m-%d %H:%M:%S"),
142
- processing_time_ms=processing_time
 
 
 
143
  )
144
 
145
- @app.post("/process", response_model=ChatResponse)
146
  async def process(request: Dict[str, Any]):
147
- """
148
- Legacy endpoint - accepts any JSON structure
149
- Compatible with previous versions
150
- """
151
  start_time = time.time()
152
 
153
  try:
154
- # Extract text from any field
155
- text = ""
156
- for key in ['prompt', 'text', 'content', 'message', 'query', 'input']:
157
- if key in request and request[key]:
158
- text = str(request[key])
159
- break
160
-
161
- if not text:
162
- text = json.dumps(request)[:500]
163
-
164
- processing_time = (time.time() - start_time) * 1000
165
-
166
- return ChatResponse(
167
- success=True,
168
- response=f"📥 Process endpoint received:\n\n{text[:400]}...",
169
- received_at=time.strftime("%Y-%m-%d %H:%M:%S"),
170
- processing_time_ms=processing_time,
171
- echo={"original_request": request}
172
  )
173
 
174
- except Exception as e:
175
- processing_time = (time.time() - start_time) * 1000
176
- return ChatResponse(
177
- success=False,
178
- response=f"Error in /process: {str(e)}",
179
- received_at=time.strftime("%Y-%m-%d %H:%M:%S"),
180
- processing_time_ms=processing_time
181
- )
182
-
183
- @app.post("/webhook")
184
- async def webhook(request: Dict[str, Any]):
185
- """
186
- Webhook endpoint for n8n
187
- """
188
- try:
189
- # Log the webhook
190
- print(f"📨 Webhook received: {json.dumps(request, indent=2)[:200]}...")
191
-
192
- # Extract useful info
193
- webhook_id = request.get("webhookId")
194
- workflow_id = request.get("workflowId")
195
-
196
- # Echo back with some processing
197
  return {
198
- "success": True,
199
- "message": "Webhook received successfully",
200
- "webhook_id": webhook_id,
201
- "workflow_id": workflow_id,
202
- "received_at": time.strftime("%Y-%m-%d %H:%M:%S"),
203
- "data_preview": str(request)[:200] + "..." if len(str(request)) > 200 else str(request)
204
  }
205
 
206
  except Exception as e:
207
- raise HTTPException(status_code=500, detail=str(e))
208
-
209
- @app.post("/echo")
210
- async def echo(request: Dict[str, Any]):
211
- """
212
- Simple echo endpoint for testing
213
- """
214
- return {
215
- "status": "echo",
216
- "received": request,
217
- "timestamp": time.time(),
218
- "headers": {"content-type": "application/json"},
219
- "note": "This is exactly what you sent me"
220
- }
221
-
222
- # Batch endpoint
223
- @app.post("/batch")
224
- async def batch_process(requests: list):
225
- """
226
- Process multiple requests at once
227
- """
228
- responses = []
229
-
230
- for i, req in enumerate(requests):
231
- responses.append({
232
- "index": i,
233
- "success": True,
234
- "response": f"Processed item {i}: {str(req)[:100]}...",
235
- "original_length": len(str(req))
236
- })
237
-
238
- return {
239
- "batch_size": len(requests),
240
- "responses": responses,
241
- "processed_at": time.strftime("%Y-%m-%d %H:%M:%S")
242
- }
243
-
244
- # Test endpoint
245
- @app.get("/test")
246
- async def test_endpoint(prompt: str = "Hello, test!"):
247
- """
248
- GET endpoint for quick testing
249
- """
250
- return {
251
- "test": "success",
252
- "your_prompt": prompt,
253
- "response": f"Test response to: {prompt}",
254
- "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
255
- }
256
 
257
  if __name__ == "__main__":
258
  import uvicorn
259
  port = int(os.getenv("PORT", 7860))
260
- print(f"🚀 Starting General AI Chat API on port {port}")
261
- print("📝 Description: Simple gateway - all logic handled by n8n")
 
262
  uvicorn.run(app, host="0.0.0.0", port=port)
 
1
# app.py - General AI Gateway (No specific prompts, 100% free)
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Optional, Dict, Any, List
import time
import json
import os
import requests
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(
    title="General AI Chat Gateway",
    description="General AI gateway. All prompts/instructions come from n8n. Supports multiple free AI providers.",
    version="2.0.0"
)

# Configuration for FREE AI providers.
# Each entry holds the chat-completions URL and the default model id used
# when the caller does not pick one explicitly.
FREE_AI_PROVIDERS = {
    "openrouter": {
        "url": "https://openrouter.ai/api/v1/chat/completions",
        "free_model": "mistralai/mistral-7b-instruct:free",
        # NOTE(review): despite the key name this is also a ":free" model id;
        # only "free_model" is ever read by the gateway code.
        "paid_model": "meta-llama/llama-3.1-8b-instruct:free"
    },
    "groq": {
        "url": "https://api.groq.com/openai/v1/chat/completions",
        # FIX: Groq's hosted Llama 3.1 8B model id is "llama-3.1-8b-instant",
        # not "...-instruct"; the previous id is rejected by the Groq API.
        "free_model": "llama-3.1-8b-instant",
        "requires_key": True
    },
    "google": {
        "url": "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
        "requires_key": True
    }
}
39
# Request models - GENERAL, no specific prompts
class ChatRequest(BaseModel):
    """General chat request - ALL prompts come from n8n"""
    # Messages in OpenAI format (list of {"role": ..., "content": ...})
    messages: Optional[List[Dict[str, str]]] = None

    # OR simple prompt/content format (combined into one user turn)
    prompt: Optional[str] = None
    content: Optional[str] = None

    # Configuration
    provider: Optional[str] = "openrouter"  # openrouter, groq, google
    model: Optional[str] = None  # Specific model if needed
    parameters: Optional[Dict[str, Any]] = None  # temperature, max_tokens, etc.

    # Metadata from n8n (echoed back unchanged in the response)
    metadata: Optional[Dict[str, Any]] = None

    # Compatibility aliases for other payload shapes
    text: Optional[str] = None
    query: Optional[str] = None
    instruction: Optional[str] = None  # NOTE(review): declared for compatibility but not read by /chat — verify intended
62
class ChatResponse(BaseModel):
    """AI response"""
    # False when the provider call failed (fallback/error path)
    success: bool
    # The AI answer, or a fallback/error message
    response: str
    # Which backend produced the answer ("fallback"/"error" on failure)
    provider: str
    model: str
    # Wall-clock handling time in milliseconds
    processing_time_ms: float
    is_free: bool
    # Exception text when the endpoint itself raised; None otherwise
    error: Optional[str] = None
    # Caller-supplied metadata, echoed back unchanged
    metadata: Optional[Dict[str, Any]] = None
 
73
class HealthResponse(BaseModel):
    """Health-check payload returned by GET /health."""
    status: str
    service: str
    # Names of the providers configured in FREE_AI_PROVIDERS
    free_providers: List[str]
    # Local server time, "%Y-%m-%d %H:%M:%S"
    timestamp: str
78
 
79
class FreeAIGateway:
    """Free AI Gateway - routes chat requests to free AI providers.

    Provider endpoints and default models come from the module-level
    FREE_AI_PROVIDERS table; API keys are read from the environment at
    construction time. Every call returns a plain dict with the keys:
    success, content, provider, model, is_free.
    """

    def __init__(self):
        self.providers = FREE_AI_PROVIDERS
        # Get API keys from environment ("" means not configured)
        self.api_keys = {
            "openrouter": os.getenv("OPENROUTER_API_KEY", ""),
            "groq": os.getenv("GROQ_API_KEY", ""),
            "google": os.getenv("GOOGLE_API_KEY", "")
        }

        logger.info("✅ Free AI Gateway initialized")
        logger.info(f"Available providers: {list(self.providers.keys())}")

    def process(self,
                messages: List[Dict[str, str]] = None,
                prompt: str = None,
                content: str = None,
                provider: str = "openrouter",
                **kwargs) -> Dict[str, Any]:
        """Process a chat request with a free AI provider.

        Accepts either an OpenAI-style `messages` list or a simple
        prompt/content pair. An unknown provider name falls through to
        the offline fallback response instead of raising.
        """
        # Normalize every input shape to OpenAI-style messages.
        final_messages = self._prepare_messages(messages, prompt, content)

        # Route to the selected provider.
        if provider == "openrouter":
            return self._call_openrouter(final_messages, **kwargs)
        elif provider == "groq":
            return self._call_groq(final_messages, **kwargs)
        elif provider == "google":
            return self._call_google(final_messages, **kwargs)
        else:
            return self._fallback_response(final_messages, f"Unknown provider: {provider}")

    def _prepare_messages(self, messages, prompt, content):
        """Prepare messages in OpenAI format.

        An explicit `messages` list wins unchanged; otherwise prompt and/or
        content are combined into a single user turn behind a generic
        system prompt.
        """
        if messages:
            return messages

        result_messages = []

        # Generic system prompt only; the real task prompts come from n8n.
        system_prompt = "You are a helpful AI assistant."
        result_messages.append({"role": "system", "content": system_prompt})

        # Build the user message from whichever fields are present.
        user_message = ""
        if prompt and content:
            user_message = f"{prompt}\n\n{content}"
        elif prompt:
            user_message = prompt
        elif content:
            user_message = content

        if user_message:
            result_messages.append({"role": "user", "content": user_message})

        return result_messages

    @staticmethod
    def _extract_last_user_message(messages: List[Dict[str, str]]) -> str:
        """Return the content of the most recent 'user' message, or ''.

        Shared by the Google adapter and the fallback path (previously
        duplicated inline in both).
        """
        for msg in reversed(messages):
            if msg.get("role") == "user":
                return msg.get("content", "")
        return ""

    def _call_openrouter(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
        """Call OpenRouter (FREE tier available)."""
        try:
            api_key = self.api_keys["openrouter"]
            model = kwargs.get("model") or self.providers["openrouter"]["free_model"]

            headers = {
                "Authorization": f"Bearer {api_key}" if api_key else "",
                "Content-Type": "application/json",
                # OpenRouter asks callers to identify their app.
                "HTTP-Referer": "https://huggingface.co",
                "X-Title": "AI Chat Gateway"
            }

            payload = {
                "model": model,
                "messages": messages,
                "temperature": kwargs.get("temperature", 0.7),
                "max_tokens": kwargs.get("max_tokens", 1000),
                "stream": False
            }

            logger.info(f"📤 Calling OpenRouter with model: {model}")

            response = requests.post(
                self.providers["openrouter"]["url"],
                headers=headers,
                json=payload,
                timeout=30
            )

            if response.status_code == 200:
                result = response.json()
                return {
                    "success": True,
                    "content": result["choices"][0]["message"]["content"],
                    "provider": "openrouter",
                    # FIX: tolerate responses without a "model" echo field.
                    "model": result.get("model", model),
                    "is_free": True
                }
            else:
                error_msg = f"OpenRouter error: {response.status_code}"
                return self._fallback_response(messages, error_msg)

        except Exception as e:
            error_msg = f"OpenRouter exception: {str(e)}"
            return self._fallback_response(messages, error_msg)

    def _call_groq(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
        """Call Groq (FREE tier available). Requires GROQ_API_KEY."""
        try:
            api_key = self.api_keys["groq"]
            if not api_key:
                return self._fallback_response(messages, "Groq API key not configured")

            model = kwargs.get("model") or self.providers["groq"]["free_model"]

            headers = {
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json"
            }

            payload = {
                "model": model,
                "messages": messages,
                "temperature": kwargs.get("temperature", 0.7),
                "max_tokens": kwargs.get("max_tokens", 1000)
            }

            logger.info(f"📤 Calling Groq with model: {model}")

            response = requests.post(
                self.providers["groq"]["url"],
                headers=headers,
                json=payload,
                timeout=30
            )

            if response.status_code == 200:
                result = response.json()
                return {
                    "success": True,
                    "content": result["choices"][0]["message"]["content"],
                    "provider": "groq",
                    # FIX: tolerate responses without a "model" echo field.
                    "model": result.get("model", model),
                    "is_free": True
                }
            else:
                error_msg = f"Groq error: {response.status_code}"
                return self._fallback_response(messages, error_msg)

        except Exception as e:
            error_msg = f"Groq exception: {str(e)}"
            return self._fallback_response(messages, error_msg)

    def _call_google(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
        """Call Google AI (FREE tier available). Requires GOOGLE_API_KEY.

        Gemini's REST shape differs from OpenAI's, so only the latest user
        message is forwarded (system/assistant turns are dropped).
        """
        try:
            api_key = self.api_keys["google"]
            if not api_key:
                return self._fallback_response(messages, "Google API key not configured")

            # Convert OpenAI format to Google format.
            last_user_message = self._extract_last_user_message(messages)

            url = f"{self.providers['google']['url']}?key={api_key}"

            payload = {
                "contents": [{
                    "parts": [{"text": last_user_message}]
                }],
                "generationConfig": {
                    "temperature": kwargs.get("temperature", 0.7),
                    "maxOutputTokens": kwargs.get("max_tokens", 1000)
                }
            }

            logger.info("📤 Calling Google AI")

            response = requests.post(
                url,
                json=payload,
                timeout=30
            )

            if response.status_code == 200:
                result = response.json()
                if "candidates" in result and len(result["candidates"]) > 0:
                    return {
                        "success": True,
                        "content": result["candidates"][0]["content"]["parts"][0]["text"],
                        "provider": "google",
                        "model": "gemini-pro",
                        "is_free": True
                    }
                else:
                    return self._fallback_response(messages, "Google AI returned no candidates")
            else:
                error_msg = f"Google AI error: {response.status_code}"
                return self._fallback_response(messages, error_msg)

        except Exception as e:
            error_msg = f"Google AI exception: {str(e)}"
            return self._fallback_response(messages, error_msg)

    def _fallback_response(self, messages: List[Dict[str, str]], error: str = None) -> Dict[str, Any]:
        """Fallback response when an AI provider fails or is unknown.

        Always reports success=False so callers can distinguish a real
        model answer from this canned acknowledgement.
        """
        last_user_message = self._extract_last_user_message(messages)

        if error:
            content = f"⚠️ AI暂时不可用({error})。\n\n收到:{last_user_message[:100]}..."
        else:
            content = f"🤖 AI处理响应:\n\n已收到您的请求:{last_user_message[:150]}..."

        return {
            "success": False,
            "content": content,
            "provider": "fallback",
            "model": "none",
            "is_free": True
        }
+
312
# Initialize gateway (module-level singleton shared by every endpoint;
# constructed at import time, so env vars must be set before startup)
ai_gateway = FreeAIGateway()
314
 
315
  @app.get("/")
316
  async def root():
317
  return {
318
  "status": "online",
319
+ "service": "Free AI Chat Gateway",
320
+ "description": "General AI gateway. ALL prompts come from n8n.",
321
+ "free_providers": list(FREE_AI_PROVIDERS.keys()),
322
  "endpoints": {
323
+ "/chat": "POST - AI chat with free providers",
324
+ "/process": "POST - Simple processing",
 
325
  "/health": "GET - Health check",
326
+ "/providers": "GET - Available providers"
327
  },
328
+ "note": "Configure API keys in environment for better performance"
329
  }
330
 
331
@app.get("/health", response_model=HealthResponse)
async def health():
    """Liveness probe: reports service name, provider list, and local time."""
    now = time.strftime("%Y-%m-%d %H:%M:%S")
    return HealthResponse(
        status="healthy",
        service="Free AI Chat Gateway",
        free_providers=list(FREE_AI_PROVIDERS.keys()),
        timestamp=now,
    )
339
 
340
@app.get("/providers")
async def list_providers():
    """Describe the available providers and which ones need an API key."""
    key_requirements = {
        "openrouter": "optional (better rate limits)",
        "groq": "required",
        "google": "required",
    }
    return {
        "available_providers": list(FREE_AI_PROVIDERS.keys()),
        "recommended": "openrouter (no API key needed for free tier)",
        "configuration_needed": key_requirements,
    }
351
+
352
@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    """Main chat endpoint - ALL prompts come from n8n.

    Accepts either an OpenAI-style `messages` list or a simple
    prompt/content pair, routes it to the selected free provider via the
    module-level gateway, and wraps the answer in a ChatResponse.
    Never raises: failures come back as success=False in the same schema.
    """
    start_time = time.time()

    try:
        # Get provider from request or use default
        provider = request.provider or "openrouter"

        # Optional generation parameters (temperature, max_tokens, model, ...)
        params = request.parameters or {}

        # FIX: also fall back to `instruction` — the field is declared on
        # ChatRequest "for compatibility" but was previously never read.
        content = request.content or request.text or request.query or request.instruction

        # Process with AI gateway
        ai_result = ai_gateway.process(
            messages=request.messages,
            prompt=request.prompt,
            content=content,
            provider=provider,
            **params
        )

        processing_time = (time.time() - start_time) * 1000

        return ChatResponse(
            success=ai_result["success"],
            response=ai_result["content"],
            provider=ai_result["provider"],
            model=ai_result["model"],
            processing_time_ms=processing_time,
            is_free=ai_result["is_free"],
            metadata=request.metadata
        )

    except Exception as e:
        processing_time = (time.time() - start_time) * 1000
        logger.error(f"Error in /chat: {str(e)}")

        # Surface the failure in the same response schema instead of a 500.
        return ChatResponse(
            success=False,
            response=f"处理错误:{str(e)[:100]}",
            provider="error",
            model="none",
            processing_time_ms=processing_time,
            is_free=True,
            error=str(e)
        )
398
 
399
@app.post("/process")
async def process(request: Dict[str, Any]):
    """Legacy endpoint: accepts an arbitrary JSON object.

    Pulls prompt/content out of loosely-named fields and forwards them to
    the gateway. Always answers HTTP 200 with a success flag.
    """
    start_time = time.time()

    try:
        ai_result = ai_gateway.process(
            prompt=request.get("prompt", ""),
            content=request.get("content", request.get("text", "")),
            provider=request.get("provider", "openrouter")
        )

        return {
            "success": ai_result["success"],
            "result": ai_result["content"],
            "provider": ai_result["provider"],
            "processing_time_ms": (time.time() - start_time) * 1000
        }

    except Exception as e:
        # FIX: report timing on the error path too, so n8n consumers see
        # the same payload shape as the success branch.
        return {
            "success": False,
            "result": f"Error: {str(e)}",
            "provider": "error",
            "processing_time_ms": (time.time() - start_time) * 1000
        }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
424
 
425
if __name__ == "__main__":
    import uvicorn
    # Hugging Face Spaces exposes port 7860 by default; PORT overrides it.
    port = int(os.getenv("PORT", 7860))
    logger.info(f"🚀 Starting Free AI Chat Gateway on port {port}")
    logger.info("🎯 ALL prompts come from n8n - this is a general gateway")
    logger.info("🆓 Using free AI providers")
    uvicorn.run(app, host="0.0.0.0", port=port)