Spaces:
Running
Running
Upload 48 files
Browse files
- ai-routes.js +10 -0
- server.js +2 -2
ai-routes.js
CHANGED
|
@@ -210,6 +210,8 @@ async function streamGemini(aiModelObj, baseParams, res) {
|
|
| 210 |
if (text) {
|
| 211 |
fullText += text;
|
| 212 |
res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
|
|
|
|
|
|
|
| 213 |
}
|
| 214 |
}
|
| 215 |
console.log(`✅ [AI Debug] Gemini ${modelName} stream complete.`);
|
|
@@ -244,6 +246,7 @@ async function streamGemma(aiModelObj, baseParams, res) {
|
|
| 244 |
if (text) {
|
| 245 |
fullText += text;
|
| 246 |
res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
|
|
|
|
| 247 |
}
|
| 248 |
}
|
| 249 |
return fullText;
|
|
@@ -282,6 +285,7 @@ async function streamOpenRouter(baseParams, res) {
|
|
| 282 |
if (text) {
|
| 283 |
fullText += text;
|
| 284 |
res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
|
|
|
|
| 285 |
}
|
| 286 |
}
|
| 287 |
return fullText;
|
|
@@ -422,9 +426,11 @@ const checkAIAccess = async (req, res, next) => {
|
|
| 422 |
router.post('/chat', checkAIAccess, async (req, res) => {
|
| 423 |
const { text, audio, history } = req.body;
|
| 424 |
|
|
|
|
| 425 |
res.setHeader('Content-Type', 'text/event-stream');
|
| 426 |
res.setHeader('Cache-Control', 'no-cache');
|
| 427 |
res.setHeader('Connection', 'keep-alive');
|
|
|
|
| 428 |
res.flushHeaders();
|
| 429 |
|
| 430 |
try {
|
|
@@ -439,6 +445,7 @@ router.post('/chat', checkAIAccess, async (req, res) => {
|
|
| 439 |
}
|
| 440 |
if (currentParts.length === 0) {
|
| 441 |
res.write(`data: ${JSON.stringify({ error: 'No input' })}\n\n`);
|
|
|
|
| 442 |
res.end();
|
| 443 |
return;
|
| 444 |
}
|
|
@@ -481,6 +488,7 @@ router.post('/chat', checkAIAccess, async (req, res) => {
|
|
| 481 |
const audioBytes = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
|
| 482 |
if (audioBytes) {
|
| 483 |
res.write(`data: ${JSON.stringify({ audio: audioBytes })}\n\n`);
|
|
|
|
| 484 |
}
|
| 485 |
} catch (ttsError) {
|
| 486 |
console.warn("⚠️ TTS Generation skipped (Quota or Error). Returning text only.");
|
|
@@ -489,6 +497,7 @@ router.post('/chat', checkAIAccess, async (req, res) => {
|
|
| 489 |
await ConfigModel.findOneAndUpdate({ key: 'main' }, { $inc: { aiTotalCalls: 1 } }, { upsert: true });
|
| 490 |
|
| 491 |
res.write('data: [DONE]\n\n');
|
|
|
|
| 492 |
res.end();
|
| 493 |
|
| 494 |
} catch (e) {
|
|
@@ -505,6 +514,7 @@ router.post('/chat', checkAIAccess, async (req, res) => {
|
|
| 505 |
errPayload.message = e.message || 'AI Service Unavailable';
|
| 506 |
}
|
| 507 |
res.write(`data: ${JSON.stringify(errPayload)}\n\n`);
|
|
|
|
| 508 |
res.end();
|
| 509 |
}
|
| 510 |
});
|
|
|
|
| 210 |
if (text) {
|
| 211 |
fullText += text;
|
| 212 |
res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
|
| 213 |
+
// FIX: Force a flush after each SSE chunk so intermediaries (Nginx, compression middleware) deliver it immediately instead of buffering
|
| 214 |
+
if (res.flush) res.flush();
|
| 215 |
}
|
| 216 |
}
|
| 217 |
console.log(`✅ [AI Debug] Gemini ${modelName} stream complete.`);
|
|
|
|
| 246 |
if (text) {
|
| 247 |
fullText += text;
|
| 248 |
res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
|
| 249 |
+
if (res.flush) res.flush();
|
| 250 |
}
|
| 251 |
}
|
| 252 |
return fullText;
|
|
|
|
| 285 |
if (text) {
|
| 286 |
fullText += text;
|
| 287 |
res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
|
| 288 |
+
if (res.flush) res.flush();
|
| 289 |
}
|
| 290 |
}
|
| 291 |
return fullText;
|
|
|
|
| 426 |
router.post('/chat', checkAIAccess, async (req, res) => {
|
| 427 |
const { text, audio, history } = req.body;
|
| 428 |
|
| 429 |
+
// FIX: Send "X-Accel-Buffering: no" so Nginx-style reverse proxies stream SSE events through instead of buffering the response
|
| 430 |
res.setHeader('Content-Type', 'text/event-stream');
|
| 431 |
res.setHeader('Cache-Control', 'no-cache');
|
| 432 |
res.setHeader('Connection', 'keep-alive');
|
| 433 |
+
res.setHeader('X-Accel-Buffering', 'no');
|
| 434 |
res.flushHeaders();
|
| 435 |
|
| 436 |
try {
|
|
|
|
| 445 |
}
|
| 446 |
if (currentParts.length === 0) {
|
| 447 |
res.write(`data: ${JSON.stringify({ error: 'No input' })}\n\n`);
|
| 448 |
+
if (res.flush) res.flush();
|
| 449 |
res.end();
|
| 450 |
return;
|
| 451 |
}
|
|
|
|
| 488 |
const audioBytes = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
|
| 489 |
if (audioBytes) {
|
| 490 |
res.write(`data: ${JSON.stringify({ audio: audioBytes })}\n\n`);
|
| 491 |
+
if (res.flush) res.flush();
|
| 492 |
}
|
| 493 |
} catch (ttsError) {
|
| 494 |
console.warn("⚠️ TTS Generation skipped (Quota or Error). Returning text only.");
|
|
|
|
| 497 |
await ConfigModel.findOneAndUpdate({ key: 'main' }, { $inc: { aiTotalCalls: 1 } }, { upsert: true });
|
| 498 |
|
| 499 |
res.write('data: [DONE]\n\n');
|
| 500 |
+
if (res.flush) res.flush();
|
| 501 |
res.end();
|
| 502 |
|
| 503 |
} catch (e) {
|
|
|
|
| 514 |
errPayload.message = e.message || 'AI Service Unavailable';
|
| 515 |
}
|
| 516 |
res.write(`data: ${JSON.stringify(errPayload)}\n\n`);
|
| 517 |
+
if (res.flush) res.flush();
|
| 518 |
res.end();
|
| 519 |
}
|
| 520 |
});
|
server.js
CHANGED
|
@@ -23,10 +23,10 @@ const MONGO_URI = 'mongodb+srv://dv890a:<REDACTED>@chatpro.gw3v0v7.mongodb.net/ch  // SECURITY: live database credentials were exposed in this diff — rotate the password immediately and load MONGO_URI from an environment variable instead of hardcoding it
|
|
| 23 |
const app = express();
|
| 24 |
|
| 25 |
// FIX: Disable compression for AI Chat SSE endpoint to allow real-time streaming
|
|
|
|
| 26 |
app.use(compression({
|
| 27 |
filter: (req, res) => {
|
| 28 |
-
|
| 29 |
-
if (req.path.includes('/api/ai/chat')) {
|
| 30 |
return false; // Don't compress SSE streams
|
| 31 |
}
|
| 32 |
return compression.filter(req, res);
|
|
|
|
| 23 |
const app = express();
|
| 24 |
|
| 25 |
// FIX: Disable compression for AI Chat SSE endpoint to allow real-time streaming
|
| 26 |
+
// Using req.originalUrl to match the full path regardless of mounting point
|
| 27 |
app.use(compression({
|
| 28 |
filter: (req, res) => {
|
| 29 |
+
if (req.originalUrl && req.originalUrl.includes('/api/ai/chat')) {
|
|
|
|
| 30 |
return false; // Don't compress SSE streams
|
| 31 |
}
|
| 32 |
return compression.filter(req, res);
|