Update server.js
Browse files
server.js
CHANGED
|
@@ -3,15 +3,13 @@ import path from "path";
|
|
| 3 |
import { fileURLToPath } from "url";
|
| 4 |
import dotenv from "dotenv";
|
| 5 |
import bodyParser from "body-parser";
|
| 6 |
-
import { InferenceClient } from "@huggingface/inference";
|
| 7 |
|
| 8 |
dotenv.config();
|
| 9 |
|
| 10 |
const app = express();
|
| 11 |
const __filename = fileURLToPath(import.meta.url);
|
| 12 |
const __dirname = path.dirname(__filename);
|
| 13 |
-
const PORT = process.env.APP_PORT ||
|
| 14 |
-
const MODEL_ID = "deepseek-ai/DeepSeek-V3-0324";
|
| 15 |
const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY;
|
| 16 |
|
| 17 |
app.use(bodyParser.json());
|
|
@@ -34,48 +32,77 @@ app.post("/api/ask-ai", async (req, res) => {
|
|
| 34 |
res.setHeader("Cache-Control", "no-cache");
|
| 35 |
res.setHeader("Connection", "keep-alive");
|
| 36 |
|
| 37 |
-
const
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
|
| 40 |
try {
|
| 41 |
-
const
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
? [{ role: "assistant", content: `The current code is: ${html}.` }]
|
| 55 |
-
: []),
|
| 56 |
-
{ role: "user", content: prompt },
|
| 57 |
-
],
|
| 58 |
-
max_tokens: 4000,
|
| 59 |
});
|
| 60 |
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
completeResponse += chunk;
|
| 68 |
-
res.write(chunk);
|
| 69 |
|
| 70 |
-
if (
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
}
|
| 72 |
-
|
|
|
|
| 73 |
|
| 74 |
-
|
| 75 |
-
} catch (
|
| 76 |
res.status(500).send({
|
| 77 |
ok: false,
|
| 78 |
-
message:
|
| 79 |
});
|
| 80 |
}
|
| 81 |
});
|
|
@@ -85,5 +112,5 @@ app.get("*", (_req, res) => {
|
|
| 85 |
});
|
| 86 |
|
| 87 |
app.listen(PORT, () => {
|
| 88 |
-
console.log(
|
| 89 |
});
|
|
|
|
// Module path helpers and environment configuration.
import { fileURLToPath } from "url";
import dotenv from "dotenv";
import bodyParser from "body-parser";

// Load variables from .env into process.env before anything reads them.
dotenv.config();

const app = express();
// ESM has no __filename/__dirname; reconstruct them from import.meta.url.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Falls back to 7860 when APP_PORT is unset or empty.
const PORT = process.env.APP_PORT || 7860;
// NOTE(review): presumably provided via .env or deployment secrets — the
// OpenRouter request below will fail without it; confirm it is configured.
const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY;

// Parse JSON request bodies into req.body for all routes.
app.use(bodyParser.json());
|
|
|
|
| 32 |
res.setHeader("Cache-Control", "no-cache");
|
| 33 |
res.setHeader("Connection", "keep-alive");
|
| 34 |
|
| 35 |
+
const messages = [
|
| 36 |
+
{
|
| 37 |
+
role: "system",
|
| 38 |
+
content:
|
| 39 |
+
"Você é um especialista em landing pages. Gere um HTML bonito e persuasivo para a copy enviada. Responda apenas com código HTML válido e completo.",
|
| 40 |
+
},
|
| 41 |
+
...(previousPrompt ? [{ role: "user", content: previousPrompt }] : []),
|
| 42 |
+
...(html ? [{ role: "assistant", content: `The current code is: ${html}` }] : []),
|
| 43 |
+
{ role: "user", content: prompt },
|
| 44 |
+
];
|
| 45 |
|
| 46 |
try {
|
| 47 |
+
const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
|
| 48 |
+
method: "POST",
|
| 49 |
+
headers: {
|
| 50 |
+
Authorization: `Bearer ${OPENROUTER_API_KEY}`,
|
| 51 |
+
"Content-Type": "application/json",
|
| 52 |
+
"HTTP-Referer": "https://raypages.com", // personalize aqui
|
| 53 |
+
"X-Title": "RayPages AI",
|
| 54 |
+
},
|
| 55 |
+
body: JSON.stringify({
|
| 56 |
+
model: "deepseek-chat-v3.0",
|
| 57 |
+
stream: true,
|
| 58 |
+
messages,
|
| 59 |
+
}),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
});
|
| 61 |
|
| 62 |
+
if (!response.ok || !response.body) {
|
| 63 |
+
return res.status(500).send({
|
| 64 |
+
ok: false,
|
| 65 |
+
message: "Failed to fetch OpenRouter response",
|
| 66 |
+
});
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
const reader = response.body.getReader();
|
| 70 |
+
const decoder = new TextDecoder("utf-8");
|
| 71 |
+
let completeResponse = "";
|
| 72 |
+
|
| 73 |
+
const stream = async () => {
|
| 74 |
+
while (true) {
|
| 75 |
+
const { done, value } = await reader.read();
|
| 76 |
+
if (done) break;
|
| 77 |
|
| 78 |
+
const chunk = decoder.decode(value, { stream: true });
|
| 79 |
+
const matches = chunk.match(/"content":"([^"]*)"/g);
|
|
|
|
|
|
|
| 80 |
|
| 81 |
+
if (matches) {
|
| 82 |
+
for (const match of matches) {
|
| 83 |
+
const text = match
|
| 84 |
+
.replace(/"content":"|"/g, "")
|
| 85 |
+
.replace(/\\n/g, "\n")
|
| 86 |
+
.replace(/\\t/g, "\t");
|
| 87 |
+
|
| 88 |
+
completeResponse += text;
|
| 89 |
+
res.write(text);
|
| 90 |
+
|
| 91 |
+
if (completeResponse.includes("</html>")) {
|
| 92 |
+
res.end();
|
| 93 |
+
return;
|
| 94 |
+
}
|
| 95 |
+
}
|
| 96 |
+
}
|
| 97 |
}
|
| 98 |
+
res.end();
|
| 99 |
+
};
|
| 100 |
|
| 101 |
+
stream();
|
| 102 |
+
} catch (err) {
|
| 103 |
res.status(500).send({
|
| 104 |
ok: false,
|
| 105 |
+
message: err.message || "AI request failed",
|
| 106 |
});
|
| 107 |
}
|
| 108 |
});
|
|
|
|
| 112 |
});
|
| 113 |
|
| 114 |
// Start the HTTP server once all routes are registered.
app.listen(PORT, () => console.log(`✅ Server running on port ${PORT}`));