Spaces:
Sleeping
Sleeping
| import express from "express"; | |
| import path from "path"; | |
| import { fileURLToPath } from "url"; | |
| //import dotenv from "dotenv"; | |
| import cookieParser from "cookie-parser"; | |
| import { | |
| createRepo, | |
| uploadFiles, | |
| whoAmI, | |
| spaceInfo, | |
| fileExists, | |
| } from "@huggingface/hub"; | |
| import bodyParser from "body-parser"; | |
| import { PROVIDERS } from "./utils/providers.js"; | |
| import { COLORS } from "./utils/colors.js"; | |
| import { TEMPLATES, CDN_URLS } from "./utils/templates.js"; | |
| // Load environment variables from .env file | |
| import dotenv from "dotenv"; | |
// Load .env only when NOT in production; production platforms are expected
// to inject environment variables directly.
if (process.env.NODE_ENV !== 'production') {
  console.log("Memuat environment variables dari .env (non-production)...");
  dotenv.config();
} else {
  console.log("Mode production, mengharapkan environment variables diset langsung.");
}

// Vercel automatically sets the VERCEL environment variable; any non-empty
// value means we are running there (covers '1', 'true', and anything else).
const isVercelEnvironment = Boolean(process.env.VERCEL);

// Hourly per-IP request cap; 0 (unset, invalid, or <= 0) disables rate limiting.
const IP_RATE_LIMIT = parseInt(process.env.IP_RATE_LIMIT, 10) || 0;

// In-memory map of client IP -> array of request timestamps (ms) in the last hour.
const ipRequestCache = {};

const app = express();
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

// OpenAI-compatible API configuration; each can be overridden per request.
const MODEL_ID = process.env.OPENAI_MODEL || "gpt-4o";
const OPENAI_BASE_URL = process.env.OPENAI_BASE_URL || "https://api.openai.com/v1";
const DEFAULT_MAX_TOKENS = process.env.DEFAULT_MAX_TOKENS || 64000;
const DEFAULT_TEMPERATURE = process.env.DEFAULT_TEMPERATURE || 0;

app.use(cookieParser());
app.use(bodyParser.json());

// Serve the built frontend; on Vercel the bundle lives relative to the
// process working directory, locally relative to this file.
const staticPath = isVercelEnvironment ? path.join(process.cwd(), "dist") : path.join(__dirname, "dist");
app.use(express.static(staticPath));
// IP rate limiting middleware - enforces an hourly per-IP request cap.
app.use((req, res, next) => {
  // If the limit is not configured or the limit value is <= 0, skip the check.
  if (IP_RATE_LIMIT <= 0) {
    req.rateLimit = { limited: false };
    return next();
  }
  // Resolve the client IP. x-forwarded-for may contain a comma-separated
  // proxy chain ("client, proxy1, proxy2"); the originating client is the
  // FIRST entry — using the raw header would key the limiter on the whole
  // chain and fragment counts across proxy paths.
  const forwardedFor = req.headers['x-forwarded-for'];
  const clientIp = (forwardedFor ? String(forwardedFor).split(',')[0].trim() : '') ||
    req.socket.remoteAddress;
  // Static resource requests are not counted towards the limit.
  if (req.path.startsWith('/assets/') ||
    req.path.endsWith('.js') ||
    req.path.endsWith('.css') ||
    req.path.endsWith('.ico') ||
    req.path.endsWith('.png') ||
    req.path.endsWith('.jpg') ||
    req.path.endsWith('.svg')) {
    req.rateLimit = { limited: false };
    return next();
  }
  const now = Date.now();
  const hourAgo = now - 3600000; // Timestamp 1 hour ago
  // Initialize this IP's record and drop entries older than one hour.
  if (!ipRequestCache[clientIp]) {
    ipRequestCache[clientIp] = [];
  }
  ipRequestCache[clientIp] = ipRequestCache[clientIp].filter(timestamp => timestamp > hourAgo);
  // Current usage and remaining allowance within the sliding window.
  const requestCount = ipRequestCache[clientIp].length;
  const remainingRequests = IP_RATE_LIMIT - requestCount;
  // Attach rate limit info to the request for downstream handlers/logging.
  req.rateLimit = {
    limited: requestCount >= IP_RATE_LIMIT,
    requestCount,
    remainingRequests,
    clientIp
  };
  if (req.rateLimit.limited) {
    // The window resets one hour after the oldest recorded request.
    const oldestRequest = Math.min(...ipRequestCache[clientIp]);
    const resetTime = oldestRequest + 3600000; // Earliest request time + 1 hour
    const waitTimeMs = resetTime - now;
    const waitTimeMinutes = Math.ceil(waitTimeMs / 60000); // Round up to minutes
    // Pick a message language from the Accept-Language header.
    const clientLang = req.headers['accept-language'] || 'en';
    const isIdClient = clientLang.toLowerCase().includes('id');
    console.log(`Rate limit exceeded for IP: ${clientIp}, can try again in ${waitTimeMinutes} minutes`);
    // Fix: the Indonesian branch previously returned English text, defeating
    // the localization the surrounding code intended. It is now actually Indonesian.
    const message = isIdClient
      ? `Terlalu banyak permintaan. Silakan coba lagi dalam ${waitTimeMinutes} menit.`
      : `Too many requests. Please try again in ${waitTimeMinutes} minutes.`;
    return res.status(429).send({
      ok: false,
      message: message,
      waitTimeMinutes: waitTimeMinutes,
      resetTime: resetTime
    });
  }
  // Record the timestamp of this request (only here, to avoid double counting).
  ipRequestCache[clientIp].push(now);
  // Lazily start an hourly sweep that prunes expired entries and empty IPs.
  if (!global.ipCacheCleanupInterval) {
    global.ipCacheCleanupInterval = setInterval(() => {
      const cleanupTime = Date.now() - 3600000;
      for (const ip in ipRequestCache) {
        ipRequestCache[ip] = ipRequestCache[ip].filter(timestamp => timestamp > cleanupTime);
        // If there are no records left, delete the cache entry for this IP.
        if (ipRequestCache[ip].length === 0) {
          delete ipRequestCache[ip];
        }
      }
      console.log(`IP cache cleanup completed. Active IPs: ${Object.keys(ipRequestCache).length}`);
    }, 3600000);
  }
  next();
});
// Builds the fixed-position attribution badge injected into generated pages:
// DeepSite logo + backlink, plus a "Remix" link carrying the source repo id.
function getPTag(repoId) {
  const SITE = "https://enzostvs-deepsite.hf.space";
  const wrapperStyle = "border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;";
  const logoStyle = "width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);";
  const linkStyle = "color: #fff;text-decoration: underline;";
  const logo = `<img src="${SITE}/logo.svg" alt="DeepSite Logo" style="${logoStyle}">`;
  const siteLink = `<a href="${SITE}" style="${linkStyle}" target="_blank" >DeepSite</a>`;
  const remixLink = `<a href="${SITE}?remix=${repoId}" style="${linkStyle}" target="_blank" >🧬 Remix</a>`;
  return `<p style="${wrapperStyle}">Made with ${logo}${siteLink} - ${remixLink}</p>`;
}
// List every registered template as a lightweight {id, name, description} summary.
app.get("/api/templates", (req, res) => {
  const templates = Object.entries(TEMPLATES).map(([id, tpl]) => ({
    id,
    name: tpl.name,
    description: tpl.description,
  }));
  return res.status(200).send({
    ok: true,
    templates,
  });
});
// Return full detail (system prompt + starter HTML) for a single template.
app.get("/api/templates/:id", (req, res) => {
  const { id } = req.params;
  const template = TEMPLATES[id];
  // Unknown template id -> 404.
  if (!template) {
    return res.status(404).send({
      ok: false,
      message: "Template not found",
    });
  }
  // Templates reference CDN_URLS directly, so no variable substitution is needed.
  return res.status(200).send({
    ok: true,
    template: {
      id,
      name: template.name,
      description: template.description,
      systemPrompt: template.systemPrompt,
      html: template.html,
    },
  });
});
// Report which OpenAI-related environment variables are configured (booleans
// only — never the values themselves) plus the effective model and rate limit.
app.get("/api/check-env", (req, res) => {
  const apiKeyConfigured = !!process.env.OPENAI_API_KEY;
  const baseUrlConfigured = !!process.env.OPENAI_BASE_URL;
  const modelConfigured = !!process.env.OPENAI_MODEL;
  // Parse once with an explicit radix; NaN/unset/non-positive all mean "disabled".
  const ipRateLimitValue = parseInt(process.env.IP_RATE_LIMIT, 10) || 0;
  return res.status(200).send({
    ok: true,
    env: {
      apiKey: apiKeyConfigured,
      baseUrl: baseUrlConfigured,
      model: modelConfigured,
      ipRateLimit: ipRateLimitValue > 0
    },
    model: process.env.OPENAI_MODEL || "",
    ipRateLimit: ipRateLimitValue
  });
});
// Test connectivity/credentials against an OpenAI-compatible endpoint with a
// tiny non-streaming chat request. User-supplied key/url/model take precedence
// over environment defaults.
app.post("/api/test-connection", async (req, res) => {
  const { api_key, base_url, model } = req.body;
  try {
    // Prioritize user-provided parameters, otherwise use environment variables.
    const apiKey = api_key || process.env.OPENAI_API_KEY;
    if (!apiKey) {
      return res.status(400).send({
        ok: false,
        message: "API key is required for testing",
      });
    }
    const baseUrl = base_url || OPENAI_BASE_URL;
    const modelId = model || MODEL_ID;
    // Minimal test request: one short message, tiny completion, deterministic.
    const requestOptions = {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${apiKey}`
      },
      body: JSON.stringify({
        model: modelId,
        messages: [
          {
            role: "user",
            content: "hi",
          },
        ],
        max_tokens: 50, // Limit return length to speed up testing
        temperature: 0 // Fixed return result
      })
    };
    console.log("Testing OpenAI API connection");
    console.log(`Testing API at: ${baseUrl}`);
    console.log(`Testing model: ${modelId}`);
    const response = await fetch(`${baseUrl}/chat/completions`, requestOptions);
    if (!response.ok) {
      // Fix: the error body is not guaranteed to be JSON (proxies/gateways may
      // return HTML or plain text). Previously an unconditional response.json()
      // could throw and fall into the outer catch, masking the upstream status.
      let message = "Connection test failed";
      try {
        const errorData = await response.json();
        message = errorData.error?.message || message;
      } catch (parseError) {
        message = `Connection test failed: ${response.status} ${response.statusText}`;
      }
      return res.status(response.status).send({
        ok: false,
        message,
      });
    }
    const data = await response.json();
    // Validate the response shape before reporting success.
    if (data && data.choices && data.choices[0] && data.choices[0].message) {
      return res.status(200).send({
        ok: true,
        message: "Connection test successful",
        response: data.choices[0].message.content
      });
    } else {
      return res.status(500).send({
        ok: false,
        message: "Received invalid response format"
      });
    }
  } catch (error) {
    console.error("Error testing connection:", error);
    return res.status(500).send({
      ok: false,
      message: error.message || "An error occurred during connection test",
    });
  }
});
// Rewrite a user prompt into a clearer, more structured version via a single
// non-streaming chat completion. User-supplied key/url/model override env defaults.
app.post("/api/optimize-prompt", async (req, res) => {
  const {
    prompt,
    language,
    api_key,
    base_url,
    model
  } = req.body;
  if (!prompt) {
    return res.status(400).send({
      ok: false,
      message: "Missing prompt field",
    });
  }
  try {
    // Prioritize user-provided API KEY, fall back to the environment variable.
    const apiKey = api_key || process.env.OPENAI_API_KEY;
    if (!apiKey) {
      return res.status(500).send({
        ok: false,
        message: "OpenAI API key is not configured.",
      });
    }
    // Prioritize user-provided BASE URL and Model.
    const baseUrl = base_url || OPENAI_BASE_URL;
    const modelId = model || MODEL_ID;
    // Select the system prompt by language.
    // NOTE(review): both branches currently contain identical English text;
    // the 'id' branch presumably should be localized — confirm intent.
    const systemPrompt = language === 'id'
      ? "You are a professional prompt optimization assistant. Your task is to improve the user's prompt to make it clearer, more specific, and more effective. Maintain the user's original intent but make the prompt more structured and easier for AI to understand. Output only the plain text of the optimized prompt without any Markdown syntax, explanations, comments, or additional markers. You may use <br> and spaces to format the text when necessary to improve readability."
      : "You are a professional prompt optimization assistant. Your task is to improve the user's prompt to make it clearer, more specific, and more effective. Maintain the user's original intent but make the prompt more structured and easier for AI to understand. Output only the plain text of the optimized prompt without any Markdown syntax, explanations, comments, or additional markers. You may use <br> and spaces to format the text when necessary to improve readability.";
    const messages = [
      {
        role: "system",
        content: systemPrompt,
      },
      {
        role: "user",
        content: prompt,
      },
    ];
    const requestOptions = {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${apiKey}`
      },
      body: JSON.stringify({
        model: modelId,
        messages,
        temperature: 0.7,
        max_tokens: 2000
      })
    };
    console.log("Sending prompt optimization request to OpenAI API");
    console.log(`Using API at: ${baseUrl}`);
    console.log(`Using model: ${modelId}`);
    const response = await fetch(`${baseUrl}/chat/completions`, requestOptions);
    if (!response.ok) {
      console.error(`OpenAI API error: ${response.status} ${response.statusText}`);
      try {
        const error = await response.json();
        // Fix: OpenAI-style errors nest the message at error.error.message
        // (consistent with the extraction used by /api/test-connection).
        return res.status(response.status).send({
          ok: false,
          message: error.error?.message || error?.message || "Error calling OpenAI API",
        });
      } catch (parseError) {
        // Non-JSON error body — report the upstream status line instead.
        return res.status(response.status).send({
          ok: false,
          message: `OpenAI API error: ${response.status} ${response.statusText}`,
        });
      }
    }
    const data = await response.json();
    const optimizedPrompt = data.choices?.[0]?.message?.content?.trim();
    return res.status(200).send({
      ok: true,
      optimizedPrompt,
    });
  } catch (error) {
    console.error("Error optimizing prompt:", error);
    return res.status(500).send({
      ok: false,
      message: error.message || "An error occurred while optimizing the prompt",
    });
  }
});
// Stub endpoint: deployment was removed (it required a Hugging Face login),
// but the route still validates input and reports that fact to the client.
app.post("/api/deploy", async (req, res) => {
  const { html, title } = req.body;
  const hasRequiredFields = Boolean(html) && Boolean(title);
  if (!hasRequiredFields) {
    return res.status(400).send({
      ok: false,
      message: "Missing required fields",
    });
  }
  return res.status(200).send({
    ok: true,
    message: "Deployment feature has been removed as it required Hugging Face login",
  });
});
// Main generation endpoint: builds a system prompt from the chosen template /
// UI library / tool libraries, then streams the model's HTML output back to
// the client as plain chunked text (not SSE — raw content deltas).
app.post("/api/ask-ai", async (req, res) => {
  // Request payload: prompt is required; everything else is optional context
  // (html = current page for iteration, previousPrompt = prior user turn) or
  // per-request overrides for the OpenAI-compatible API configuration.
  const {
    prompt,
    html,
    previousPrompt,
    templateId,
    language,
    ui,
    tools,
    max_tokens,
    temperature,
    api_key,
    base_url,
    model
  } = req.body;
  if (!prompt) {
    return res.status(400).send({
      ok: false,
      message: "Missing required fields",
    });
  }
  // Get client IP - for logging purposes only (rate limiting already ran in
  // middleware). NOTE(review): uses the raw x-forwarded-for header, which may
  // be a comma-separated proxy chain rather than a single IP.
  const clientIp = req.headers['x-forwarded-for'] ||
    req.connection.remoteAddress ||
    req.socket.remoteAddress;
  // Use the rate limit information already calculated by the middleware for logging.
  if (req.rateLimit) {
    if (req.rateLimit.limited === false) {
      console.log(`API request from IP: ${clientIp}, rate limit: unlimited or not applicable`);
    } else {
      console.log(`API request from IP: ${clientIp}, requests this hour: ${req.rateLimit.requestCount}/${IP_RATE_LIMIT}, remaining: ${req.rateLimit.remainingRequests}`);
    }
  } else {
    console.log(`API request from IP: ${clientIp}, rate limit information not available`);
  }
  // Set response headers for a chunked plain-text stream.
  res.setHeader("Content-Type", "text/plain");
  res.setHeader("Cache-Control", "no-cache");
  res.setHeader("Connection", "keep-alive");
  // Additional headers to keep intermediaries from buffering the stream.
  res.setHeader("Transfer-Encoding", "chunked");
  res.setHeader("X-Accel-Buffering", "no"); // Disable Nginx buffering
  res.setHeader("X-Content-Type-Options", "nosniff");
  res.setHeader("Keep-Alive", "timeout=120"); // Keep the connection alive for 120 seconds
  // FIXME(review): headers are flushed BEFORE the API-key check below, so the
  // res.status(...).send(...) error paths later in this handler cannot change
  // the status code once headers have been sent — the client will see a 200
  // with an error body. Consider deferring flushHeaders until after validation.
  res.flushHeaders(); // Send response headers immediately
  // NOTE(review): selectedProvider is never used below — presumably a remnant
  // of multi-provider support; confirm before removing.
  const selectedProvider = PROVIDERS["openai"];
  try {
    // Prioritize user-provided API KEY, fall back to the environment variable.
    const apiKey = api_key || process.env.OPENAI_API_KEY;
    if (!apiKey) {
      return res.status(500).send({
        ok: false,
        message: "OpenAI API key is not configured.",
      });
    }
    // Prioritize user-provided BASE URL, fall back to the environment variable.
    const baseUrl = base_url || OPENAI_BASE_URL;
    // Prioritize user-provided Model, fall back to the environment variable.
    const modelId = model || MODEL_ID;
    console.log(`Using OpenAI API at: ${baseUrl}`);
    console.log(`Using model: ${modelId}`);
    // Start from the template's system prompt; unknown/absent template ids
    // fall back to the vanilla template.
    let systemPrompt = templateId && TEMPLATES[templateId]
      ? TEMPLATES[templateId].systemPrompt
      : TEMPLATES.vanilla.systemPrompt;
    // If a component library is selected, append its CDN instructions — but
    // only when the Vue3 framework is chosen (the libraries are Vue-specific).
    if (ui && ui !== templateId && templateId === 'vue3') {
      const uiTemplate = TEMPLATES[ui];
      if (uiTemplate) {
        // Extract key parts from the component library prompt and add them
        // to the system prompt.
        systemPrompt += ` Also, use ${uiTemplate.name} component library with CDN: `;
        if (ui === 'elementPlus') {
          systemPrompt += `CSS: ${CDN_URLS.ELEMENT_PLUS_CSS}, JS: ${CDN_URLS.ELEMENT_PLUS_JS}, Icons: ${CDN_URLS.ELEMENT_PLUS_ICONS}.`;
        } else if (ui === 'naiveUI') {
          systemPrompt += `${CDN_URLS.NAIVE_UI}.`;
        }
      }
    }
    // If tool libraries are selected, append their CDN instructions,
    // comma-separated.
    if (tools && tools.length > 0) {
      systemPrompt += " Include the following additional libraries: ";
      tools.forEach((tool, index) => {
        if (tool === 'tailwindcss') {
          systemPrompt += `Tailwind CSS (use <script src="${CDN_URLS.TAILWIND}"></script>)`;
        } else if (tool === 'vueuse') {
          systemPrompt += `VueUse (use <script src="${CDN_URLS.VUEUSE_SHARED}"></script> and <script src="${CDN_URLS.VUEUSE_CORE}"></script>)`;
        } else if (tool === 'dayjs') {
          systemPrompt += `Day.js (use <script src="${CDN_URLS.DAYJS}"></script>)`;
        } else if (tool === 'element-plus-icons') {
          systemPrompt += `Element Plus Icons (use <script src="${CDN_URLS.ELEMENT_PLUS_ICONS}"></script>)`;
        }
        if (index < tools.length - 1) {
          systemPrompt += ", ";
        }
      });
      systemPrompt += ". Make sure to use the correct syntax for all the frameworks and libraries.";
    }
    // Add a comment-language instruction based on the language setting.
    if (language === 'id') {
      systemPrompt += " Please write all comments in Indonesian.";
    } else if (language === 'en') {
      systemPrompt += " Please write all comments in English.";
    }
    // Log the selected configuration for debugging.
    console.log("Template configuration:");
    console.log(`- Framework: ${templateId}`);
    console.log(`- UI Library: ${ui || 'None'}`);
    console.log(`- Tools: ${tools ? tools.join(', ') : 'None'}`);
    console.log(`- Language: ${language || 'default'}`);
    // Conversation: system prompt, optional prior turn, optional current HTML
    // (as an assistant message), then the new user prompt.
    const messages = [
      {
        role: "system",
        content: systemPrompt,
      },
    ];
    if (previousPrompt) {
      messages.push({
        role: "user",
        content: previousPrompt,
      });
    }
    if (html) {
      messages.push({
        role: "assistant",
        content: `The current code is: ${html}.`,
      });
    }
    messages.push({
      role: "user",
      content: prompt,
    });
    // NOTE(review): parseInt is called without a radix here; DEFAULT_MAX_TOKENS
    // comes from env (string) or the numeric fallback, so base-10 is assumed.
    const requestOptions = {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${apiKey}`
      },
      body: JSON.stringify({
        model: modelId,
        messages,
        stream: true,
        max_tokens: max_tokens || parseInt(DEFAULT_MAX_TOKENS),
        temperature: temperature !== undefined ? parseFloat(temperature) : parseFloat(DEFAULT_TEMPERATURE)
      })
    };
    console.log(`Sending request to OpenAI API with model: ${modelId}`);
    console.log(`Using max_tokens: ${max_tokens || DEFAULT_MAX_TOKENS}, temperature: ${temperature !== undefined ? temperature : DEFAULT_TEMPERATURE}`);
    console.log("Request URL:", `${baseUrl}/chat/completions`);
    // Log headers with the API key redacted.
    console.log("Request headers:", {
      ...requestOptions.headers,
      "Authorization": "Bearer [API_KEY_HIDDEN]"
    });
    console.log("Request body:", JSON.parse(requestOptions.body));
    const response = await fetch(`${baseUrl}/chat/completions`, requestOptions);
    if (!response.ok) {
      console.error(`OpenAI API error: ${response.status} ${response.statusText}`);
      try {
        // Branch on Content-Type: JSON bodies get parsed, anything else is
        // forwarded as raw text.
        const contentType = response.headers.get("Content-Type");
        console.log(`Response Content-Type: ${contentType}`);
        if (contentType && contentType.includes("application/json")) {
          const error = await response.json();
          console.error("OpenAI API error details:", error);
          return res.status(response.status).send({
            ok: false,
            message: error?.message || "Error calling OpenAI API",
          });
        } else {
          // If not JSON, read text directly.
          const errorText = await response.text();
          console.error("OpenAI API error text:", errorText);
          return res.status(response.status).send({
            ok: false,
            message: errorText || `OpenAI API error: ${response.status} ${response.statusText}`,
          });
        }
      } catch (parseError) {
        // Body could not be read/parsed at all — fall back to the status line.
        console.error("Error parsing API response:", parseError);
        return res.status(response.status).send({
          ok: false,
          message: `OpenAI API error: ${response.status} ${response.statusText}`,
        });
      }
    }
    // Process the OpenAI stream response: read SSE chunks, extract content
    // deltas, and forward them to the client until done or </html> is seen.
    const reader = response.body.getReader();
    const decoder = new TextDecoder("utf-8");
    let completeResponse = "";
    console.log("Starting to process stream response");
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) {
          console.log("Stream completed");
          break;
        }
        // Stop reading if the client has already disconnected.
        if (res.writableEnded) {
          console.log("Client disconnected, stopping stream");
          reader.cancel("Client disconnected");
          break;
        }
        // Parse SSE format data. NOTE(review): decode() without {stream: true}
        // and splitting per chunk assumes SSE events never straddle chunk
        // boundaries — a partial "data: {...}" line would fail JSON.parse and
        // be dropped by the catch below; confirm acceptable.
        const chunk = decoder.decode(value);
        // Try to handle different formats of stream responses.
        const lines = chunk.split("\n");
        let processedAnyLine = false;
        for (const line of lines) {
          // Standard OpenAI SSE format: "data: {json}" per event.
          if (line.startsWith("data: ")) {
            processedAnyLine = true;
            if (line.includes("[DONE]")) {
              console.log("Received [DONE] signal");
              continue;
            }
            try {
              const data = JSON.parse(line.replace("data: ", ""));
              const content = data.choices?.[0]?.delta?.content || "";
              if (content) {
                // Only write while the response is still open.
                if (!res.writableEnded) {
                  res.write(content);
                  completeResponse += content;
                  // </html> marks the end of the generated page; this break
                  // exits the for-loop, and the matching check after the loop
                  // exits the outer while-loop.
                  if (completeResponse.includes("</html>")) {
                    console.log("Found </html> tag, ending stream");
                    break;
                  }
                } else {
                  console.log("Cannot write to closed response");
                  break;
                }
              }
            } catch (e) {
              console.error("Error parsing JSON from SSE line:", e);
              console.log("Problematic line:", line);
              // Continue processing other lines.
            }
          }
        }
        // Fallback for non-SSE providers: try the whole chunk as one JSON
        // object, else forward it verbatim as text.
        if (!processedAnyLine && chunk.trim()) {
          try {
            // Try to parse the entire response as JSON.
            const jsonData = JSON.parse(chunk);
            if (jsonData.choices && jsonData.choices[0]) {
              const content = jsonData.choices[0].message?.content || jsonData.choices[0].delta?.content || "";
              if (content && !res.writableEnded) {
                res.write(content);
                completeResponse += content;
              }
            }
          } catch (e) {
            // Not JSON, process directly as text.
            if (!res.writableEnded) {
              res.write(chunk);
              completeResponse += chunk;
            }
          }
        }
        if (completeResponse.includes("</html>")) {
          console.log("Found </html> tag in complete response, ending stream");
          break;
        }
      }
      console.log("Stream processing completed");
      if (!res.writableEnded) {
        res.end();
      }
    } catch (streamError) {
      console.error("Error processing stream:", streamError);
      // Distinguish a client-side abort from a genuine stream failure.
      if (streamError.message && (streamError.message.includes("aborted") || streamError.message.includes("canceled"))) {
        console.log("Client aborted the request");
      }
      if (!res.headersSent) {
        res.status(500).send({
          ok: false,
          message: "Error processing response stream"
        });
      } else if (!res.writableEnded) {
        res.end();
      }
    }
  } catch (error) {
    // Provider-specific quota error -> 402 with a flag the frontend uses to
    // open its upgrade modal.
    if (error.message.includes("exceeded your monthly included credits")) {
      return res.status(402).send({
        ok: false,
        openProModal: true,
        message: error.message,
      });
    }
    if (!res.headersSent) {
      res.status(500).send({
        ok: false,
        message:
          error.message || "An error occurred while processing your request.",
      });
    } else {
      // Otherwise end the stream.
      res.end();
    }
  }
});
// Fetch the index.html of a public static Hugging Face Space so the client
// can "remix" it. The DeepSite attribution badge is stripped before returning.
app.get("/api/remix/:username/:repo", async (req, res) => {
  const { username, repo } = req.params;
  const repoId = `${username}/${repo}`;
  // NOTE: the former hf_token cookie / DEFAULT_HF_TOKEN lookup was dead code
  // (the token was never used) and has been removed.
  try {
    const space = await spaceInfo({
      name: repoId,
    });
    // Only public static Spaces can be remixed.
    if (!space || space.sdk !== "static" || space.private) {
      return res.status(404).send({
        ok: false,
        message: "Space not found",
      });
    }
    const url = `https://huggingface.co/spaces/${repoId}/raw/main/index.html`;
    const response = await fetch(url);
    if (!response.ok) {
      return res.status(404).send({
        ok: false,
        message: "Space not found",
      });
    }
    let html = await response.text();
    // Remove the attribution <p> tag this server injects on deploy.
    html = html.replace(getPTag(repoId), "");
    return res.status(200).send({
      ok: true,
      html,
    });
  } catch (error) {
    // Fix: without this catch, a rejected spaceInfo()/fetch() became an
    // unhandled promise rejection (Express 4 does not catch async throws).
    console.error("Error fetching space for remix:", error);
    return res.status(500).send({
      ok: false,
      message: error.message || "An error occurred while fetching the space",
    });
  }
});
// SPA fallback: every unmatched GET serves the built index.html.
app.get("*", (_req, res) => {
  // On Vercel the bundle lives under the process working directory; locally
  // it sits next to this file.
  const distRoot = isVercelEnvironment ? process.cwd() : __dirname;
  res.sendFile(path.join(distRoot, "dist", "index.html"));
});
// Bind port: prefer PORT from the environment, fall back to 7860 (or 3000 if preferred).
const PORT = process.env.PORT || 7860;
app.listen(PORT, () => {
  console.log(`Server is running on port ${PORT}`);
});