File size: 18,318 Bytes
bddc111
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
import express from "express";
import path from "path";
import { fileURLToPath } from "url";
import dotenv from "dotenv";
import cookieParser from "cookie-parser";
import {
    createRepo,
    uploadFiles,
    whoAmI,
    spaceInfo,
} from "@huggingface/hub";
import { InferenceClient } from "@huggingface/inference";
import bodyParser from "body-parser";

import checkUser from "./middlewares/checkUser.js"; // Assuming this middleware exists
// Removed PROVIDERS import
import { COLORS } from "./utils/colors.js"; // Assuming this utility exists

// Load environment variables from .env file
dotenv.config();

const app = express();

// Per-IP request counter for rate limiting unauthenticated users.
// NOTE(review): entries are never removed, so counts accumulate for the
// lifetime of the process (effectively a lifetime cap, not a window) and
// memory grows with distinct client IPs — confirm this is intended.
const ipAddresses = new Map();

// ES-module equivalents of CommonJS __filename/__dirname.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

const PORT = process.env.APP_PORT || 3000;
// OAuth callback URL; must match the redirect URI registered with the
// Hugging Face OAuth application.
const REDIRECT_URI =
    process.env.REDIRECT_URI || `http://localhost:${PORT}/auth/login`;
// --- Updated Model ID ---
const MODEL_ID = "deepseek-ai/deepseek-llm-7b-chat"; // Using the chat model
const MAX_NEW_TOKENS = 512; // Default max tokens for generation
const MAX_REQUESTS_PER_IP = 5; // Rate limit for unauthenticated users

// Middleware registration order matters: cookies and JSON bodies must be
// parsed before the route handlers below read req.cookies / req.body.
app.use(cookieParser());
app.use(bodyParser.json());
// Serve static files from 'dist' and also the root for the new index.html
app.use(express.static(path.join(__dirname, "dist")));
app.use(express.static(path.join(__dirname))); // Serve index.html from root

// --- Helper: DeepSite footer badge ---
// Builds the fixed-position "Made with DeepSite" badge that is injected into
// every deployed page. `repoId` ("user/space") is embedded in the remix link
// so visitors can fork the space. Output must stay stable because the deploy
// and remix routes strip it back out before re-adding/serving.
const getPTag = (repoId) => {
    const site = "https://enzostvs-deepsite.hf.space";
    const pStyle =
        "border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;";
    const imgStyle =
        "width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);";
    const linkStyle = "color: #fff;text-decoration: underline;";
    return (
        `<p style="${pStyle}">Made with ` +
        `<img src="${site}/logo.svg" alt="DeepSite Logo" style="${imgStyle}">` +
        `<a href="${site}" style="${linkStyle}" target="_blank" >DeepSite</a> - ` +
        `<a href="${site}?remix=${repoId}" style="${linkStyle}" target="_blank" >🧬 Remix</a></p>`
    );
};

// --- Auth: kick off the Hugging Face OAuth flow ---
app.get("/api/login", (_req, res) => {
    // Scopes request profile info plus repo management and inference access
    // so the app can deploy spaces and call the Inference API for the user.
    const authorizeUrl = `https://huggingface.co/oauth/authorize?client_id=${process.env.OAUTH_CLIENT_ID}&redirect_uri=${REDIRECT_URI}&response_type=code&scope=openid%20profile%20write-repos%20manage-repos%20inference-api&prompt=consent&state=1234567890`;
    res.redirect(302, authorizeUrl);
});
// --- Auth: OAuth callback — exchange the authorization code for a token ---
app.get("/auth/login", async (req, res) => {
    const { code } = req.query;

    // User landed here without completing the authorize step.
    if (!code) {
        return res.redirect(302, "/");
    }

    // HTTP Basic credentials (client_id:client_secret) for the token endpoint.
    const basicCredentials = Buffer.from(
        `${process.env.OAUTH_CLIENT_ID}:${process.env.OAUTH_CLIENT_SECRET}`
    ).toString("base64");

    try {
        const tokenRes = await fetch("https://huggingface.co/oauth/token", {
            method: "POST",
            headers: {
                "Content-Type": "application/x-www-form-urlencoded",
                Authorization: `Basic ${basicCredentials}`,
            },
            body: new URLSearchParams({
                grant_type: "authorization_code",
                code: code,
                redirect_uri: REDIRECT_URI,
            }),
        });

        const tokenJson = await tokenRes.json();

        if (!tokenJson.access_token) {
            console.error("OAuth Error:", tokenJson);
            return res.redirect(302, "/?error=auth_failed");
        }

        // Persist the token client-side; API routes read it from req.cookies.
        res.cookie("hf_token", tokenJson.access_token, {
            httpOnly: false, // Set to true if JS doesn't need to read it directly
            secure: process.env.NODE_ENV === 'production', // Use secure cookies in production
            sameSite: "lax", // More common default than 'none' unless cross-site needed
            maxAge: 30 * 24 * 60 * 60 * 1000, // 30 days
        });

        return res.redirect(302, "/");
    } catch (err) {
        console.error("Error during OAuth token exchange:", err);
        return res.redirect(302, "/?error=auth_exception");
    }
});
// --- Auth: clear the session cookie ---
app.get("/auth/logout", (req, res) => {
    // Options must mirror those used when setting the cookie, otherwise the
    // browser will not match and remove it.
    const cookieOptions = {
        httpOnly: false,
        secure: process.env.NODE_ENV === 'production',
        sameSite: "lax",
    };
    res.clearCookie("hf_token", cookieOptions);
    return res.redirect(302, "/");
});

// --- User Info: proxy Hugging Face's userinfo endpoint for the frontend ---
app.get("/api/@me", checkUser, async (req, res) => {
    const { hf_token } = req.cookies;
    try {
        const userInfoRes = await fetch("https://huggingface.co/oauth/userinfo", {
            headers: {
                Authorization: `Bearer ${hf_token}`,
            },
        });

        if (!userInfoRes.ok) {
            throw new Error(`User info request failed with status ${userInfoRes.status}`);
        }

        const user = await userInfoRes.json();
        res.send(user);
    } catch (err) {
        console.error("Error fetching user info:", err.message);
        // Don't clear cookie on transient errors, only perhaps on 401?
        // res.clearCookie("hf_token", { ... });
        res.status(401).send({
            ok: false,
            message: err.message,
        });
    }
});

// --- Deploy Route: push the generated HTML to a Hugging Face static Space ---
// Body: { html, title, path? }. When `path` is absent a new space is created
// under the caller's account, named from a slugified `title`.
app.post("/api/deploy", checkUser, async (req, res) => {
    // Destructure req.body.path under a local alias so it does not shadow
    // the `path` module imported at the top of the file.
    const { html, title, path: spacePath } = req.body;
    if (!html || !title) {
        return res.status(400).send({
            ok: false,
            message: "Missing required fields",
        });
    }

    const { hf_token } = req.cookies;
    try {
        const repo = {
            type: "space",
            name: spacePath ?? "",
        };

        let readme;
        let newHtml = html;

        // No existing space supplied -> create a fresh one named after the title.
        if (!spacePath || spacePath === "") {
            const { name: username } = await whoAmI({ accessToken: hf_token });
            const slug = title
                .toLowerCase()
                .replace(/[^a-z0-9]+/g, "-")
                .split("-")
                .filter(Boolean)
                .join("-")
                .slice(0, 96);
            // Guard against titles with no alphanumeric characters, which
            // would otherwise yield an invalid empty repo name.
            const newTitle = slug || "my-website";

            const repoId = `${username}/${newTitle}`;
            repo.name = repoId;

            // Check if repo exists before creating? Might need adjustment
            await createRepo({
                repo,
                accessToken: hf_token,
                spaceSdk: "static", // Explicitly set SDK for new repos
            });
            const colorFrom = COLORS[Math.floor(Math.random() * COLORS.length)];
            const colorTo = COLORS[Math.floor(Math.random() * COLORS.length)];
            readme = `---
title: ${newTitle}
emoji: 🐳
colorFrom: ${colorFrom}
colorTo: ${colorTo}
sdk: static
pinned: false
tags:
  - deepsite
---
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference`;
        }

        // Strip any previously injected badge, then append a fresh one just
        // before </body> so it always reflects the current repo name.
        const pTag = getPTag(repo.name);
        newHtml = newHtml.replace(/<p style=.*?enzostvs-deepsite\.hf\.space.*?<\/p>/s, "");
        newHtml = newHtml.replace(/<\/body>/i, `${pTag}</body>`);

        // NOTE(review): uploadFiles appears to derive the upload path from the
        // Blob's `name` property — confirm against the @huggingface/hub docs.
        const file = new Blob([newHtml], { type: "text/html" });
        file.name = "index.html";

        const files = [file];
        if (readme) {
            const readmeFile = new Blob([readme], { type: "text/markdown" });
            readmeFile.name = "README.md";
            files.push(readmeFile);
        }
        await uploadFiles({
            repo,
            files,
            accessToken: hf_token,
            commitMessage: "Update website via DeepSite", // Add commit message
        });
        return res.status(200).send({ ok: true, path: repo.name });
    } catch (err) {
        console.error("Deploy Error:", err);
        return res.status(500).send({
            ok: false,
            message: err.message,
        });
    }
});

// --- AI Generation Route: stream a single-file HTML site from the model ---
// Body: { prompt, html?, previousPrompt? }. `previousPrompt`/`html` carry the
// prior conversation turn so the model can iterate on an existing page. The
// response is streamed back as raw text/plain chunks, NOT JSON — error shape
// therefore differs depending on whether streaming has already started.
app.post("/api/ask-ai", async (req, res) => {
    // Removed 'provider' from destructuring
    const { prompt, html, previousPrompt } = req.body;
    if (!prompt) {
        return res.status(400).send({
            ok: false,
            message: "Missing prompt field",
        });
    }

    const { hf_token } = req.cookies;
    let token = hf_token;

    // --- Token & Rate Limiting Logic ---
    // Use the first hop of X-Forwarded-For when present (assumes a trusted
    // reverse proxy in front of this app — TODO confirm), else the socket IP.
    const ip = req.headers['x-forwarded-for']?.split(',')[0].trim() || req.socket.remoteAddress || 'unknown';

    if (!token) {
        // Anonymous caller: count requests per IP against MAX_REQUESTS_PER_IP.
        // NOTE(review): ipAddresses is never cleared, so this is a lifetime
        // cap per process rather than a sliding window — confirm intended.
        const currentCount = (ipAddresses.get(ip) || 0) + 1;
        if (currentCount > MAX_REQUESTS_PER_IP) {
            console.warn(`Rate limit exceeded for IP: ${ip}`);
            return res.status(429).send({
                ok: false,
                openLogin: true, // Keep this flag for the frontend
                message: "Rate limit exceeded. Please log in to continue.",
            });
        }
        ipAddresses.set(ip, currentCount);
        console.log(`Anonymous request from ${ip}, count: ${currentCount}`);

        // Fall back to a server-owned token so anonymous users still work.
        token = process.env.DEFAULT_HF_TOKEN;
        if (!token) {
            console.error("DEFAULT_HF_TOKEN is not set. Cannot process anonymous requests.");
            return res.status(503).send({ // 503 Service Unavailable might be better
                ok: false,
                message: "Service is temporarily unavailable for anonymous users.",
            });
        }
    }
    // --- End Token Logic ---

    // Set up response headers for streaming; after this point the status code
    // can no longer be changed (see headersSent checks below).
    res.setHeader("Content-Type", "text/plain; charset=utf-8"); // Explicitly set UTF-8
    res.setHeader("Cache-Control", "no-cache");
    res.setHeader("Connection", "keep-alive");
    res.setHeader("X-Content-Type-Options", "nosniff"); // Security header

    try {
        const client = new InferenceClient(token);

        // Construct the chat context: system instructions, then (optionally)
        // the previous user prompt and the current HTML as the assistant's
        // prior answer, then the new user prompt.
        const messages = [
            {
                role: "system",
                content: `ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICON make sure to import the library first. Try to create the best UI possible by using only HTML, CSS and JAVASCRIPT. Use as much as you can TailwindCSS for the CSS, if you can't do something with TailwindCSS, then use custom CSS (make sure to import <script src="https://cdn.tailwindcss.com"></script> in the head). Also, try to ellaborate as much as you can, to create something unique. ALWAYS GIVE THE RESPONSE INTO A SINGLE HTML FILE`,
            },
            // Conditionally add previous user prompt if it exists
            ...(previousPrompt ? [{ role: "user", content: previousPrompt }] : []),
            // Conditionally add previous assistant response (current HTML) if it exists
            ...(html ? [{ role: "assistant", content: `The current HTML code is:\n\`\`\`html\n${html}\n\`\`\`` }] : []),
             // The new user prompt
            {
                role: "user",
                content: prompt,
            },
        ];

        console.log(`Calling model ${MODEL_ID} for IP ${ip}. Prompt: "${prompt.substring(0, 60)}..."`);

        // --- Call chatCompletionStream without provider ---
        const stream = client.chatCompletionStream({
            model: MODEL_ID,
            messages: messages,
            max_tokens: MAX_NEW_TOKENS, // Use defined max tokens
            // Optional: Add temperature, top_p if needed
            // temperature: 0.7,
            // top_p: 0.95,
        });

        // --- Process the stream: forward each content delta to the client ---
        let responseText = ""; // Accumulated output, kept for optional end-tag detection
        for await (const chunk of stream) {
            // Check for content in the delta
            if (chunk.choices && chunk.choices[0]?.delta?.content) {
                const contentChunk = chunk.choices[0].delta.content;
                res.write(contentChunk); // Send chunk to client
                responseText += contentChunk;
                // Example: Optional early exit if a specific tag is found
                // if (responseText.includes("</html>")) {
                //    console.log("Detected </html>, ending stream early.");
                //    break;
                // }
            }
             // Handle potential errors within the stream if the library provides them
            if (chunk.error) {
                 console.error(`Error during stream for IP ${ip}:`, chunk.error);
                 // Decide if you want to send an error marker to the client
                 // res.write(`\n[Stream Error: ${chunk.error}]\n`);
                 // break; // Stop processing the stream on error
            }
        }

        console.log(`Finished streaming response for IP ${ip}. Prompt: "${prompt.substring(0, 60)}..."`);
        res.end(); // End the response stream properly

    } catch (error) {
        console.error(`Error in /api/ask-ai for IP ${ip}:`, error);

        // Specific error handling: out-of-credits message from the API.
        if (error.message && error.message.includes("exceeded your monthly included credits")) {
             if (!res.headersSent) {
                 res.status(402).send({ // Payment Required
                     ok: false,
                     openProModal: true, // Keep this flag for frontend
                     message: "API call failed: " + error.message,
                 });
             } else {
                 // Stream already started, append error and end
                 res.write("\n[Error: API Credit Limit Exceeded]\n");
                 res.end();
             }
             return;
        }
        // Map known InferenceClient error codes to HTTP statuses.
        // NOTE(review): the 'ERR_INFERENCE_AUTH' / 'ERR_INFERENCE_NOT_FOUND'
        // code strings are assumed — verify against @huggingface/inference.
        if (error.code) { // InferenceClient often includes error codes
             if (!res.headersSent) {
                let statusCode = 500;
                if (error.code === 'ERR_INFERENCE_AUTH') statusCode = 401;
                if (error.code === 'ERR_INFERENCE_NOT_FOUND') statusCode = 404;
                 res.status(statusCode).send({
                     ok: false,
                     message: `API Error (${error.code}): ${error.message}`,
                 });
             } else {
                  res.write(`\n[Error: ${error.message}]\n`);
                  res.end();
             }
             return;
        }


        // Generic error handling if headers haven't been sent
        if (!res.headersSent) {
            res.status(500).send({
                ok: false,
                message: error.message || "An internal server error occurred.",
            });
        } else {
            // If headers were sent, we can't change status code, just end the stream.
            // Optionally try to write an error marker if the stream is still writable.
            if (!res.writableEnded) {
                 res.write("\n[Error: Server failed to process the request fully]\n");
                 res.end();
            }
        }
    }
});

// --- Remix Route: fetch a public space's index.html so the user can fork it ---
app.get("/api/remix/:username/:repo", async (req, res) => {
    const { username, repo } = req.params;
    const { hf_token } = req.cookies;

    // Using default token for read-only operation is fine if repo is public
    const token = hf_token || process.env.DEFAULT_HF_TOKEN;

    const repoId = `${username}/${repo}`;

    try {
        // Check space exists and is static/public first (optional but good practice)
        // const space = await spaceInfo({ repo: { type: "space", name: repoId }, accessToken: token });
        // if (!space || space.sdk !== "static" || space.private) {
        //    return res.status(404).send({ ok: false, message: "Static public space not found" });
        // }

        const url = `https://huggingface.co/spaces/${repoId}/raw/main/index.html`;
        const response = await fetch(url); // No token needed for public raw files

        if (!response.ok) {
            if (response.status === 404) {
                return res.status(404).send({ ok: false, message: "index.html not found in the space" });
            }
            throw new Error(`Failed to fetch raw file: ${response.statusText}`);
        }

        let html = await response.text();
        // Strip the injected DeepSite badge with the same regex the deploy
        // route uses. An exact-string match against getPTag(repoId) would miss
        // badges generated under a different repo id (e.g. an earlier remix).
        html = html.replace(/<p style=.*?enzostvs-deepsite\.hf\.space.*?<\/p>/s, "");

        res.status(200).send({
            ok: true,
            html,
        });
    } catch (err) {
        console.error("Remix Error:", err);
        // Don't expose internal errors directly unless needed
        res.status(500).send({ ok: false, message: "Failed to remix the content." });
    }
});


// --- Catch-all for SPA routing — must be registered AFTER all API routes ---
app.get("*", (req, res) => {
    const looksLikeFile = path.extname(req.path).length > 0;
    // Unknown asset paths get a 404 instead of the SPA shell.
    if (looksLikeFile && req.path !== "/index.html") {
        return res.status(404).send("Not found");
    }
    // Everything else falls through to the client-side router's entry point.
    res.sendFile(path.join(__dirname, "dist", "index.html"));
});

// --- Server Start ---
app.listen(PORT, () => {
    console.log(`Server listening on port ${PORT}`);
    console.log(`Using model: ${MODEL_ID}`);
    // Surface missing configuration early so operators see it at boot.
    const hasOAuthConfig =
        process.env.OAUTH_CLIENT_ID && process.env.OAUTH_CLIENT_SECRET;
    if (!hasOAuthConfig) {
        console.warn("Warning: OAuth environment variables (OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET) are not set. Login will not work.");
    }
    if (!process.env.DEFAULT_HF_TOKEN) {
        console.warn("Warning: DEFAULT_HF_TOKEN is not set. Anonymous requests will fail.");
    }
});