File size: 2,236 Bytes
f3f2aa9
1318bf3
8ffd7c8
 
 
f3f2aa9
1318bf3
210759e
1318bf3
 
 
 
 
 
210759e
1318bf3
 
 
 
 
 
 
 
f3f2aa9
1318bf3
 
210759e
1318bf3
 
 
210759e
1318bf3
 
63391c4
 
1318bf3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210759e
 
 
 
 
 
 
 
1318bf3
 
210759e
 
 
1318bf3
210759e
 
 
1318bf3
210759e
 
 
 
 
 
1318bf3
 
210759e
 
1318bf3
 
 
 
f3f2aa9
1318bf3
 
8ffd7c8
 
 
1318bf3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
# syntax=docker/dockerfile:1
FROM node:20-slim

# Production mode: many packages (including express) disable dev-only behavior.
ENV NODE_ENV=production

WORKDIR /app

# Inline the manifest via a heredoc so the image is fully self-contained.
COPY <<EOF /app/package.json
{
  "name": "gemini-api-server",
  "version": "1.0.0",
  "main": "index.js",
  "scripts": {
    "start": "node index.js"
  },
  "dependencies": {
    "@google/generative-ai": "^0.15.0",
    "cors": "^2.8.5",
    "express": "^4.19.2"
  }
}
EOF

# --only=production is deprecated since npm 8; --omit=dev is the supported flag.
RUN npm install --omit=dev

# NOTE: the delimiter is quoted ('EOF') so BuildKit does NOT expand $-variables
# at build time — otherwise template literals like `${PORT}` inside the embedded
# JavaScript would be substituted (with an empty string) during the build.
COPY <<'EOF' /app/index.js
const express = require('express');
const cors = require('cors');
const { GoogleGenerativeAI } = require('@google/generative-ai');

const app = express();
const PORT = 7860;
const MODEL_NAME = "gemini-flash-lite-latest";

// API keys are taken from the GEMINI_API_KEYS env var (comma-separated), e.g.
//   docker run -e GEMINI_API_KEYS=key1,key2 ...
// FIXME(security): the hard-coded fallback keys below were committed in plain
// text and must be considered compromised — rotate them at the provider and
// delete this fallback; never bake secrets into the image.
const apiKeys = (process.env.GEMINI_API_KEYS || "")
    .split(",")
    .map((key) => key.trim())
    .filter((key) => key.length > 0);

if (apiKeys.length === 0) {
    apiKeys.push(
        "AIzaSyCL6KXbZDbJ5fBgI4BOrdhJbEDLnuzUL-Y",
        "AIzaSyCAI1oirZPlmTyKAOeDUWusKIg9tpNK0zM"
    );
}

// Pick one key uniformly at random per request (naive load spreading).
const getRandomApiKey = () => {
    const randomIndex = Math.floor(Math.random() * apiKeys.length);
    return apiKeys[randomIndex];
};

app.use(cors());
app.use(express.json());

// POST /api/generate — streams a Gemini completion for { "prompt": string }
// back to the client as chunked text/plain.
app.post('/api/generate', async (req, res) => {
    // req.body is undefined when the request is not JSON; default to {} so the
    // destructuring cannot throw and crash the async handler.
    const { prompt } = req.body ?? {};

    // Require a non-empty string prompt.
    if (typeof prompt !== 'string' || prompt.length === 0) {
        return res.status(400).json({ error: 'Request body harus menyertakan "prompt"' });
    }

    try {
        const apiKey = getRandomApiKey();
        const genAI = new GoogleGenerativeAI(apiKey);
        const model = genAI.getGenerativeModel({ model: MODEL_NAME });

        const generationConfig = {
            temperature: 0.9,
            topK: 1,
            topP: 1,
            maxOutputTokens: 2048,
        };

        const result = await model.generateContentStream({
            contents: [{ role: "user", parts: [{ text: prompt }] }],
            generationConfig,
        });

        res.setHeader('Content-Type', 'text/plain; charset=utf-8');
        res.setHeader('Transfer-Encoding', 'chunked');

        // Forward each chunk to the client as soon as it arrives.
        for await (const chunk of result.stream) {
            res.write(chunk.text());
        }

        res.end();

    } catch (error) {
        console.error("Error saat streaming dari Gemini API:", error);
        // If streaming already began, the status line and headers are on the
        // wire — res.status() would throw ERR_HTTP_HEADERS_SENT. Just close.
        if (res.headersSent) {
            res.end();
        } else {
            res.status(500).send("Terjadi kesalahan pada server saat memproses permintaan Anda.");
        }
    }
});

// Announce readiness once the listener is bound.
const onReady = () => {
    console.log(`Server berjalan di http://localhost:${PORT}`);
};

app.listen(PORT, onReady);
EOF

# Informational: the container listens on 7860 (publish with -p at run time).
EXPOSE 7860

# Start the server through the package.json "start" script.
CMD ["npm", "start"]