# testing / Dockerfile
# (Hugging Face Spaces page metadata, preserved as comments so the file parses)
# nirkyy's picture
# Update Dockerfile
# 63391c4 verified
# raw / history blame / 2.24 kB
# syntax=docker/dockerfile:1
# Pin the Dockerfile frontend so heredoc (COPY <<EOF) syntax is available.
# Slim Debian-based Node 20 image: small surface, still has npm for the install step.
FROM node:20-slim
# All subsequent relative paths (npm install, CMD) resolve under /app.
WORKDIR /app
# Inline the npm manifest via heredoc (no build context needed). JSON cannot
# carry comments, so the content below must stay exactly as-is. Copying the
# manifest before the app source keeps the dependency layer cacheable.
COPY <<EOF /app/package.json
{
"name": "gemini-api-server",
"version": "1.0.0",
"main": "index.js",
"scripts": {
"start": "node index.js"
},
"dependencies": {
"@google/generative-ai": "^0.15.0",
"cors": "^2.8.5",
"express": "^4.19.2"
}
}
EOF
RUN npm install --only=production
# Write the application server. The heredoc delimiter is QUOTED ('EOF') so the
# Dockerfile frontend performs no build-time $-expansion — with the original
# unquoted EOF, the JS template literal `${PORT}` below was substituted at
# build time (PORT is undefined during the build), corrupting the startup log.
COPY <<'EOF' /app/index.js
const express = require('express');
const cors = require('cors');
const { GoogleGenerativeAI } = require('@google/generative-ai');

const app = express();
const PORT = process.env.PORT || 7860;
const MODEL_NAME = process.env.MODEL_NAME || "gemini-flash-lite-latest";

// SECURITY: API keys must never be baked into the image — anyone who pulls
// the image (or reads this Dockerfile) can extract them. Supply them at
// runtime as a comma-separated list:
//   docker run -e GEMINI_API_KEYS="key1,key2" ...
// NOTE(review): the keys previously hard-coded here are public and should be
// revoked in the Google Cloud console.
const apiKeys = (process.env.GEMINI_API_KEYS || '')
  .split(',')
  .map((key) => key.trim())
  .filter(Boolean);

if (apiKeys.length === 0) {
  console.error('Fatal: set the GEMINI_API_KEYS env var (comma-separated API keys).');
  process.exit(1);
}

// Uniformly random key selection: naive load-spreading across key quotas.
const getRandomApiKey = () => apiKeys[Math.floor(Math.random() * apiKeys.length)];

app.use(cors());
app.use(express.json());

// POST /api/generate  body: { prompt: string }
// Streams the model's plain-text output back chunk-by-chunk.
app.post('/api/generate', async (req, res) => {
  const { prompt } = req.body;
  if (!prompt) {
    return res.status(400).json({ error: 'Request body harus menyertakan "prompt"' });
  }
  try {
    const genAI = new GoogleGenerativeAI(getRandomApiKey());
    const model = genAI.getGenerativeModel({ model: MODEL_NAME });
    const generationConfig = {
      temperature: 0.9,
      topK: 1,
      topP: 1,
      maxOutputTokens: 2048,
    };
    const result = await model.generateContentStream({
      contents: [{ role: 'user', parts: [{ text: prompt }] }],
      generationConfig,
    });
    res.setHeader('Content-Type', 'text/plain; charset=utf-8');
    res.setHeader('Transfer-Encoding', 'chunked');
    for await (const chunk of result.stream) {
      res.write(chunk.text());
    }
    res.end();
  } catch (error) {
    console.error('Error saat streaming dari Gemini API:', error);
    // If the stream failed mid-flight the headers are already gone; calling
    // res.status() then would throw ERR_HTTP_HEADERS_SENT. Only send a 500
    // when nothing has been written yet.
    if (!res.headersSent) {
      res.status(500).send('Terjadi kesalahan pada server saat memproses permintaan Anda.');
    } else {
      res.end();
    }
  }
});

app.listen(PORT, () => {
  console.log(`Server berjalan di http://localhost:${PORT}`);
});
EOF
# Documentation only (does not publish the port); 7860 is the Hugging Face
# Spaces convention. Being >1024 it is bindable by a non-root user.
EXPOSE 7860
# Drop root for runtime: the official node images ship a non-root "node" user.
# All install steps above ran as root; nothing after this needs privileges.
USER node
# Run node directly in exec form so the server is PID 1 and receives SIGTERM
# from `docker stop`; `npm start` would interpose npm, which does not reliably
# forward signals to the child process.
CMD ["node", "index.js"]