// HTTP framework, per-route rate limiting, and the HTTP client used to
// proxy chat-completion requests upstream.
const axios = require('axios');
const express = require('express');
const rateLimit = require('express-rate-limit');

// Application instance; JSON body parsing is needed by every POST route below.
const app = express();
app.use(express.json());
|
|
// Upstream API keys, comma-separated in the OPENAI_KEYS environment variable.
// The nullish fallback avoids the opaque startup TypeError the bare
// `process.env.OPENAI_KEYS.split(',')` produced when the variable was unset;
// keys are trimmed and empty entries dropped so stray commas/whitespace in the
// env value cannot yield a blank Authorization header.
const apiKeys = (process.env.OPENAI_KEYS ?? '')
  .split(',')
  .map((key) => key.trim())
  .filter((key) => key.length > 0);

if (apiKeys.length === 0) {
  console.warn('OPENAI_KEYS is not set or empty; upstream requests will fail to authenticate.');
}

// System prompt injected as the first message of every completion request.
// NOTE(review): read from the lowercase env var "start" — confirm the
// deployment actually sets it, otherwise the system message content is undefined.
const start = process.env.start;
|
|
// Throttle: at most one request per 5-second window, applied to /pl only
// (the /cre and /crebeta routes below are deliberately left unthrottled).
const limiter = rateLimit({
  windowMs: 5000,
  max: 1,
  // Same "wait" body clients already expect when throttled.
  handler: (req, res) => res.status(429).json("wait"),
});

app.use('/pl', limiter);
|
|
// Static "update check" endpoint: always returns the same pre-serialized
// JSON string (the client parses the inner string itself).
app.post('/cr', (req, res) => {
  const payload = {
    content: '{"whate":"🪨","howe":"ОБНОВИТЕСЬ","text":"Текущая версия приложения устарела. Установите новую из нашего телеграм канала: @yufi_ru","succ":"победа","what":"Версию","how":"Обновите","howl":"@yufi_ru"}',
  };
  res.json(payload);
});
|
|
| |
| const getRandomApiKey = () => apiKeys[Math.floor(Math.random() * apiKeys.length)]; |
|
|
/**
 * Send a chat-completion request to an OpenAI-compatible upstream endpoint.
 *
 * @param {string} url - Full URL of the /v1/chat/completions endpoint.
 * @param {string} prompt - User prompt, sent as the final message.
 * @param {Array<{role: string, content: string}>} additionalMessages - Extra
 *   messages placed between the system prompt and the user prompt. A
 *   non-empty array also switches the upstream model (see below).
 * @param {number} temperature - Sampling temperature forwarded upstream.
 * @returns {Promise<string|null>} Trimmed completion text, or null when the
 *   response contains no usable choice.
 * @throws {Error} Generic 'Error during generation' on any transport or
 *   upstream failure (callers only distinguish success/failure).
 */
const generateResponse = async (url, prompt, additionalMessages, temperature) => {
  try {
    const response = await axios.post(url, {
      messages: [
        { role: 'system', content: start },
        ...additionalMessages,
        { role: 'user', content: prompt },
      ],
      max_tokens: 2000,
      temperature: temperature,
      // Non-empty additionalMessages marks the /cre flow, which is routed
      // to Gemini; the plain flows use deepseek-chat.
      model: additionalMessages.length > 0 ? "gemini-1.5-pro-002" : "deepseek-chat",
    }, {
      headers: {
        'Authorization': `Bearer ${getRandomApiKey()}`,
        'Content-Type': 'application/json',
      },
    });

    // Optional chaining replaces the manual choices/length/message checks;
    // `?? null` preserves the original null-on-missing-choice contract.
    return response.data.choices?.[0]?.message?.content.trim() ?? null;
  } catch (error) {
    // Log only status + message: `console.error(error)` on an axios error
    // dumps the whole request config, including the Authorization header
    // (i.e. the API key), into the logs.
    console.error('Upstream request failed:', error.response?.status ?? '-', error.message);
    throw new Error('Error during generation');
  }
};
|
|
// Main generation endpoint (rate-limited above to one request per 5s).
app.post('/pl', async (req, res) => {
  const { prompt } = req.body;

  if (!prompt) {
    return res.status(400).json("wait");
  }

  try {
    const content = await generateResponse(
      'https://openai-gemini-iota.vercel.app/v1/chat/completions',
      prompt,
      [],
      0.8,
    );
    if (content) {
      res.json({ content });
    } else {
      res.status(500).json({ content: 'errora' });
    }
  } catch {
    res.status(500).json({ content: 'errorb' });
  }
});
|
|
// Variant of /pl: appends an extra system instruction and runs at a lower
// temperature. Not rate-limited (the limiter is mounted on /pl only).
app.post('/cre', async (req, res) => {
  const { prompt } = req.body;

  if (!prompt) {
    return res.status(400).json("wait");
  }

  try {
    // Extra system message (Russian): "always end 'text' by asking the user
    // to update the app, warning that this version will soon stop responding".
    const additionalMessages = [
      { role: 'system', content: 'И ОБЯЗАТЕЛЬНО, всегда в конце "text" проси обновить приложение, уведомляя о том, что скоро в жтой версии перестанешь отвечать!' }
    ];
    // FIXME(review): this host looks like a Vercel *dashboard* URL, not a
    // deployed API endpoint — compare the `*.vercel.app` hosts used by /pl
    // and /crebeta. Confirm the intended deployment URL.
    const content = await generateResponse(
      'https://vercel.com/roonis-projects/openai-gemini/v1/chat/completions',
      prompt,
      additionalMessages,
      0.6,
    );
    if (content) {
      res.json({ content });
    } else {
      res.status(500).json({ content: 'errora' });
    }
  } catch {
    res.status(500).json({ content: 'errorb' });
  }
});
|
|
// Beta generation endpoint: same flow as /pl but a different upstream
// deployment and a much lower temperature; also unthrottled.
app.post('/crebeta', async (req, res) => {
  const { prompt } = req.body;

  if (!prompt) {
    return res.status(400).json("wait");
  }

  try {
    const content = await generateResponse(
      'https://geminiyufi.vercel.app/v1/chat/completions',
      prompt,
      [],
      0.24,
    );
    if (content) {
      res.json({ content });
    } else {
      res.status(500).json({ content: 'errora' });
    }
  } catch {
    res.status(500).json({ content: 'errorb' });
  }
});
|
|
// Bind the HTTP server. 7860 is hard-coded (no PORT env override).
const port = 7860;
app.listen(port, () => {
  console.log(`API сервер запущен на порту ${port}`);
});
|
|