Update server.js
server.js (changed)
```diff
@@ -141,20 +141,23 @@ app.post('/cr', async (req, res) => {
   }
 
   try {
-    const response = await axios.post('https://
+    const response = await axios.post('https://geminiyufi.vercel.app/v1/chat/completions', {
       messages: [{'role': 'system', 'content': start}, {'role': 'user', 'content': prompt}],
       max_tokens: 2000,
       temperature: 0.3,
       // presence_penalty: 0.0,
       //frequency_penalty: -0.2,
-
+      model: "gpt-4o",
       //model: "gemini-1.5-flash-latest",
-
-
+    }, {
+      headers: {
+        'Authorization': `Bearer ${apiKey}`,
+        'Content-Type': 'application/json',
+      },
     });
 
     if (response.data.choices && response.data.choices.length > 0 && response.data.choices[0].message) {
-      const content = response.data.trim();
+      const content = response.data.choices[0].message.content.trim();
       console.log(content);
       res.json({ content });
     } else {
```
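Taken together, the hunk swaps the request URL for the geminiyufi.vercel.app OpenAI-compatible endpoint, pins the model to "gpt-4o", adds an Authorization/Content-Type headers object as the third `axios.post` argument, and corrects the response parsing: the completion text lives at `response.data.choices[0].message.content`, not directly on `response.data`. For reference, a minimal sketch of how the handler could read after this commit; the imports, `apiKey`, `start`, request-body handling, port, and error branches are not part of the diff and are assumptions here:

```js
const express = require('express');
const axios = require('axios');

const app = express();
app.use(express.json());

// Assumptions: the key comes from the environment and `start` holds the
// system prompt; neither definition appears in the diff.
const apiKey = process.env.API_KEY;
const start = 'You are a helpful assistant.';

app.post('/cr', async (req, res) => {
  const prompt = req.body.prompt; // assumption: prompt arrives in the JSON body

  try {
    const response = await axios.post('https://geminiyufi.vercel.app/v1/chat/completions', {
      messages: [{ role: 'system', content: start }, { role: 'user', content: prompt }],
      max_tokens: 2000,
      temperature: 0.3,
      model: 'gpt-4o',
    }, {
      headers: {
        'Authorization': `Bearer ${apiKey}`,
        'Content-Type': 'application/json',
      },
    });

    // OpenAI-style responses nest the text under choices[0].message.content,
    // which is what the fixed parsing below relies on.
    if (response.data.choices && response.data.choices.length > 0 && response.data.choices[0].message) {
      const content = response.data.choices[0].message.content.trim();
      console.log(content);
      res.json({ content });
    } else {
      // Assumption: the else and catch branches fall outside the diff hunk.
      res.status(502).json({ error: 'Unexpected response from upstream API' });
    }
  } catch (err) {
    res.status(500).json({ error: err.message });
  }
});

app.listen(3000); // assumption: the listen port is not shown in the diff
```

One design implication of the headers change: `apiKey` must be in scope before the handler runs, and the guard on `response.data.choices` keeps the handler from throwing when the proxy returns a non-OpenAI-shaped error payload.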