const axios = require('axios');
/**
 * GET handler that proxies a chat-completion request to the Groq API.
 *
 * Query parameters:
 * @param {object} req - request; reads `req.query.text` (required prompt)
 *                       and `req.query.img` (optional image URL).
 * @param {object} res - response; replies with JSON
 *                       `{ author, success, msg }` on success or
 *                       `{ success: false, error }` on failure.
 *
 * Requires the `GROQ` environment variable to hold the API key.
 */
const handler = async (req, res) => {
  try {
    const { text, img } = req.query;
    if (!text) {
      return res.status(400).json({
        success: false,
        error: 'Missing required parameter: text'
      });
    }
    const apiKey = process.env.GROQ;
    if (!apiKey) {
      return res.status(500).json({
        success: false,
        error: 'GROQ API key not configured in environment'
      });
    }
    // Vision requests use an array of content parts; plain-text requests
    // send the prompt as a bare string (see `content: img ? content : text`).
    const content = [
      {
        type: "text",
        text: text
      }
    ];
    if (img) {
      content.push({
        type: "image_url",
        image_url: {
          url: img
        }
      });
    }
    const response = await axios.post(
      'https://api.groq.com/openai/v1/chat/completions',
      {
        messages: [
          {
            role: "user",
            content: img ? content : text
          }
        ],
        // Only the scout model is vision-capable; fall back to the cheaper
        // text-only model when no image was supplied.
        model: img ? "meta-llama/llama-4-scout-17b-16e-instruct" : "llama-3.1-8b-instant",
        temperature: 1,
        max_completion_tokens: 1024,
        top_p: 1,
        stream: false
      },
      {
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${apiKey}`
        }
      }
    );
    // Guard against unexpected response shapes so a malformed upstream
    // payload yields a clean 502 instead of an uncaught TypeError.
    const result = response.data?.choices?.[0]?.message?.content;
    if (result == null) {
      return res.status(502).json({
        success: false,
        error: 'Unexpected response from Groq API'
      });
    }
    res.json({
      author: "Herza",
      success: true,
      msg: result
    });
  } catch (error) {
    // Propagate the upstream HTTP status (401, 429, ...) when axios
    // attached one, instead of masking everything as 500.
    res.status(error.response?.status ?? 500).json({
      success: false,
      error: error.response?.data?.error?.message || error.message
    });
  }
};
module.exports = {
name: 'LLAMA AI',
description: 'Generate responses using Groq API with text and image support',
type: 'GET',
routes: ['api/AI/llama'],
tags: ['ai', 'Meta AI', 'llama', 'vision'],
main: ['AI'],
parameters: ['text', 'img', 'key'],
enabled: true,
limit: 8,
handler
}; |