// Pix / core / chat.ts
import { OpenAI } from "openai";
import { loadEnv } from "../utils/general";
// Hugging Face serverless inference exposes an OpenAI-compatible endpoint,
// so we can drive it with the official OpenAI client.
const HF_BASE_URL = "https://api-inference.huggingface.co/v1/";

// Shared client instance; the HF access token is read from the environment.
const client = new OpenAI({
  baseURL: HF_BASE_URL,
  apiKey: loadEnv("HF_TOKEN"),
});
/**
 * Generate a response based on a message, either describing an image or
 * engaging in a chat.
 *
 * @param message - The text message or image URL to send to the model.
 * @param isImage - When true, `message` is treated as an image URL and the
 *   model is asked to describe it; otherwise it is sent as plain chat text.
 * @returns The complete generated response, assembled from the stream.
 */
export async function generateResponse(
  message: string,
  isImage = false,
): Promise<string> {
  let output = "";

  // Prepare the message content based on whether it's an image or text.
  // `as const` keeps the `type` discriminants as literal types so the
  // payload matches the SDK's content-part union.
  const content = isImage
    ? [
        {
          type: "text" as const,
          text: "Describe this image in one sentence.",
        },
        {
          type: "image_url" as const,
          image_url: { url: message },
        },
      ]
    : [
        {
          type: "text" as const,
          text: message,
        },
      ];

  // Create a streaming completion request.
  const stream = await client.chat.completions.create({
    model: "meta-llama/Llama-3.2-11B-Vision-Instruct",
    messages: [
      {
        role: "user",
        content,
      },
    ],
    max_tokens: 512,
    stream: true,
  });

  // Accumulate the streamed deltas into a single string.
  for await (const chunk of stream) {
    // A streamed delta may omit `content` entirely (e.g. role-only or
    // final chunks); skip those instead of concatenating "undefined".
    const piece = chunk.choices?.[0]?.delta?.content;
    if (piece != null) {
      output += piece;
    }
  }

  return output;
}