// StyleGPT-milestone2 — src/utils/geminiClient.ts
// (header reconstructed from repository page residue; original commit: 8a12695 "push all")
import { GoogleGenAI } from "@google/genai";
import dotenv from "dotenv";

// Load .env before reading any configuration values below.
dotenv.config();

// API key for the Gemini API; all exported functions throw if it is missing.
const GEMINI_API_KEY = process.env.GEMINI_API_KEY;
// Model id, overridable via env; defaults to the experimental flash model.
const GEMINI_MODEL = process.env.GEMINI_MODEL || "gemini-2.0-flash-exp";

if (!GEMINI_API_KEY) {
  console.warn("GEMINI_API_KEY not found in environment variables");
}

// Pass the key explicitly rather than relying on the SDK's implicit
// environment lookup, so the dotenv-loaded value is always the one used.
const ai = new GoogleGenAI({ apiKey: GEMINI_API_KEY });
/**
 * Result of a non-streaming Gemini generation call.
 */
export interface GeminiResponse {
  // Generated text; empty string when the model returned no text.
  text: string;
  // Token accounting as reported by the API's usageMetadata, when present.
  usage?: {
    promptTokens?: number;
    candidatesTokens?: number;
    totalTokens?: number;
  };
}
/**
 * One chunk yielded by the streaming generator. The final chunk has
 * `done: true` and an empty `text`.
 */
export interface GeminiStreamChunk {
  // Incremental text for this chunk (never empty except on the final chunk).
  text: string;
  // True only for the terminal sentinel chunk emitted after the stream ends.
  done: boolean;
}
/**
 * Generate a single (non-streaming) completion from Gemini.
 *
 * @param prompt - User prompt text.
 * @param systemInstruction - Optional instruction prepended to the prompt
 *   (separated by a blank line) before sending.
 * @param images - Optional images, each either a `data:` URL (sent inline)
 *   or a file URI (sent as fileData; mime type assumed JPEG — TODO confirm
 *   with callers).
 * @returns The generated text plus token usage when the API reports it.
 * @throws Error when the API key is missing or the API call fails.
 */
export async function generateContent(
  prompt: string,
  systemInstruction?: string,
  images?: string[]
): Promise<GeminiResponse> {
  try {
    if (!GEMINI_API_KEY) {
      throw new Error("GEMINI_API_KEY is not configured");
    }

    // Fold the system instruction into the prompt up front, instead of
    // building contents with the bare prompt and patching the text in later.
    const fullPrompt = systemInstruction
      ? `${systemInstruction}\n\n${prompt}`
      : prompt;

    let contents: any;
    if (images && images.length > 0) {
      const imageParts = images.map((image) => {
        if (image.startsWith("data:")) {
          // Data URL: split off the base64 payload and extract the mime type
          // from the header (base64 itself never contains a comma).
          const [header, base64Data] = image.split(",");
          const mimeType = header.match(/data:([^;]+)/)?.[1] || "image/jpeg";
          return {
            inlineData: {
              data: base64Data,
              mimeType: mimeType,
            },
          };
        }
        // Anything else is treated as a file URI reference.
        return {
          fileData: {
            fileUri: image,
            mimeType: "image/jpeg",
          },
        };
      });
      contents = [
        {
          role: "user",
          parts: [{ text: fullPrompt }, ...imageParts],
        },
      ];
    } else {
      // Text-only requests can pass the prompt string directly.
      contents = fullPrompt;
    }

    const response = await ai.models.generateContent({
      model: GEMINI_MODEL,
      contents,
    });

    const text = response.text || "";
    const usage = response.usageMetadata
      ? {
          promptTokens: response.usageMetadata.promptTokenCount,
          candidatesTokens: response.usageMetadata.candidatesTokenCount,
          totalTokens: response.usageMetadata.totalTokenCount,
        }
      : undefined;

    return {
      text,
      usage,
    };
  } catch (error: unknown) {
    console.error("Gemini API error:", error);
    // Narrow before touching .message; fall back to a generic message.
    throw new Error(
      error instanceof Error && error.message
        ? error.message
        : "Failed to generate content with Gemini"
    );
  }
}
/**
 * Stream a Gemini completion as it is generated.
 *
 * Yields one {@link GeminiStreamChunk} per non-empty text chunk, then a
 * final sentinel chunk with `done: true` and empty `text`.
 *
 * @param prompt - User prompt text.
 * @param systemInstruction - Optional instruction prepended to the prompt
 *   (separated by a blank line) before sending.
 * @param images - Optional images, each either a `data:` URL (sent inline)
 *   or a file URI (sent as fileData; mime type assumed JPEG — TODO confirm
 *   with callers).
 * @throws Error when the API key is missing or the API call fails.
 */
export async function* generateContentStream(
  prompt: string,
  systemInstruction?: string,
  images?: string[]
): AsyncGenerator<GeminiStreamChunk, void, unknown> {
  try {
    if (!GEMINI_API_KEY) {
      throw new Error("GEMINI_API_KEY is not configured");
    }

    // Fold the system instruction into the prompt up front, instead of
    // building contents with the bare prompt and patching the text in later.
    const fullPrompt = systemInstruction
      ? `${systemInstruction}\n\n${prompt}`
      : prompt;

    let contents: any;
    if (images && images.length > 0) {
      const imageParts = images.map((image) => {
        if (image.startsWith("data:")) {
          // Data URL: split off the base64 payload and extract the mime type
          // from the header (base64 itself never contains a comma).
          const [header, base64Data] = image.split(",");
          const mimeType = header.match(/data:([^;]+)/)?.[1] || "image/jpeg";
          return {
            inlineData: {
              data: base64Data,
              mimeType: mimeType,
            },
          };
        }
        // Anything else is treated as a file URI reference.
        return {
          fileData: {
            fileUri: image,
            mimeType: "image/jpeg",
          },
        };
      });
      contents = [
        {
          role: "user",
          parts: [{ text: fullPrompt }, ...imageParts],
        },
      ];
    } else {
      // Text-only requests can pass the prompt string directly.
      contents = fullPrompt;
    }

    const stream = await ai.models.generateContentStream({
      model: GEMINI_MODEL,
      contents,
    });

    for await (const chunk of stream) {
      const text = chunk.text || "";
      // Skip empty chunks so consumers only see real text deltas.
      if (text) {
        yield {
          text,
          done: false,
        };
      }
    }

    // Terminal sentinel so consumers can detect end-of-stream.
    yield {
      text: "",
      done: true,
    };
  } catch (error: unknown) {
    console.error("Gemini streaming error:", error);
    // Narrow before touching .message; fall back to a generic message.
    throw new Error(
      error instanceof Error && error.message
        ? error.message
        : "Failed to stream content with Gemini"
    );
  }
}