everydaycats committed (verified)
Commit 0248e0f · Parent: 2e99d45

Update ai_engine.js

Files changed (1): ai_engine.js (+3, -59)
ai_engine.js CHANGED
@@ -12,12 +12,6 @@ const bedrockClient = new BedrockRuntimeClient({
   })
 });
 
-const azureOpenAI = new OpenAI({
-  apiKey: process.env.AZURE_OPENAI_API_KEY || "7U3m9NRkE38ThSWTr92hMgQ4hDCUFI9MAnFNrCgRL7MhdvckfTXwJQQJ99CBACHYHv6XJ3w3AAAAACOGV22P",
-  baseURL: `https://hollowpad-resource.cognitiveservices.azure.com/openai/deployments/gpt-5-mini`,
-  defaultQuery: { "api-version": "2024-05-01-preview" },
-  defaultHeaders: { "api-key": process.env.AZURE_OPENAI_API_KEY || "7U3m9NRkE38ThSWTr92hMgQ4hDCUFI9MAnFNrCgRL7MhdvckfTXwJQQJ99CBACHYHv6XJ3w3AAAAACOGV22P" }
-});
 
 function getBedrockModelId(modelName) {
   switch(modelName) {
@@ -32,28 +26,7 @@ function getBedrockModelId(modelName) {
 }
 
 export const generateCompletion = async ({ model, prompt, system_prompt, images }) => {
-  if (model === "gpt" || model === "gpt-5-mini") {
-    let messagesPayload = [{ role: "system", content: system_prompt || GPT_SYSTEM_PROMPT }];
-
-    if (images && images.length > 0) {
-      let userContent = [{ type: "text", text: prompt }];
-      images.forEach(imgStr => {
-        userContent.push({ type: "image_url", image_url: { url: imgStr } });
-      });
-      messagesPayload.push({ role: "user", content: userContent });
-    } else {
-      messagesPayload.push({ role: "user", content: prompt });
-    }
-
-    const response = await azureOpenAI.chat.completions.create({
-      model: "gpt-5-mini",
-      messages: messagesPayload,
-      reasoning_effort: "high"
-    });
-
-    return { success: true, data: response.choices[0].message.content, usage: { totalTokenCount: response.usage?.total_tokens || 0 } };
-
-  } else {
+
   const bedrockModelId = getBedrockModelId(model);
   let contentBlock = [{ text: prompt }];
 
@@ -88,41 +61,12 @@ export const generateCompletion = async ({ model, prompt, system_prompt, images
   const tokenUsage = response.usage ? (response.usage.inputTokens + response.usage.outputTokens) : 0;
 
   return { success: true, data: text, usage: { totalTokenCount: tokenUsage } };
-  }
+
 };
 
 export const streamCompletion = async ({ model, prompt, system_prompt, images, res }) => {
   let totalTokenCount = 0;
 
-  if (model === "gpt" || model === "gpt-5-mini") {
-    let messagesPayload = [{ role: "system", content: system_prompt || GPT_SYSTEM_PROMPT }];
-    let userContent = [];
-    if (images && images.length > 0) {
-      userContent.push({ type: "text", text: prompt });
-      images.forEach(imgStr => { userContent.push({ type: "image_url", image_url: { url: imgStr } }); });
-      messagesPayload.push({ role: "user", content: userContent });
-    } else {
-      messagesPayload.push({ role: "user", content: prompt });
-    }
-
-    const stream = await azureOpenAI.chat.completions.create({
-      model: "gpt-5-mini",
-      messages: messagesPayload,
-      reasoning_effort: "high",
-      stream: true,
-      stream_options: { include_usage: true }
-    });
-
-    for await (const chunk of stream) {
-      const delta = chunk.choices[0]?.delta;
-      if (delta?.reasoning_content) res.write(`__THINK__${delta.reasoning_content}`);
-      else if (delta?.content) res.write(delta.content);
-      if (chunk.usage) totalTokenCount = chunk.usage.total_tokens;
-    }
-    res.write(`__USAGE__${JSON.stringify({ totalTokenCount })}`);
-    res.end();
-
-  } else {
   const bedrockModelId = getBedrockModelId(model);
   let contentBlock = [{ text: prompt }];
 
@@ -155,5 +99,5 @@ export const streamCompletion = async ({ model, prompt, system_prompt, images, r
   }
   res.write(`__USAGE__${JSON.stringify({ totalTokenCount })}`);
   res.end();
-  }
+
 };
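
The deleted branch shipped a literal Azure OpenAI key as a fallback whenever AZURE_OPENAI_API_KEY was unset, which is presumably why it was cut. For reference, a minimal sketch of the same client configured from the environment only; the endpoint, deployment, and API version are copied from the removed lines, while the fail-fast check is an assumption, not part of this repo:

import OpenAI from "openai";

// Read the key from the environment; never bake a literal fallback into source.
const apiKey = process.env.AZURE_OPENAI_API_KEY;
if (!apiKey) {
  // Assumption: failing fast at startup beats falling back to a hardcoded secret.
  throw new Error("AZURE_OPENAI_API_KEY is not set");
}

const azureOpenAI = new OpenAI({
  apiKey,
  // Endpoint and deployment mirror the deleted configuration.
  baseURL: "https://hollowpad-resource.cognitiveservices.azure.com/openai/deployments/gpt-5-mini",
  defaultQuery: { "api-version": "2024-05-01-preview" },
  defaultHeaders: { "api-key": apiKey }
});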
 
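
After this commit, both exports route every request through AWS Bedrock via getBedrockModelId. A hypothetical call to the surviving non-streaming helper; the model name below is a placeholder, since the names getBedrockModelId's switch actually accepts fall outside this diff:

// Hypothetical usage; "some-model" stands in for whatever names
// getBedrockModelId handles (its cases are not shown in this diff).
const result = await generateCompletion({
  model: "some-model",
  prompt: "Describe this screenshot.",
  system_prompt: "You are a concise assistant.",
  images: []  // image strings, as in the removed GPT branch
});
if (result.success) {
  console.log(result.data);
  console.log("tokens used:", result.usage.totalTokenCount);
}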