Pepguy committed on
Commit 3d5938a · verified · 1 Parent(s): 96034f5

Update app.js

Files changed (1)
  1. app.js +198 -1
app.js CHANGED
@@ -16,6 +16,202 @@ app.use(express.json({ limit: '50mb' }));
16
  const CLAUDE_SYSTEM_PROMPT = "You are a pro. Provide elite, high-level technical responses.";
17
  const GPT_SYSTEM_PROMPT = "You are a worker. Be concise, efficient, and get the job done.";
18
 
19
+ const bedrockClient = new BedrockRuntimeClient({
20
+ region: "us-east-1",
21
+ requestHandler: new NodeHttpHandler({
22
+ http2Handler: undefined,
23
+ })
24
+ });
25
+
26
+ const azureOpenAI = new OpenAI({
27
+ apiKey: "7U3m9NRkE38ThSWTr92hMgQ4hDCUFI9MAnFNrCgRL7MhdvckfTXwJQQJ99CBACHYHv6XJ3w3AAAAACOGV22P",
28
+ baseURL: `https://hollowpad-resource.cognitiveservices.azure.com/openai/deployments/gpt-5-mini`,
29
+ defaultQuery: { "api-version": "2024-05-01-preview" },
30
+ defaultHeaders: { "api-key": "7U3m9NRkE38ThSWTr92hMgQ4hDCUFI9MAnFNrCgRL7MhdvckfTXwJQQJ99CBACHYHv6XJ3w3AAAAACOGV22P" }
31
+ });
32
+
33
+ // --- DYNAMIC MODEL ROUTER ---
34
+ function getBedrockModelId(modelName) {
35
+ switch(modelName) {
36
+ case "haiku":
37
+ return "arn:aws:bedrock:us-east-1:106774395747:inference-profile/global.anthropic.claude-haiku-4-5";
38
+ case "maverick":
39
+ // Standard Bedrock cross-region inference mapping for Llama
40
+ return "arn:aws:bedrock:us-east-1::foundation-model/meta.llama4-maverick-17b-instruct-v1:0";
41
+ case "claude":
42
+ default:
43
+ return "arn:aws:bedrock:us-east-1:106774395747:inference-profile/global.anthropic.claude-sonnet-4-6";
44
+ }
45
+ }
46
+
47
+ // --- NON-STREAMING ENDPOINT ---
48
+ app.post('/api/generate', async (req, res) => {
49
+ const { model, prompt, system_prompt } = req.body;
50
+ console.log(`[TRAFFIC] Request for ${model}`);
51
+
52
+ try {
53
+ if (model === "gpt" || model === "gpt-5-mini") {
54
+ const response = await azureOpenAI.chat.completions.create({
55
+ model: "gpt-5-mini",
56
+ messages: [
57
+ { role: "system", content: system_prompt || GPT_SYSTEM_PROMPT },
58
+ { role: "user", content: prompt }
59
+ ],
60
+ reasoning_effort: "high"
61
+ });
62
+
63
+ const totalTokens = response.usage ? response.usage.total_tokens : 0;
64
+ res.json({ success: true, data: response.choices[0].message.content, usage: { totalTokenCount: totalTokens } });
65
+
66
+ } else {
67
+ // Handles Claude Sonnet, Claude Haiku, and Llama Maverick
68
+ const bedrockModelId = getBedrockModelId(model);
69
+ const command = new ConverseCommand({
70
+ modelId: bedrockModelId,
71
+ system: [{ text: system_prompt || CLAUDE_SYSTEM_PROMPT }],
72
+ messages: [{ role: "user", content: [{ text: prompt }] }],
73
+ inferenceConfig: { maxTokens: 48000, temperature: 1 },
74
+ additionalModelRequestFields: model?.includes("claude") ? {
75
+ thinking: { type: "adaptive" },
76
+ output_config: { effort: "high" }
77
+ } : undefined // Llama does not support Claude's specific thinking fields
78
+ });
79
+
80
+ const response = await bedrockClient.send(command);
81
+ const text = response.output.message.content.find(b => b.text)?.text;
82
+ const tokenUsage = response.usage ? (response.usage.inputTokens + response.usage.outputTokens) : 0;
83
+
84
+ res.json({ success: true, data: text, usage: { totalTokenCount: tokenUsage } });
85
+ }
86
+ } catch (err) {
87
+ console.error(`❌ [${model?.toUpperCase() || 'UNKNOWN'} ERROR]:`, err.name, err.message);
88
+ res.status(500).json({ success: false, error: `${err.name}: ${err.message}` });
89
+ }
90
+ });
91
+
92
+ // --- STREAMING ENDPOINT ---
93
+ app.post('/api/stream', async (req, res) => {
94
+ const { model, prompt, system_prompt, images } = req.body;
95
+ console.log(`[STREAM] Request for ${model} ${images?.length ? 'with images' : ''}`);
96
+
97
+ res.setHeader('Content-Type', 'text/plain; charset=utf-8');
98
+ res.setHeader('Transfer-Encoding', 'chunked');
99
+ res.setHeader('X-Accel-Buffering', 'no');
100
+ res.flushHeaders();
101
+
102
+ let totalTokenCount = 0;
103
+
104
+ try {
105
+ if (model === "gpt" || model === "gpt-5-mini") {
106
+ let messagesPayload = [
107
+ { role: "system", content: system_prompt || GPT_SYSTEM_PROMPT }
108
+ ];
109
+
110
+ let userContent = [];
111
+ if (images && images.length > 0) {
112
+ userContent.push({ type: "text", text: prompt });
113
+ images.forEach(imgStr => {
114
+ userContent.push({ type: "image_url", image_url: { url: imgStr } });
115
+ });
116
+ messagesPayload.push({ role: "user", content: userContent });
117
+ } else {
118
+ messagesPayload.push({ role: "user", content: prompt });
119
+ }
120
+
121
+ const stream = await azureOpenAI.chat.completions.create({
122
+ model: "gpt-5-mini",
123
+ messages: messagesPayload,
124
+ reasoning_effort: "high",
125
+ stream: true,
126
+ stream_options: { include_usage: true }
127
+ });
128
+
129
+ for await (const chunk of stream) {
130
+ const delta = chunk.choices[0]?.delta;
131
+ if (delta?.reasoning_content) res.write(`__THINK__${delta.reasoning_content}`);
132
+ else if (delta?.content) res.write(delta.content);
133
+ if (chunk.usage) totalTokenCount = chunk.usage.total_tokens;
134
+ }
135
+
136
+ res.write(`__USAGE__${JSON.stringify({ totalTokenCount })}`);
137
+ res.end();
138
+
139
+ } else {
140
+ const bedrockModelId = getBedrockModelId(model);
141
+ let contentBlock = [{ text: prompt }];
142
+
143
+ if (images && images.length > 0) {
144
+ const imageBlocks = images.map(imgStr => {
145
+ const base64Data = imgStr.replace(/^data:image\/\w+;base64,/, "");
146
+ return {
147
+ image: {
148
+ format: 'png', // Assuming normalized to PNG by frontend
149
+ source: { bytes: Buffer.from(base64Data, 'base64') }
150
+ }
151
+ };
152
+ });
153
+ contentBlock = [...imageBlocks, ...contentBlock];
154
+ }
155
+
156
+ const command = new ConverseStreamCommand({
157
+ modelId: bedrockModelId,
158
+ system: [{ text: system_prompt || CLAUDE_SYSTEM_PROMPT }],
159
+ messages: [{ role: "user", content: contentBlock }],
160
+ inferenceConfig: { maxTokens: 48000, temperature: 1 },
161
+ additionalModelRequestFields: model?.includes("claude") ? {
162
+ thinking: { type: "adaptive" },
163
+ output_config: { effort: "high" }
164
+ } : undefined
165
+ });
166
+
167
+ const response = await bedrockClient.send(command);
168
+
169
+ for await (const chunk of response.stream) {
170
+ if (chunk.contentBlockDelta) {
171
+ const delta = chunk.contentBlockDelta.delta;
172
+ if (delta.reasoningContent && delta.reasoningContent.text) {
173
+ res.write(`__THINK__${delta.reasoningContent.text}`);
174
+ } else if (delta.text) {
175
+ res.write(delta.text);
176
+ }
177
+ }
178
+ if (chunk.metadata && chunk.metadata.usage) {
179
+ totalTokenCount = (chunk.metadata.usage.inputTokens || 0) + (chunk.metadata.usage.outputTokens || 0);
180
+ }
181
+ }
182
+
183
+ res.write(`__USAGE__${JSON.stringify({ totalTokenCount })}`);
184
+ res.end();
185
+ }
186
+ } catch (err) {
187
+ console.error(`❌ [STREAM ERROR]:`, err.message);
188
+ res.write(`ERROR: ${err.message}`);
189
+ res.end();
190
+ }
191
+ });
192
+
193
+ app.get('/', async (req, res) => { res.json({ success: true }); });
194
+ app.listen(PORT, '0.0.0.0', () => console.log(`Main AI Agent live on port ${PORT}`));
195
+
196
+
197
+ /* import express from 'express';
198
+ import cors from 'cors';
199
+ import dotenv from 'dotenv';
200
+ import OpenAI from "openai";
201
+ import { BedrockRuntimeClient, ConverseCommand, ConverseStreamCommand } from "@aws-sdk/client-bedrock-runtime";
202
+ import { NodeHttpHandler } from "@smithy/node-http-handler";
203
+
204
+ dotenv.config();
205
+ const app = express();
206
+ const PORT = process.env.PORT || 7860;
207
+
208
+ app.use(cors());
209
+ app.use(express.json({ limit: '50mb' }));
210
+
211
+ // --- SYSTEM PROMPT DEFINITIONS ---
212
+ const CLAUDE_SYSTEM_PROMPT = "You are a pro. Provide elite, high-level technical responses.";
213
+ const GPT_SYSTEM_PROMPT = "You are a worker. Be concise, efficient, and get the job done.";
214
+
215
  const bedrockClient = new BedrockRuntimeClient({
216
  region: "us-east-1",
217
  requestHandler: new NodeHttpHandler({
 
@@ -197,4 +393,5 @@ app.get('/', async (req, res) => {
393
  res.json({ success: true });
394
  });
395

200
- app.listen(PORT, '0.0.0.0', () => console.log(`Main AI Agent live on port ${PORT}`));
396
+ app.listen(PORT, '0.0.0.0', () => console.log(`Main AI Agent live on port ${PORT}`));
397
+ */
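
Below is a minimal client sketch for exercising the two endpoints added in this commit. It is illustrative only and not part of the commit: the base URL (http://localhost:7860) assumes the PORT default visible in the commented-out setup, and the parser assumes the inline __THINK__ and __USAGE__ markers happen to land at chunk boundaries. The server writes them with no framing, so a robust client would buffer the stream and scan for marker boundaries instead of trusting chunk alignment.

const BASE_URL = "http://localhost:7860"; // assumed: PORT default from the original setup

// Non-streaming call: POST /api/generate returns { success, data, usage }.
async function generate(model, prompt, system_prompt) {
  const res = await fetch(`${BASE_URL}/api/generate`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ model, prompt, system_prompt }),
  });
  const json = await res.json();
  if (!json.success) throw new Error(json.error);
  return json; // { success: true, data: "<text>", usage: { totalTokenCount } }
}

// Streaming call: POST /api/stream emits plain text, with reasoning deltas
// prefixed __THINK__ and a final __USAGE__{"totalTokenCount":N} record.
async function streamChat(model, prompt, onText, onThink) {
  const res = await fetch(`${BASE_URL}/api/stream`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ model, prompt }),
  });
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let usage = null;
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    // Naive marker handling: assumes each marker starts its own chunk.
    const chunk = decoder.decode(value, { stream: true });
    if (chunk.startsWith("__USAGE__")) {
      usage = JSON.parse(chunk.slice("__USAGE__".length)); // { totalTokenCount }
    } else if (chunk.startsWith("__THINK__")) {
      onThink?.(chunk.slice("__THINK__".length));
    } else if (chunk.startsWith("ERROR:")) {
      throw new Error(chunk); // the server writes errors into the stream body
    } else {
      onText?.(chunk);
    }
  }
  return usage;
}

// Example usage (Node 18+, where fetch and TextDecoder are global):
// const { data } = await generate("haiku", "Ping?");
// const usage = await streamChat("claude", "Hello", t => process.stdout.write(t));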