Pepguy committed on
Commit
6909c54
·
verified ·
1 Parent(s): 544151e

Update app.js

Browse files
Files changed (1) hide show
  1. app.js +58 -43
app.js CHANGED
@@ -2,7 +2,7 @@ import express from 'express';
2
  import cors from 'cors';
3
  import dotenv from 'dotenv';
4
  import OpenAI from "openai";
5
- import { BedrockRuntimeClient, ConverseCommand } from "@aws-sdk/client-bedrock-runtime";
6
  import { NodeHttpHandler } from "@smithy/node-http-handler";
7
 
8
  dotenv.config();
@@ -20,13 +20,7 @@ const bedrockClient = new BedrockRuntimeClient({
20
  region: "us-east-1" ,
21
  requestHandler: new NodeHttpHandler({
22
  http2Handler: undefined,
23
- }) //,
24
- /*
25
- credentials: {
26
- accessKeyId: "AKIARRXB77NRTRLNWEBT",
27
- secretAccessKey: "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",
28
- }
29
- */
30
  });
31
 
32
  const azureOpenAI = new OpenAI({
@@ -36,6 +30,7 @@ const azureOpenAI = new OpenAI({
36
  defaultHeaders: { "api-key": "7U3m9NRkE38ThSWTr92hMgQ4hDCUFI9MAnFNrCgRL7MhdvckfTXwJQQJ99CBACHYHv6XJ3w3AAAAACOGV22P" }
37
  });
38
 
 
39
  app.post('/api/generate', async (req, res) => {
40
  const { model, prompt, system_prompt} = req.body;
41
  console.log(`[TRAFFIC] Request for ${model}`);
@@ -44,7 +39,6 @@ app.post('/api/generate', async (req, res) => {
44
  if (model === "claude") {
45
  const command = new ConverseCommand({
46
  modelId: "arn:aws:bedrock:us-east-1:106774395747:inference-profile/global.anthropic.claude-sonnet-4-6",
47
- // --- OFFICIAL BEDROCK SYSTEM PROMPT ---
48
  system: [{ text: system_prompt || CLAUDE_SYSTEM_PROMPT }],
49
  messages: [{ role: "user", content: [{ text: prompt }] }],
50
  inferenceConfig: { maxTokens: 48000, temperature: 1 },
@@ -60,7 +54,6 @@ app.post('/api/generate', async (req, res) => {
60
  } else {
61
  const response = await azureOpenAI.chat.completions.create({
62
  model: "gpt-5-mini",
63
- // --- OFFICIAL OPENAI SYSTEM PROMPT ---
64
  messages: [
65
  { role: "system", content: system_prompt || GPT_SYSTEM_PROMPT },
66
  { role: "user", content: prompt }
@@ -75,39 +68,61 @@ app.post('/api/generate', async (req, res) => {
75
  }
76
  });
77
 
78
- app.get('/', (req, res) => {
79
- res.send(`
80
- <body style="background:#111;color:#eee;font-family:sans-serif;padding:50px;">
81
- <div style="max-width:500px;margin:auto;background:#222;padding:20px;border-radius:10px;">
82
- <h2>Hollowpad Battle Arena</h2>
83
- <p style="font-size: 0.8em; color: #888;">Claude: Pro | GPT: Worker</p>
84
- <textarea id="p" style="width:100%;height:100px;background:#333;color:white;border:1px solid #444;"></textarea><br>
85
- <select id="m" style="width:100%;margin:10px 0;padding:10px;">
86
- <option value="gpt">GPT-5 Mini</option>
87
- <option value="claude">Claude Sonnet 4.6</option>
88
- </select><br>
89
- <button onclick="run()" style="width:100%;padding:10px;background:#0078d4;color:white;border:none;cursor:pointer;">Run Battle</button>
90
- <div id="out" style="margin-top:20px;white-space:pre-wrap;color:#ccc;border-top:1px solid #444;padding-top:10px;"></div>
91
- </div>
92
- <script>
93
- async function run() {
94
- const out = document.getElementById('out');
95
- out.innerText = "Processing with System Prompts...";
96
- try {
97
- const res = await fetch('/api/generate', {
98
- method: 'POST',
99
- headers: {'Content-Type': 'application/json'},
100
- body: JSON.stringify({ model: document.getElementById('m').value, prompt: document.getElementById('p').value })
101
- });
102
- const data = await res.json();
103
- out.innerText = data.success ? data.data : "ERROR: " + data.error;
104
- } catch (e) {
105
- out.innerText = "FETCH ERROR: " + e.message;
106
- }
107
  }
108
- </script>
109
- </body>
110
- `);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
  });
112
 
113
- app.listen(PORT, '0.0.0.0', () => console.log(`Server live on port ${PORT}`));
 
2
  import cors from 'cors';
3
  import dotenv from 'dotenv';
4
  import OpenAI from "openai";
5
+ import { BedrockRuntimeClient, ConverseStreamCommand } from "@aws-sdk/client-bedrock-runtime";
6
  import { NodeHttpHandler } from "@smithy/node-http-handler";
7
 
8
  dotenv.config();
 
20
  region: "us-east-1" ,
21
  requestHandler: new NodeHttpHandler({
22
  http2Handler: undefined,
23
+ })
 
 
 
 
 
 
24
  });
25
 
26
  const azureOpenAI = new OpenAI({
 
30
  defaultHeaders: { "api-key": "7U3m9NRkE38ThSWTr92hMgQ4hDCUFI9MAnFNrCgRL7MhdvckfTXwJQQJ99CBACHYHv6XJ3w3AAAAACOGV22P" }
31
  });
32
 
33
+ // --- STANDARD GENERATION (Blocking) ---
34
  app.post('/api/generate', async (req, res) => {
35
  const { model, prompt, system_prompt} = req.body;
36
  console.log(`[TRAFFIC] Request for ${model}`);
 
39
  if (model === "claude") {
40
  const command = new ConverseCommand({
41
  modelId: "arn:aws:bedrock:us-east-1:106774395747:inference-profile/global.anthropic.claude-sonnet-4-6",
 
42
  system: [{ text: system_prompt || CLAUDE_SYSTEM_PROMPT }],
43
  messages: [{ role: "user", content: [{ text: prompt }] }],
44
  inferenceConfig: { maxTokens: 48000, temperature: 1 },
 
54
  } else {
55
  const response = await azureOpenAI.chat.completions.create({
56
  model: "gpt-5-mini",
 
57
  messages: [
58
  { role: "system", content: system_prompt || GPT_SYSTEM_PROMPT },
59
  { role: "user", content: prompt }
 
68
  }
69
  });
70
 
71
// --- STREAMING GENERATION (Realtime) ---
// Streams model output to the client as incremental plain-text chunks.
// FIX: the original declared `Content-Type: text/event-stream` but wrote raw,
// un-framed chunks (no `data: ...\n\n` framing), which is invalid SSE and
// breaks EventSource clients. The header now matches what is actually sent.
app.post('/api/stream', async (req, res) => {
  const { model, prompt, system_prompt } = req.body;
  console.log(`[STREAM] Request for ${model}`);

  // Headers for an incremental, unbuffered plain-text stream.
  res.setHeader('Content-Type', 'text/plain; charset=utf-8');
  res.setHeader('Cache-Control', 'no-cache');
  res.setHeader('Connection', 'keep-alive');
  res.flushHeaders?.(); // send headers now so the client starts reading immediately

  try {
    if (model === "claude") {
      // TODO(review): move this account-specific inference-profile ARN into env config.
      const command = new ConverseStreamCommand({
        modelId: "arn:aws:bedrock:us-east-1:106774395747:inference-profile/global.anthropic.claude-sonnet-4-6",
        system: [{ text: system_prompt || CLAUDE_SYSTEM_PROMPT }],
        messages: [{ role: "user", content: [{ text: prompt }] }],
        inferenceConfig: { maxTokens: 48000, temperature: 1 },
        additionalModelRequestFields: {
          thinking: { type: "adaptive" },
          output_config: { effort: "high" }
        }
      });

      const response = await bedrockClient.send(command);

      // Forward only text deltas; other stream events (metadata, stop) are skipped.
      for await (const chunk of response.stream) {
        const text = chunk.contentBlockDelta?.delta?.text ?? "";
        if (text) res.write(text);
      }
      res.end();

    } else {
      const stream = await azureOpenAI.chat.completions.create({
        model: "gpt-5-mini",
        messages: [
          { role: "system", content: system_prompt || GPT_SYSTEM_PROMPT },
          { role: "user", content: prompt }
        ],
        reasoning_effort: "high",
        stream: true
      });

      for await (const chunk of stream) {
        const text = chunk.choices[0]?.delta?.content ?? "";
        if (text) res.write(text);
      }
      res.end();
    }
  } catch (err) {
    console.error(`❌ [STREAM ERROR]:`, err.message);
    // Headers are already sent, so the status code cannot change;
    // surface the failure in-band, and guard against writing after end.
    if (!res.writableEnded) {
      res.write(`[ERROR]: ${err.message}`);
      res.end();
    }
  }
});
127
 
128
// Bind to all interfaces so the server is reachable inside containers.
app.listen(PORT, '0.0.0.0', () => {
  console.log(`Main AI Agent Server live on port ${PORT}`);
});