InfiniaxAI commited on
Commit
cbf1c02
·
verified ·
1 Parent(s): 734af91

Create ARDR_Utils.ts

Browse files
Files changed (1) hide show
  1. ARDR_Utils.ts +99 -0
ARDR_Utils.ts ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import OpenAI from "openai";
2
+ import { colors } from "./ARDR_models";
3
+
4
// Module-level singleton OpenAI client; assigned by initializeOpenAI and
// read via getOpenAI. Null until initializeOpenAI has been called.
let openaiClient: OpenAI | null = null;
5
+
6
+ export function initializeOpenAI(apiKey: string): OpenAI {
7
+ openaiClient = new OpenAI({
8
+ baseURL: "https://openrouter.ai/api/v1",
9
+ apiKey,
10
+ defaultHeaders: {
11
+ "HTTP-Referer": "https://infiniax.replit.app",
12
+ "X-Title": "ARDR High"
13
+ }
14
+ });
15
+ return openaiClient;
16
+ }
17
+
18
+ export function getOpenAI(): OpenAI {
19
+ if (!openaiClient) {
20
+ throw new Error("OpenAI client not initialized. Call initializeOpenAI first.");
21
+ }
22
+ return openaiClient;
23
+ }
24
+
25
+ export function log(stage: string, message: string, color: string = colors.white): void {
26
+ const timestamp = new Date().toISOString().slice(11, 19);
27
+ console.log(`${colors.dim}[${timestamp}]${colors.reset} ${color}[${stage}]${colors.reset} ${message}`);
28
+ }
29
+
30
+ export function logSection(title: string): void {
31
+ console.log(`\n${colors.bgBlue}${colors.white}${colors.bright} ═══ ${title} ═══ ${colors.reset}\n`);
32
+ }
33
+
34
+ export async function callModel(
35
+ model: string,
36
+ systemPrompt: string,
37
+ userPrompt: string,
38
+ maxTokens: number = 1500
39
+ ): Promise<string> {
40
+ const openai = getOpenAI();
41
+ try {
42
+ const response = await openai.chat.completions.create({
43
+ model,
44
+ messages: [
45
+ { role: "system", content: systemPrompt },
46
+ { role: "user", content: userPrompt }
47
+ ],
48
+ max_tokens: maxTokens,
49
+ temperature: 0.7
50
+ });
51
+ return response.choices[0]?.message?.content || "";
52
+ } catch (error: any) {
53
+ log("ERROR", `Model call failed: ${error.message}`, colors.red);
54
+ return `[Error calling ${model}: ${error.message}]`;
55
+ }
56
+ }
57
+
58
+ export async function callModelStreaming(
59
+ model: string,
60
+ systemPrompt: string,
61
+ userPrompt: string,
62
+ maxTokens: number = 4000
63
+ ): Promise<string> {
64
+ const openai = getOpenAI();
65
+ try {
66
+ const stream = await openai.chat.completions.create({
67
+ model,
68
+ messages: [
69
+ { role: "system", content: systemPrompt },
70
+ { role: "user", content: userPrompt }
71
+ ],
72
+ max_tokens: maxTokens,
73
+ temperature: 0.7,
74
+ stream: true
75
+ });
76
+
77
+ let fullResponse = "";
78
+ for await (const chunk of stream) {
79
+ const content = chunk.choices[0]?.delta?.content || "";
80
+ process.stdout.write(content);
81
+ fullResponse += content;
82
+ }
83
+ console.log();
84
+ return fullResponse;
85
+ } catch (error: any) {
86
+ log("ERROR", `Streaming call failed: ${error.message}`, colors.red);
87
+ return `[Error calling ${model}: ${error.message}]`;
88
+ }
89
+ }
90
+
91
+ export function parseJsonFromResponse(response: string): any | null {
92
+ try {
93
+ const jsonMatch = response.match(/\{[\s\S]*\}/);
94
+ if (jsonMatch) {
95
+ return JSON.parse(jsonMatch[0]);
96
+ }
97
+ } catch {}
98
+ return null;
99
+ }