|
|
import { describe, it, expect, beforeEach } from "bun:test"; |
|
|
import { OpenAIService } from "../src/openai-service"; |
|
|
import type { ChatCompletionRequest, ToolDefinition } from "../src/types"; |
|
|
|
|
|
describe("Rate Limiting and Error Handling", () => { |
|
|
// Shared service under test. Tests reach into the private `duckAI`
// collaborator via bracket access to stub its `chat` method and
// simulate upstream failures.
let openAIService: OpenAIService;

// Fresh instance per test so stubbed `chat` handlers and any internal
// state cannot leak between test cases.
beforeEach(() => {
  openAIService = new OpenAIService();
});
|
|
|
|
|
const sampleTools: ToolDefinition[] = [ |
|
|
{ |
|
|
type: "function", |
|
|
function: { |
|
|
name: "get_current_time", |
|
|
description: "Get the current time", |
|
|
}, |
|
|
}, |
|
|
{ |
|
|
type: "function", |
|
|
function: { |
|
|
name: "calculate", |
|
|
description: "Perform mathematical calculations", |
|
|
parameters: { |
|
|
type: "object", |
|
|
properties: { |
|
|
expression: { |
|
|
type: "string", |
|
|
description: "Mathematical expression to evaluate", |
|
|
}, |
|
|
}, |
|
|
required: ["expression"], |
|
|
}, |
|
|
}, |
|
|
}, |
|
|
]; |
|
|
|
|
|
describe("Duck.ai API Error Handling", () => { |
|
|
it("should handle rate limiting gracefully with fallback", async () => { |
|
|
const request: ChatCompletionRequest = { |
|
|
model: "gpt-4o-mini", |
|
|
messages: [{ role: "user", content: "What time is it?" }], |
|
|
tools: sampleTools, |
|
|
tool_choice: "required", |
|
|
}; |
|
|
|
|
|
|
|
|
const originalChat = openAIService["duckAI"].chat; |
|
|
openAIService["duckAI"].chat = async () => { |
|
|
throw new Error("429 Too Many Requests"); |
|
|
}; |
|
|
|
|
|
try { |
|
|
const response = await openAIService.createChatCompletion(request); |
|
|
|
|
|
|
|
|
expect(response.choices[0].finish_reason).toBe("tool_calls"); |
|
|
expect(response.choices[0].message.tool_calls).toHaveLength(1); |
|
|
expect(response.choices[0].message.tool_calls![0].function.name).toBe( |
|
|
"get_current_time" |
|
|
); |
|
|
} catch (error) { |
|
|
|
|
|
expect(error).toBeInstanceOf(Error); |
|
|
} finally { |
|
|
|
|
|
openAIService["duckAI"].chat = originalChat; |
|
|
} |
|
|
}); |
|
|
|
|
|
it("should handle empty responses with tool_choice required", async () => { |
|
|
const request: ChatCompletionRequest = { |
|
|
model: "gpt-4o-mini", |
|
|
messages: [{ role: "user", content: "Calculate 5 + 3" }], |
|
|
tools: sampleTools, |
|
|
tool_choice: "required", |
|
|
}; |
|
|
|
|
|
|
|
|
const originalChat = openAIService["duckAI"].chat; |
|
|
openAIService["duckAI"].chat = async () => ""; |
|
|
|
|
|
const response = await openAIService.createChatCompletion(request); |
|
|
|
|
|
|
|
|
expect(response.choices[0].finish_reason).toBe("tool_calls"); |
|
|
expect(response.choices[0].message.tool_calls).toHaveLength(1); |
|
|
|
|
|
|
|
|
expect(response.choices[0].message.tool_calls![0].function.name).toBe( |
|
|
"calculate" |
|
|
); |
|
|
|
|
|
|
|
|
openAIService["duckAI"].chat = originalChat; |
|
|
}); |
|
|
|
|
|
it("should handle network errors gracefully", async () => { |
|
|
const request: ChatCompletionRequest = { |
|
|
model: "gpt-4o-mini", |
|
|
messages: [{ role: "user", content: "Hello" }], |
|
|
tools: sampleTools, |
|
|
}; |
|
|
|
|
|
|
|
|
const originalChat = openAIService["duckAI"].chat; |
|
|
openAIService["duckAI"].chat = async () => { |
|
|
throw new Error("Network error"); |
|
|
}; |
|
|
|
|
|
try { |
|
|
await openAIService.createChatCompletion(request); |
|
|
|
|
|
} catch (error) { |
|
|
|
|
|
expect(error).toBeInstanceOf(Error); |
|
|
expect((error as Error).message).toContain("Network error"); |
|
|
} finally { |
|
|
|
|
|
openAIService["duckAI"].chat = originalChat; |
|
|
} |
|
|
}); |
|
|
|
|
|
it("should handle malformed responses from Duck.ai", async () => { |
|
|
const request: ChatCompletionRequest = { |
|
|
model: "gpt-4o-mini", |
|
|
messages: [{ role: "user", content: "Test" }], |
|
|
tools: sampleTools, |
|
|
}; |
|
|
|
|
|
|
|
|
const originalChat = openAIService["duckAI"].chat; |
|
|
openAIService["duckAI"].chat = async () => |
|
|
"This is not JSON and not a function call"; |
|
|
|
|
|
const response = await openAIService.createChatCompletion(request); |
|
|
|
|
|
|
|
|
expect(response.choices[0].message.role).toBe("assistant"); |
|
|
expect(response.choices[0].message.content).toBe( |
|
|
"This is not JSON and not a function call" |
|
|
); |
|
|
expect(response.choices[0].finish_reason).toBe("stop"); |
|
|
|
|
|
|
|
|
openAIService["duckAI"].chat = originalChat; |
|
|
}); |
|
|
|
|
|
it("should handle partial JSON responses", async () => { |
|
|
const request: ChatCompletionRequest = { |
|
|
model: "gpt-4o-mini", |
|
|
messages: [{ role: "user", content: "Test" }], |
|
|
tools: sampleTools, |
|
|
}; |
|
|
|
|
|
|
|
|
const originalChat = openAIService["duckAI"].chat; |
|
|
openAIService["duckAI"].chat = async () => |
|
|
'{"tool_calls": [{"id": "call_1", "type": "function"'; |
|
|
|
|
|
const response = await openAIService.createChatCompletion(request); |
|
|
|
|
|
|
|
|
expect(response.choices[0].message.role).toBe("assistant"); |
|
|
expect(response.choices[0].finish_reason).toBe("stop"); |
|
|
|
|
|
|
|
|
openAIService["duckAI"].chat = originalChat; |
|
|
}); |
|
|
}); |
|
|
|
|
|
describe("Resilience Testing", () => { |
|
|
it("should handle rapid consecutive requests", async () => { |
|
|
const requests = Array.from({ length: 5 }, (_, i) => ({ |
|
|
model: "gpt-4o-mini", |
|
|
messages: [{ role: "user", content: `Test message ${i}` }], |
|
|
tools: sampleTools, |
|
|
})); |
|
|
|
|
|
|
|
|
const originalChat = openAIService["duckAI"].chat; |
|
|
let callCount = 0; |
|
|
openAIService["duckAI"].chat = async () => { |
|
|
callCount++; |
|
|
if (callCount % 2 === 0) { |
|
|
throw new Error("Rate limited"); |
|
|
} |
|
|
return `Response ${callCount}`; |
|
|
}; |
|
|
|
|
|
const results = await Promise.allSettled( |
|
|
requests.map((req) => openAIService.createChatCompletion(req)) |
|
|
); |
|
|
|
|
|
|
|
|
results.forEach((result, index) => { |
|
|
if (result.status === "fulfilled") { |
|
|
expect(result.value.choices[0].message.role).toBe("assistant"); |
|
|
} else { |
|
|
expect(result.reason).toBeInstanceOf(Error); |
|
|
} |
|
|
}); |
|
|
|
|
|
|
|
|
openAIService["duckAI"].chat = originalChat; |
|
|
}); |
|
|
|
|
|
it("should maintain function execution capability during API issues", async () => { |
|
|
|
|
|
const toolCall = { |
|
|
id: "call_1", |
|
|
type: "function" as const, |
|
|
function: { |
|
|
name: "get_current_time", |
|
|
arguments: "{}", |
|
|
}, |
|
|
}; |
|
|
|
|
|
const result = await openAIService.executeToolCall(toolCall); |
|
|
expect(result).toMatch(/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/); |
|
|
}); |
|
|
|
|
|
it("should handle streaming errors gracefully", async () => { |
|
|
const request: ChatCompletionRequest = { |
|
|
model: "gpt-4o-mini", |
|
|
messages: [{ role: "user", content: "Test streaming" }], |
|
|
stream: true, |
|
|
}; |
|
|
|
|
|
|
|
|
const originalChat = openAIService["duckAI"].chat; |
|
|
openAIService["duckAI"].chat = async () => { |
|
|
throw new Error("Streaming error"); |
|
|
}; |
|
|
|
|
|
try { |
|
|
const stream = await openAIService.createChatCompletionStream(request); |
|
|
const reader = stream.getReader(); |
|
|
|
|
|
|
|
|
const { done, value } = await reader.read(); |
|
|
|
|
|
if (value) { |
|
|
const text = new TextDecoder().decode(value); |
|
|
expect(text).toContain("data:"); |
|
|
} |
|
|
} catch (error) { |
|
|
|
|
|
expect(error).toBeInstanceOf(Error); |
|
|
} finally { |
|
|
|
|
|
openAIService["duckAI"].chat = originalChat; |
|
|
} |
|
|
}); |
|
|
}); |
|
|
|
|
|
describe("Fallback Mechanisms", () => { |
|
|
it("should use intelligent function selection when Duck.ai fails", async () => { |
|
|
const testCases = [ |
|
|
{ |
|
|
message: "What time is it now?", |
|
|
expectedFunction: "get_current_time", |
|
|
}, |
|
|
{ |
|
|
message: "Calculate 15 * 8 + 42", |
|
|
expectedFunction: "calculate", |
|
|
}, |
|
|
{ |
|
|
message: "Please compute 2 + 2", |
|
|
expectedFunction: "calculate", |
|
|
}, |
|
|
]; |
|
|
|
|
|
|
|
|
const originalChat = openAIService["duckAI"].chat; |
|
|
openAIService["duckAI"].chat = async () => { |
|
|
throw new Error("API unavailable"); |
|
|
}; |
|
|
|
|
|
for (const testCase of testCases) { |
|
|
const request: ChatCompletionRequest = { |
|
|
model: "gpt-4o-mini", |
|
|
messages: [{ role: "user", content: testCase.message }], |
|
|
tools: sampleTools, |
|
|
tool_choice: "required", |
|
|
}; |
|
|
|
|
|
try { |
|
|
const response = await openAIService.createChatCompletion(request); |
|
|
|
|
|
if (response.choices[0].finish_reason === "tool_calls") { |
|
|
expect( |
|
|
response.choices[0].message.tool_calls![0].function.name |
|
|
).toBe(testCase.expectedFunction); |
|
|
} |
|
|
} catch (error) { |
|
|
|
|
|
expect(error).toBeInstanceOf(Error); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
openAIService["duckAI"].chat = originalChat; |
|
|
}); |
|
|
}); |
|
|
}); |
|
|
|