File size: 5,205 Bytes
55a0975 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 |
/**
* This test demonstrates that our server is compatible with the OpenAI SDK
* Run this after starting the server to verify compatibility
*/
// Mock OpenAI SDK interface for testing
/** Connection settings for the mock client (mirrors the real OpenAI SDK constructor options). */
interface OpenAIConfig {
  baseURL: string;
  apiKey: string;
}
/** One message in a chat transcript, tagged with its author role. */
interface ChatMessage {
  role: "system" | "user" | "assistant";
  content: string;
}
/** Request body for POST /v1/chat/completions; `stream` selects SSE streaming. */
interface ChatCompletionRequest {
  model: string;
  messages: ChatMessage[];
  stream?: boolean;
}
/**
 * Minimal stand-in for the OpenAI SDK client.
 *
 * Issues raw `fetch()` calls against an OpenAI-compatible server so this
 * test can run without depending on the real `openai` package. Only the
 * surface the test uses is implemented: `chat.completions.create` and
 * `models.list`.
 */
class MockOpenAI {
  private readonly baseURL: string;
  private readonly apiKey: string;

  constructor(config: OpenAIConfig) {
    this.baseURL = config.baseURL;
    this.apiKey = config.apiKey;
  }

  /** Mirrors `client.chat.completions.create(...)` from the SDK. */
  get chat() {
    return {
      completions: {
        create: async (request: ChatCompletionRequest) => {
          const response = await fetch(`${this.baseURL}/v1/chat/completions`, {
            method: "POST",
            headers: {
              "Content-Type": "application/json",
              Authorization: `Bearer ${this.apiKey}`,
            },
            body: JSON.stringify(request),
          });
          if (!response.ok) {
            const detail = await response.text();
            throw new Error(`HTTP ${response.status}: ${detail}`);
          }
          // Streaming callers consume the raw Response body themselves;
          // non-streaming callers get the parsed JSON payload.
          return request.stream ? response : response.json();
        },
      },
    };
  }

  /** Mirrors `client.models.list()` from the SDK. */
  get models() {
    return {
      list: async () => {
        const response = await fetch(`${this.baseURL}/v1/models`, {
          headers: { Authorization: `Bearer ${this.apiKey}` },
        });
        if (!response.ok) {
          const detail = await response.text();
          throw new Error(`HTTP ${response.status}: ${detail}`);
        }
        return response.json();
      },
    };
  }
}
/**
 * Exercises a locally running server through the mock SDK client:
 * models listing, a basic completion, SSE streaming, and a multi-turn
 * conversation. Logs progress to stdout and exits the process with
 * code 1 if any check fails.
 */
async function testOpenAICompatibility() {
  console.log("🧪 Testing OpenAI SDK compatibility...\n");
  const openai = new MockOpenAI({
    baseURL: "http://localhost:3000",
    apiKey: "dummy-key", // Our server doesn't require auth, but SDK expects it
  });
  try {
    // Test 1: List models
    console.log("1️⃣ Testing models endpoint...");
    const models = await openai.models.list();
    console.log(`✅ Found ${models.data.length} models:`);
    models.data.forEach((model: any) => {
      console.log(` - ${model.id}`);
    });
    console.log();

    // Test 2: Basic chat completion
    console.log("2️⃣ Testing basic chat completion...");
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [
        {
          role: "user",
          content: "Hello! Please respond with just 'Hi there!'",
        },
      ],
    });
    console.log("✅ Chat completion response:");
    console.log(` ID: ${completion.id}`);
    console.log(` Model: ${completion.model}`);
    console.log(` Response: ${completion.choices[0].message.content}`);
    console.log(` Tokens: ${completion.usage.total_tokens}`);
    console.log();

    // Test 3: Streaming chat completion
    console.log("3️⃣ Testing streaming chat completion...");
    const streamResponse = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [
        { role: "user", content: "Count from 1 to 5, one number per line" },
      ],
      stream: true,
    });
    console.log("✅ Streaming response:");
    const reader = streamResponse.body?.getReader();
    const decoder = new TextDecoder();
    let streamedContent = "";
    if (reader) {
      // Buffer partial lines between reads: an SSE "data: ..." line can be
      // split across network chunks, and decode({stream: true}) keeps
      // multi-byte UTF-8 characters intact across chunk boundaries. Without
      // both, split chunks would be silently dropped as invalid JSON.
      let buffer = "";
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() ?? ""; // retain the trailing partial line
        for (const line of lines) {
          if (line.startsWith("data: ") && !line.includes("[DONE]")) {
            try {
              const data = JSON.parse(line.slice(6));
              const content = data.choices[0]?.delta?.content;
              if (content) {
                streamedContent += content;
                process.stdout.write(content);
              }
            } catch {
              // Skip non-JSON data lines (e.g. keep-alive comments)
            }
          }
        }
      }
    }
    console.log(`\n Total streamed content: "${streamedContent.trim()}"`);
    console.log();

    // Test 4: Multi-turn conversation
    console.log("4️⃣ Testing multi-turn conversation...");
    const conversation = await openai.chat.completions.create({
      model: "claude-3-haiku-20240307",
      messages: [
        { role: "system", content: "You are a helpful math tutor." },
        { role: "user", content: "What is 2 + 2?" },
        { role: "assistant", content: "2 + 2 equals 4." },
        { role: "user", content: "What about 3 + 3?" },
      ],
    });
    console.log("✅ Multi-turn conversation:");
    console.log(` Response: ${conversation.choices[0].message.content}`);
    console.log();
    console.log("🎉 All tests passed! The server is OpenAI SDK compatible.");
  } catch (error) {
    console.error("❌ Test failed:", error);
    process.exit(1);
  }
}
// Run the suite when this file is the entry point (Bun/Deno `import.meta.main`);
// `void` marks the promise as intentionally fire-and-forget (errors are
// handled inside testOpenAICompatibility itself).
if (import.meta.main) {
  void testOpenAICompatibility();
}

export { testOpenAICompatibility };
|