File size: 9,981 Bytes
46fa70a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
// deno run --allow-net --allow-env adapter.ts

import { serve } from "https://deno.land/std@0.203.0/http/server.ts";

// --- Configuration from Environment Variables (Safer for deployment) ---

// Reads a comma-separated list of keys from the named environment variable
// and returns them as a Set (entries trimmed, empty entries dropped).
// Returns an empty Set — with a warning — when the variable is unset.
function getKeysFromEnv(envVarName: string): Set<string> {
  const raw = Deno.env.get(envVarName);
  if (!raw) {
    console.warn(`Environment variable ${envVarName} is not set.`);
    return new Set();
  }
  const keys = raw
    .split(",")
    .map((entry) => entry.trim())
    .filter((entry) => entry.length > 0);
  return new Set(keys);
}

// Client keys will be read from Hugging Face Secrets
// (comma-separated list in the CLIENT_KEYS secret).
const CLIENT_API_KEYS = getKeysFromEnv("CLIENT_KEYS"); 

// CodeGeeX tokens will also be read from Hugging Face Secrets
// (comma-separated list in the CODEGEEX_KEYS secret).
const codegeeXTokensRaw = Array.from(getKeysFromEnv("CODEGEEX_KEYS"));

// Per-token rotation state used by rotateToken():
// - isValid: set to false permanently after a 401/403 from upstream
// - lastUsed: ms timestamp of the last time the token was handed out
// - errorCount: consecutive non-auth failures; triggers a cooldown at MAX_ERROR_COUNT
const CODEGEEX_TOKENS: {
  token: string;
  isValid: boolean;
  lastUsed: number;
  errorCount: number;
}[] = codegeeXTokensRaw.map(token => ({
    token: token,
    isValid: true,
    lastUsed: 0,
    errorCount: 0
}));


// Errors tolerated before a token is benched for ERROR_COOLDOWN ms.
const MAX_ERROR_COUNT = 3;
const ERROR_COOLDOWN = 300 * 1000; // ms (5 minutes)

// --- Utilities ---
// Current wall-clock time in milliseconds since the Unix epoch.
function now(): number {
  return new Date().getTime();
}

// Selects a usable CodeGeeX token for the next upstream call.
// Skips tokens marked invalid and tokens still inside their error cooldown,
// resets the error count of tokens whose cooldown has expired, then returns
// the least-recently-used candidate (ties broken by lower error count),
// stamping its lastUsed. Returns null when no token is usable.
function rotateToken(): typeof CODEGEEX_TOKENS[0] | null {
  if (CODEGEEX_TOKENS.length === 0) {
      console.error("CODEGEEX_TOKENS array is empty. Check your CODEGEEX_KEYS secret.");
      return null;
  }

  const candidates: typeof CODEGEEX_TOKENS = [];
  for (const entry of CODEGEEX_TOKENS) {
    if (!entry.isValid) continue;
    const coolingDown =
      entry.errorCount >= MAX_ERROR_COUNT && now() - entry.lastUsed < ERROR_COOLDOWN;
    if (!coolingDown) candidates.push(entry);
  }
  if (candidates.length === 0) return null;

  // A candidate that still carries errorCount >= MAX_ERROR_COUNT has finished
  // its cooldown (it survived the filter above) — give it a clean slate.
  for (const entry of candidates) {
    if (entry.errorCount >= MAX_ERROR_COUNT && now() - entry.lastUsed >= ERROR_COOLDOWN) {
      entry.errorCount = 0;
    }
  }

  // Least recently used first; equal timestamps fall back to fewest errors.
  candidates.sort((a, b) => a.lastUsed - b.lastUsed || a.errorCount - b.errorCount);
  const chosen = candidates[0];
  chosen.lastUsed = now();
  return chosen;
}

// This function translates the OpenAI format to CodeGeeX format
function convertToCodeGeeXPayload(params: { model: string; messages: any[] }) {
    // CodeGeeX seems to use the last message's content as the main prompt.
    // The history part is more complex, here we simplify it.
    const lastMessage = params.messages.slice(-1)[0];
    const history = params.messages.slice(0, -1)
        .filter(msg => msg.role === 'user' || msg.role === 'assistant')
        .map(msg => ({
            role: msg.role,
            content: msg.content
        }));

    return {
        user_role: 0, // This seems to be a fixed value
        ide: "HuggingFace", // Let's identify the source
        prompt: lastMessage?.content || "",
        history: history, // Passing a simplified history
        model: params.model,
    };
}


// Forwards a chat request to the CodeGeeX upstream using a rotated token and
// adapts the reply to the OpenAI chat-completions format — as an SSE stream
// of chat.completion.chunk objects when params.stream is true, otherwise as a
// single chat.completion JSON object. On upstream failure the token is marked
// invalid (401/403) or its error count is bumped so rotateToken() can route
// around bad credentials. Returns 503 when no token is available, 502 on an
// upstream error, 500 on a fetch failure.
async function proxyChat(req: Request, params: { stream: boolean; model: string; messages: any[] }) {
  const tokenObj = rotateToken();
  if (!tokenObj) {
    return new Response(JSON.stringify({ error: { message: "No valid CodeGeeX tokens available", type: "server_error" } }), { status: 503, headers: { "Content-Type": "application/json" }});
  }

  const payload = convertToCodeGeeXPayload(params);

  try {
    const response = await fetch("https://codegeex.cn/prod/code/chatCodeSseV3/chat", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Accept": "text/event-stream",
        "code-token": tokenObj.token,
      },
      body: JSON.stringify(payload),
    });

    if (!response.ok) {
      console.error(`Upstream error from CodeGeeX: ${response.status}`);
      if (response.status === 401 || response.status === 403) {
        // Auth failures are treated as permanent for this token.
        tokenObj.isValid = false;
        console.warn(`Token ${tokenObj.token.substring(0, 15)}... marked as invalid due to 401/403 error.`);
      } else {
        tokenObj.errorCount++;
        console.warn(`Token ${tokenObj.token.substring(0, 15)}... error count increased to ${tokenObj.errorCount}.`);
      }
      const errorBody = await response.text();
      return new Response(JSON.stringify({ error: { message: `Upstream error ${response.status}: ${errorBody}`, type: "upstream_error" } }), { status: 502, headers: { "Content-Type": "application/json" }});
    }

    // For stream, we must transform the raw CodeGeeX SSE to OpenAI format
    if (params.stream) {
        const { readable, writable } = new TransformStream();
        const writer = writable.getWriter();
        const encoder = new TextEncoder();

        // This function processes the stream from CodeGeeX and sends OpenAI compatible chunks
        (async () => {
            const reader = response.body?.getReader();
            if (!reader) {
                await writer.close();
                return;
            }
            const decoder = new TextDecoder();
            const completionId = `chatcmpl-${crypto.randomUUID()}`;
            const creationTime = Math.floor(now() / 1000);

            // Emits one OpenAI-style delta chunk carrying `content`.
            const writeChunk = async (content: string) => {
                if (!content) return; // nothing decoded yet — skip empty deltas
                const openAIChunk = {
                    id: completionId,
                    object: "chat.completion.chunk",
                    created: creationTime,
                    model: params.model,
                    choices: [{ delta: { content }, index: 0, finish_reason: null }]
                };
                await writer.write(encoder.encode(`data: ${JSON.stringify(openAIChunk)}\n\n`));
            };

            try {
                while (true) {
                    const { done, value } = await reader.read();
                    if (done) break;
                    // { stream: true } buffers a partial multi-byte UTF-8
                    // sequence until the next chunk arrives, instead of
                    // emitting U+FFFD replacement characters when a character
                    // is split across network chunks.
                    await writeChunk(decoder.decode(value, { stream: true }));
                }
                // Flush any bytes still buffered in the decoder.
                await writeChunk(decoder.decode());
                // Send the final DONE chunk
                await writer.write(encoder.encode(`data: [DONE]\n\n`));
            } catch (e) {
                console.error("Error while transforming stream:", e);
            } finally {
                await writer.close();
            }
        })();
        
        return new Response(readable, {
            status: 200,
            headers: { "Content-Type": "text/event-stream", "Cache-Control": "no-cache", "Connection": "keep-alive" },
        });
    } else {
      // Non-streaming: buffer the whole upstream reply and wrap it as a
      // single OpenAI chat.completion object.
      const text = await response.text();
      return new Response(JSON.stringify({
        id: `chatcmpl-${crypto.randomUUID()}`,
        object: "chat.completion",
        created: Math.floor(now() / 1000),
        model: params.model,
        choices: [{ message: { role: "assistant", content: text }, index: 0, finish_reason: "stop" }],
        usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 } // Placeholder — upstream does not report token counts
      }), {
        status: 200,
        headers: { "Content-Type": "application/json" },
      });
    }
  } catch (err) {
    tokenObj.errorCount++;
    console.error("Fetch to CodeGeeX failed:", err);
    // `err` is `unknown` under strict mode; narrow before reading .message.
    const message = err instanceof Error ? err.message : String(err);
    return new Response(JSON.stringify({ error: { message, type: "server_error" } }), { status: 500, headers: { "Content-Type": "application/json" }});
  }
}

// --- Main Handler ---
// --- Main Handler ---
// Routes incoming requests: CORS preflight, then Bearer-key authentication
// against CLIENT_API_KEYS, then GET /v1/models and POST /v1/chat/completions.
// Anything else returns 404.
async function handler(req: Request): Promise<Response> {
  const url = new URL(req.url);
  console.log(`Received request: ${req.method} ${url.pathname}`);

  // CORS preflight request handler for web clients.
  // NOTE(review): the non-preflight responses below do not carry
  // Access-Control-Allow-Origin, so browser clients may still be blocked by
  // CORS on the actual request — confirm whether those headers are needed.
  if (req.method === 'OPTIONS') {
    return new Response(null, {
      status: 204,
      headers: {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'GET, POST, OPTIONS',
        'Access-Control-Allow-Headers': 'Content-Type, Authorization',
      },
    });
  }
  
  // Authentication middleware: the Bearer token must be one of the
  // configured client keys.
  const auth = req.headers.get("Authorization")?.replace(/^Bearer\s+/, "");
  if (CLIENT_API_KEYS.size === 0) {
    console.error("Server misconfigured: CLIENT_KEYS secret is not set or empty.");
    return new Response(JSON.stringify({ error: { message: "Server misconfigured: no client keys", type: "server_error" }}), { status: 503, headers: { "Content-Type": "application/json" }});
  }
  if (!auth || !CLIENT_API_KEYS.has(auth)) {
    return new Response(JSON.stringify({ error: { message: "Invalid or missing API key", type: "auth_error" }}), {
      status: 401,
      headers: { "WWW-Authenticate": "Bearer", "Content-Type": "application/json" },
    });
  }

  // GET /v1/models — static catalogue of the models this adapter exposes.
  if (url.pathname === "/v1/models" && req.method === "GET") {
    const modelData = [
      { id: "codegeex-4", object: "model", created: Math.floor(now() / 1000), owned_by: "codegeex" },
      { id: "codegeex-pro", object: "model", created: Math.floor(now() / 1000), owned_by: "codegeex" }
    ];
    return new Response(JSON.stringify({ object: "list", data: modelData }), {
      headers: { "Content-Type": "application/json" },
    });
  }

  // POST /v1/chat/completions
  if (url.pathname === "/v1/chat/completions" && req.method === "POST") {
    try {
        const body = await req.json();
        // NOTE: stream defaults to true here, unlike OpenAI's default of false.
        const { model, messages, stream = true } = body;
        if (!model || !Array.isArray(messages) || messages.length === 0) {
          return new Response(JSON.stringify({ error: { message: "Bad Request: 'model' and 'messages' are required.", type: "invalid_request_error" } }), { status: 400, headers: { "Content-Type": "application/json" }});
        }
        // `await` so a rejected proxyChat promise is handled by this
        // try/catch instead of escaping it as an unhandled rejection.
        return await proxyChat(req, { model, messages, stream });
    } catch (e) {
        console.error("Request handling failed:", e);
        return new Response(JSON.stringify({ error: { message: "Invalid JSON body.", type: "invalid_request_error" } }), { status: 400, headers: { "Content-Type": "application/json" }});
    }
  }

  // Not found
  return new Response(JSON.stringify({ error: "Not Found" }), { status: 404, headers: { "Content-Type": "application/json" }});
}

// --- Start Server ---
// NOTE(review): std/http's serve() is deprecated in newer std releases in
// favor of Deno.serve — fine here while pinned to std@0.203.0, but worth
// migrating if the std version is bumped.
const PORT = 7860; // Use the standard port for Hugging Face Spaces
console.log(`Starting Deno CodeGeeX Adapter on http://0.0.0.0:${PORT}`);
serve(handler, { port: PORT });