// deepseek-mcp-server — DeepSeek chat-completion integration for MCP.
| import { MCPServer, Tool, Resource } from '@widget-tdc/mcp-types'; | |
| import axios, { AxiosInstance, AxiosResponse, AxiosError } from 'axios'; | |
| import { z } from 'zod'; | |
| import { | |
| DeepSeekResponse, | |
| ChatMessage, | |
| ModelInfo, | |
| ModelConfig | |
| } from './deepseek-types.js'; | |
// DeepSeek HTTP API configuration.
// BASE_URL is the service root; endpoint paths under ENDPOINTS are appended
// per request by the axios instance (baseURL + path).
const API_CONFIG = {
  BASE_URL: 'https://api.deepseek.com', // Updated to base URL, endpoints usually include /chat/completions
  DEFAULT_MODEL: 'deepseek-reasoner',
  ENDPOINTS: {
    CHAT: '/chat/completions'
  }
} as const;
// Catalog of DeepSeek models exposed as MCP resources (see listResources,
// where each entry becomes a `models://{id}` URI).
const MODELS: ModelInfo[] = [
  {
    id: "deepseek-chat",
    name: "DeepSeek Chat",
    description: "General-purpose chat model optimized for dialogue"
  },
  {
    id: "deepseek-reasoner",
    name: "DeepSeek Reasoner",
    description: "Model optimized for reasoning and problem-solving"
  }
];
// Tunable sampling parameters exposed as MCP resources (each entry becomes a
// `config://{id}` URI in listResources). The `default`/`minimum`/`maximum`
// values here mirror the bounds enforced by the zod schemas in the tool
// handlers — keep the two in sync when changing either.
const MODEL_CONFIGS: ModelConfig[] = [
  {
    id: "temperature",
    name: "Temperature",
    type: "number",
    description: "Controls randomness in the output (0.0 to 2.0)",
    default: 0.7,
    minimum: 0,
    maximum: 2
  },
  {
    id: "max_tokens",
    name: "Maximum Tokens",
    type: "integer",
    description: "Maximum number of tokens to generate",
    default: 8000,
    minimum: 1
  },
  {
    id: "top_p",
    name: "Top P",
    type: "number",
    description: "Controls diversity via nucleus sampling (0.0 to 1.0)",
    default: 1.0,
    minimum: 0,
    maximum: 1
  },
  {
    id: "frequency_penalty",
    name: "Frequency Penalty",
    type: "number",
    description: "Reduces repetition by penalizing frequent tokens (-2.0 to 2.0)",
    default: 0.1, // NOTE(review): nonzero default — presumably intentional; DeepSeek's own default is 0. Confirm.
    minimum: -2,
    maximum: 2
  },
  {
    id: "presence_penalty",
    name: "Presence Penalty",
    type: "number",
    description: "Reduces repetition by penalizing used tokens (-2.0 to 2.0)",
    default: 0,
    minimum: -2,
    maximum: 2
  }
];
| export class DeepSeekServer implements MCPServer { | |
| name = "deepseek-mcp-server"; | |
| version = "0.1.0"; | |
| private axiosInstance: AxiosInstance; | |
| private conversationHistory: ChatMessage[] = []; | |
| private apiKey: string; | |
| constructor() { | |
| this.apiKey = process.env.DEEPSEEK_API_KEY || ''; | |
| if (!this.apiKey) { | |
| console.warn("⚠️ DEEPSEEK_API_KEY environment variable is missing. DeepSeek Server will be disabled."); | |
| } | |
| this.axiosInstance = axios.create({ | |
| baseURL: API_CONFIG.BASE_URL, | |
| headers: { | |
| 'Authorization': `Bearer ${this.apiKey}`, | |
| 'Content-Type': 'application/json' | |
| } | |
| }); | |
| // Handle API errors | |
| this.axiosInstance.interceptors.response.use( | |
| (response: AxiosResponse) => response, | |
| (error: AxiosError) => { | |
| console.error("[DeepSeek API Error]", error.response?.data || error.message); | |
| throw error; | |
| } | |
| ); | |
| } | |
| async listTools(): Promise<Tool[]> { | |
| if (!this.apiKey) return []; | |
| return [ | |
| { | |
| name: "chat_completion", | |
| description: "Generate a chat completion using DeepSeek models", | |
| inputSchema: { | |
| type: "object", | |
| properties: { | |
| message: { type: "string", description: "Single message content" }, | |
| messages: { | |
| type: "array", | |
| items: { | |
| type: "object", | |
| properties: { | |
| role: { type: "string", enum: ["system", "user", "assistant"] }, | |
| content: { type: "string" } | |
| }, | |
| required: ["role", "content"] | |
| } | |
| }, | |
| model: { type: "string", default: "deepseek-reasoner" }, | |
| temperature: { type: "number", minimum: 0, maximum: 2, default: 0.7 }, | |
| max_tokens: { type: "integer", minimum: 1, default: 8000 }, | |
| top_p: { type: "number", minimum: 0, maximum: 1, default: 1.0 }, | |
| frequency_penalty: { type: "number", minimum: -2, maximum: 2, default: 0.1 }, | |
| presence_penalty: { type: "number", minimum: -2, maximum: 2, default: 0 } | |
| } | |
| } | |
| }, | |
| { | |
| name: "multi_turn_chat", | |
| description: "Engage in a multi-turn conversation with context history", | |
| inputSchema: { | |
| type: "object", | |
| properties: { | |
| messages: { | |
| oneOf: [ | |
| { type: "string" }, | |
| { | |
| type: "array", | |
| items: { | |
| type: "object", | |
| properties: { | |
| role: { type: "string", enum: ["system", "user", "assistant"] }, | |
| content: { | |
| type: "object", | |
| properties: { | |
| type: { type: "string", const: "text" }, | |
| text: { type: "string" } | |
| } | |
| } | |
| } | |
| } | |
| } | |
| ] | |
| }, | |
| model: { type: "string", default: "deepseek-chat" }, | |
| temperature: { type: "number", minimum: 0, maximum: 2, default: 0.7 }, | |
| max_tokens: { type: "integer", minimum: 1, default: 8000 }, | |
| top_p: { type: "number", minimum: 0, maximum: 1, default: 1.0 }, | |
| frequency_penalty: { type: "number", minimum: -2, maximum: 2, default: 0.1 }, | |
| presence_penalty: { type: "number", minimum: -2, maximum: 2, default: 0 } | |
| }, | |
| required: ["messages"] | |
| } | |
| } | |
| ]; | |
| } | |
| async callTool(name: string, args: any): Promise<any> { | |
| if (!this.apiKey) { | |
| throw new Error("DeepSeek API key not configured"); | |
| } | |
| if (name === "chat_completion") { | |
| return this.handleChatCompletion(args); | |
| } else if (name === "multi_turn_chat") { | |
| return this.handleMultiTurnChat(args); | |
| } | |
| throw new Error(`Unknown tool: ${name}`); | |
| } | |
| private async handleChatCompletion(args: any) { | |
| const schema = z.object({ | |
| message: z.string().optional(), | |
| messages: z.array(z.object({ | |
| role: z.enum(['system', 'user', 'assistant']), | |
| content: z.string() | |
| })).optional(), | |
| model: z.string().default('deepseek-reasoner'), | |
| temperature: z.number().min(0).max(2).default(0.7), | |
| max_tokens: z.number().positive().int().default(8000), | |
| top_p: z.number().min(0).max(1).default(1.0), | |
| frequency_penalty: z.number().min(-2).max(2).default(0.1), | |
| presence_penalty: z.number().min(-2).max(2).default(0) | |
| }); | |
| const parsed = schema.parse(args); | |
| let messages: ChatMessage[]; | |
| if (parsed.message) { | |
| messages = [{ role: 'user', content: parsed.message }]; | |
| } else if (parsed.messages) { | |
| messages = parsed.messages as ChatMessage[]; | |
| } else { | |
| throw new Error("Either 'message' or 'messages' must be provided"); | |
| } | |
| try { | |
| const response = await this.axiosInstance.post<DeepSeekResponse>( | |
| API_CONFIG.ENDPOINTS.CHAT, | |
| { | |
| messages, | |
| model: parsed.model, | |
| temperature: parsed.temperature, | |
| max_tokens: parsed.max_tokens, | |
| top_p: parsed.top_p, | |
| frequency_penalty: parsed.frequency_penalty, | |
| presence_penalty: parsed.presence_penalty | |
| } | |
| ); | |
| return { | |
| content: [{ | |
| type: "text", | |
| text: response.data.choices[0].message.content | |
| }] | |
| }; | |
| } catch (error) { | |
| console.error("Error with deepseek-reasoner, falling back to deepseek-chat"); | |
| try { | |
| const fallbackResponse = await this.axiosInstance.post<DeepSeekResponse>( | |
| API_CONFIG.ENDPOINTS.CHAT, | |
| { | |
| messages, | |
| model: 'deepseek-chat', | |
| temperature: parsed.temperature, | |
| max_tokens: parsed.max_tokens, | |
| top_p: parsed.top_p, | |
| frequency_penalty: parsed.frequency_penalty, | |
| presence_penalty: parsed.presence_penalty | |
| } | |
| ); | |
| return { | |
| content: [{ | |
| type: "text", | |
| text: "Note: Fallback to deepseek-chat due to reasoner error.\n\n" + | |
| fallbackResponse.data.choices[0].message.content | |
| }] | |
| }; | |
| } catch (fallbackError) { | |
| if (axios.isAxiosError(fallbackError)) { | |
| throw new Error(`DeepSeek API error: ${fallbackError.response?.data?.error?.message ?? fallbackError.message}`); | |
| } | |
| throw fallbackError; | |
| } | |
| } | |
| } | |
| private async handleMultiTurnChat(args: any) { | |
| const schema = z.object({ | |
| messages: z.union([ | |
| z.string(), | |
| z.array(z.object({ | |
| role: z.enum(['system', 'user', 'assistant']), | |
| content: z.object({ | |
| type: z.literal('text'), | |
| text: z.string() | |
| }) | |
| })) | |
| ]), | |
| model: z.string().default('deepseek-chat'), | |
| temperature: z.number().min(0).max(2).default(0.7), | |
| max_tokens: z.number().positive().int().default(8000), | |
| top_p: z.number().min(0).max(1).default(1.0), | |
| frequency_penalty: z.number().min(-2).max(2).default(0.1), | |
| presence_penalty: z.number().min(-2).max(2).default(0) | |
| }); | |
| const parsed = schema.parse(args); | |
| let newMessage: { role: 'user' | 'system' | 'assistant', content: string }; | |
| if (typeof parsed.messages === 'string') { | |
| newMessage = { role: 'user', content: parsed.messages }; | |
| } else { | |
| const msg = parsed.messages[0]; | |
| newMessage = { role: msg.role, content: msg.content.text }; | |
| } | |
| // Add new message to history | |
| this.conversationHistory.push(newMessage); | |
| // Transform all messages for API | |
| const transformedMessages = this.conversationHistory.map(msg => ({ | |
| role: msg.role, | |
| content: msg.content | |
| })); | |
| try { | |
| const response = await this.axiosInstance.post<DeepSeekResponse>( | |
| API_CONFIG.ENDPOINTS.CHAT, | |
| { | |
| messages: transformedMessages, | |
| model: parsed.model, | |
| temperature: parsed.temperature, | |
| max_tokens: parsed.max_tokens, | |
| top_p: parsed.top_p, | |
| frequency_penalty: parsed.frequency_penalty, | |
| presence_penalty: parsed.presence_penalty | |
| } | |
| ); | |
| // Add assistant's response to history | |
| const assistantMessage: ChatMessage = { | |
| role: 'assistant', | |
| content: response.data.choices[0].message.content | |
| }; | |
| this.conversationHistory.push(assistantMessage); | |
| return { | |
| content: [{ | |
| type: "text", | |
| text: assistantMessage.content | |
| }] | |
| }; | |
| } catch (error) { | |
| if (axios.isAxiosError(error)) { | |
| throw new Error(`DeepSeek API error: ${error.response?.data?.error?.message ?? error.message}`); | |
| } | |
| throw error; | |
| } | |
| } | |
| async listResources(): Promise<Resource[]> { | |
| const models = MODELS.map(model => ({ | |
| uri: `models://${model.id}`, | |
| name: model.name, | |
| description: model.description, | |
| mimeType: "application/json" | |
| })); | |
| const configs = MODEL_CONFIGS.map(config => ({ | |
| uri: `config://${config.id}`, | |
| name: config.name, | |
| description: config.description, | |
| mimeType: "application/json" | |
| })); | |
| return [...models, ...configs]; | |
| } | |
| async readResource(uri: string): Promise<string | Buffer> { | |
| if (uri.startsWith('models://')) { | |
| const modelId = uri.replace('models://', ''); | |
| const model = MODELS.find(m => m.id === modelId); | |
| if (model) { | |
| return JSON.stringify(model, null, 2); | |
| } | |
| } | |
| if (uri.startsWith('config://')) { | |
| // Handles config://{modelId}/{configId} but the SDK implementation was slightly different in how it matched templates. | |
| // The original implementation had "config://{modelId}" listing resources, and "config://{modelId}/{configId}" reading. | |
| // But my listResources returns "config://{configId}" essentially (based on my simplification). | |
| // Let's stick to the simpler version or adapt if needed. | |
| // If uri is exactly one of the config URIs from listResources | |
| const configId = uri.replace('config://', ''); | |
| const config = MODEL_CONFIGS.find(c => c.id === configId); | |
| if (config) { | |
| return JSON.stringify(config, null, 2); | |
| } | |
| } | |
| throw new Error(`Resource not found: ${uri}`); | |
| } | |
| } | |