repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/schema/agent_jobs.go
core/schema/agent_jobs.go
package schema

import (
	"time"
)

// Task is a reusable agent task definition: a named prompt template bound to
// a model, optionally scheduled via cron and wired to webhook notifications.
type Task struct {
	ID             string            `json:"id"`                        // UUID
	Name           string            `json:"name"`                      // user-friendly name
	Description    string            `json:"description"`               // optional free-form description
	Model          string            `json:"model"`                     // model name (must have MCP config)
	Prompt         string            `json:"prompt"`                    // template prompt; supports {{.param}} syntax
	CreatedAt      time.Time         `json:"created_at"`
	UpdatedAt      time.Time         `json:"updated_at"`
	Enabled        bool              `json:"enabled"`                   // tasks can be disabled without being deleted
	Cron           string            `json:"cron,omitempty"`            // optional cron expression for scheduled runs
	CronParameters map[string]string `json:"cron_parameters,omitempty"` // template parameters applied on cron-triggered runs

	// Webhooks fire on job completion (success and failure alike). Payload
	// templates may reference: {{.Job}}, {{.Task}}, {{.Result}} (on success),
	// {{.Error}} (empty string on success), and {{.Status}}.
	Webhooks []WebhookConfig `json:"webhooks,omitempty"`

	// MultimediaSources are URLs fetched when a cron job executes; each
	// source carries its own headers for authentication/authorization.
	MultimediaSources []MultimediaSourceConfig `json:"multimedia_sources,omitempty"`
}

// WebhookConfig describes one webhook endpoint notified on job completion.
type WebhookConfig struct {
	URL     string            `json:"url"`               // webhook endpoint URL
	Method  string            `json:"method"`            // HTTP method (POST, PUT, PATCH); POST when unset
	Headers map[string]string `json:"headers,omitempty"` // custom headers (e.g. Authorization)

	// PayloadTemplate overrides the default JSON payload. The same template
	// variables as Task.Webhooks apply: {{.Job}}, {{.Task}}, {{.Result}},
	// {{.Error}}, {{.Status}}.
	PayloadTemplate string `json:"payload_template,omitempty"`
}

// MultimediaSourceConfig describes one multimedia URL fetched by a cron job,
// with optional custom HTTP headers.
type MultimediaSourceConfig struct {
	Type    string            `json:"type"`              // "image", "video", "audio", "file"
	URL     string            `json:"url"`               // URL to fetch from
	Headers map[string]string `json:"headers,omitempty"` // custom headers for the HTTP request (e.g. Authorization)
}

// MultimediaAttachment groups multimedia references by kind.
type MultimediaAttachment struct {
	Images []string `json:"images,omitempty"`
	Videos []string `json:"videos,omitempty"`
	Audios []string `json:"audios,omitempty"`
	Files  []string `json:"files,omitempty"`
}

// JobStatus represents the lifecycle state of a job.
type JobStatus string

// Job lifecycle states.
const (
	JobStatusPending   JobStatus = "pending"
	JobStatusRunning   JobStatus = "running"
	JobStatusCompleted JobStatus = "completed"
	JobStatusFailed    JobStatus = "failed"
	JobStatusCancelled JobStatus = "cancelled"
)

// Job is a single execution instance of a Task.
type Job struct {
	ID          string            `json:"id"`               // UUID
	TaskID      string            `json:"task_id"`          // reference to the owning Task
	Status      JobStatus         `json:"status"`           // pending, running, completed, failed, cancelled
	Parameters  map[string]string `json:"parameters"`       // template parameters used for this run
	Result      string            `json:"result,omitempty"` // agent response
	Error       string            `json:"error,omitempty"`  // error message if the run failed
	StartedAt   *time.Time        `json:"started_at,omitempty"`
	CompletedAt *time.Time        `json:"completed_at,omitempty"`
	CreatedAt   time.Time         `json:"created_at"`
	TriggeredBy string            `json:"triggered_by"` // "manual", "cron", "api"

	// Webhook delivery tracking.
	WebhookSent   bool       `json:"webhook_sent,omitempty"`
	WebhookSentAt *time.Time `json:"webhook_sent_at,omitempty"`
	WebhookError  string     `json:"webhook_error,omitempty"` // error if webhook delivery failed

	// Traces record the execution steps (reasoning, tool calls, tool results).
	Traces []JobTrace `json:"traces,omitempty"`

	// Multimedia supplied on manual execution; entries may be URLs or
	// base64-encoded data URIs.
	Images []string `json:"images,omitempty"`
	Videos []string `json:"videos,omitempty"`
	Audios []string `json:"audios,omitempty"`
	Files  []string `json:"files,omitempty"`
}

// JobTrace is one entry of a job's execution trace.
type JobTrace struct {
	Type      string                 `json:"type"`                // "reasoning", "tool_call", "tool_result", "status"
	Content   string                 `json:"content"`             // the actual trace content
	Timestamp time.Time              `json:"timestamp"`           // when this trace occurred
	ToolName  string                 `json:"tool_name,omitempty"` // tool name (for tool_call/tool_result entries)
	Arguments map[string]interface{} `json:"arguments,omitempty"` // tool arguments or result data
}

// JobExecutionRequest is the API request to execute a task as a new job.
type JobExecutionRequest struct {
	TaskID     string            `json:"task_id"`    // required
	Parameters map[string]string `json:"parameters"` // optional, substituted into the prompt template

	// Optional multimedia for manual execution; entries may be URLs or
	// base64-encoded data URIs.
	Images []string `json:"images,omitempty"`
	Videos []string `json:"videos,omitempty"`
	Audios []string `json:"audios,omitempty"`
	Files  []string `json:"files,omitempty"`
}

// JobExecutionResponse is returned after a job has been created.
type JobExecutionResponse struct {
	JobID  string `json:"job_id"`
	Status string `json:"status"`
	URL    string `json:"url"` // URL to poll for job status
}

// TasksFile is the on-disk structure of agent_tasks.json.
type TasksFile struct {
	Tasks []Task `json:"tasks"`
}

// JobsFile is the on-disk structure of agent_jobs.json.
type JobsFile struct {
	Jobs        []Job     `json:"jobs"`
	LastCleanup time.Time `json:"last_cleanup,omitempty"`
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/schema/message_test.go
core/schema/message_test.go
// Package schema_test exercises Messages.ToProto, the conversion from the
// OpenAI-style message schema to the internal proto message representation.
package schema_test

import (
	"encoding/json"

	. "github.com/mudler/LocalAI/core/schema"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("LLM tests", func() {
	Context("ToProtoMessages conversion", func() {
		// Plain string content is passed through verbatim; optional fields
		// (Name, ToolCalls) stay empty.
		It("should convert basic message with string content", func() {
			messages := Messages{
				{
					Role:    "user",
					Content: "Hello, world!",
				},
			}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(1))
			Expect(protoMessages[0].Role).To(Equal("user"))
			Expect(protoMessages[0].Content).To(Equal("Hello, world!"))
			Expect(protoMessages[0].Name).To(BeEmpty())
			Expect(protoMessages[0].ToolCalls).To(BeEmpty())
		})

		// nil content must not panic; it maps to the empty string.
		It("should convert message with nil content to empty string", func() {
			messages := Messages{
				{
					Role:    "assistant",
					Content: nil,
				},
			}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(1))
			Expect(protoMessages[0].Role).To(Equal("assistant"))
			Expect(protoMessages[0].Content).To(Equal(""))
		})

		// Multimodal array content: the text parts are concatenated in order.
		It("should convert message with array content (multimodal)", func() {
			messages := Messages{
				{
					Role: "user",
					Content: []interface{}{
						map[string]interface{}{
							"type": "text",
							"text": "Hello",
						},
						map[string]interface{}{
							"type": "text",
							"text": " World",
						},
					},
				},
			}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(1))
			Expect(protoMessages[0].Role).To(Equal("user"))
			Expect(protoMessages[0].Content).To(Equal("Hello World"))
		})

		// ToolCalls are serialized to a JSON string on the proto message;
		// the test round-trips that JSON to verify it is well-formed.
		It("should convert message with tool_calls", func() {
			messages := Messages{
				{
					Role:    "assistant",
					Content: "I'll call a function",
					ToolCalls: []ToolCall{
						{
							Index: 0,
							ID:    "call_123",
							Type:  "function",
							FunctionCall: FunctionCall{
								Name:      "get_weather",
								Arguments: `{"location": "San Francisco"}`,
							},
						},
					},
				},
			}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(1))
			Expect(protoMessages[0].Role).To(Equal("assistant"))
			Expect(protoMessages[0].Content).To(Equal("I'll call a function"))
			Expect(protoMessages[0].ToolCalls).NotTo(BeEmpty())
			// Verify tool_calls JSON is valid
			var toolCalls []ToolCall
			err := json.Unmarshal([]byte(protoMessages[0].ToolCalls), &toolCalls)
			Expect(err).NotTo(HaveOccurred())
			Expect(toolCalls).To(HaveLen(1))
			Expect(toolCalls[0].ID).To(Equal("call_123"))
			Expect(toolCalls[0].FunctionCall.Name).To(Equal("get_weather"))
		})

		// The Name field (used by tool-role messages) is carried through.
		It("should convert message with name field", func() {
			messages := Messages{
				{
					Role:    "tool",
					Content: "Function result",
					Name:    "get_weather",
				},
			}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(1))
			Expect(protoMessages[0].Role).To(Equal("tool"))
			Expect(protoMessages[0].Content).To(Equal("Function result"))
			Expect(protoMessages[0].Name).To(Equal("get_weather"))
		})

		// Assistant messages that only carry tool calls have nil content;
		// content maps to "" while the tool calls survive the round trip.
		It("should convert message with tool_calls and nil content", func() {
			messages := Messages{
				{
					Role:    "assistant",
					Content: nil,
					ToolCalls: []ToolCall{
						{
							Index: 0,
							ID:    "call_456",
							Type:  "function",
							FunctionCall: FunctionCall{
								Name:      "search",
								Arguments: `{"query": "test"}`,
							},
						},
					},
				},
			}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(1))
			Expect(protoMessages[0].Role).To(Equal("assistant"))
			Expect(protoMessages[0].Content).To(Equal(""))
			Expect(protoMessages[0].ToolCalls).NotTo(BeEmpty())
			var toolCalls []ToolCall
			err := json.Unmarshal([]byte(protoMessages[0].ToolCalls), &toolCalls)
			Expect(err).NotTo(HaveOccurred())
			Expect(toolCalls).To(HaveLen(1))
			Expect(toolCalls[0].FunctionCall.Name).To(Equal("search"))
		})

		// Conversion preserves ordering and length over multiple messages.
		It("should convert multiple messages", func() {
			messages := Messages{
				{
					Role:    "user",
					Content: "Hello",
				},
				{
					Role:    "assistant",
					Content: "Hi there!",
				},
				{
					Role:    "user",
					Content: "How are you?",
				},
			}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(3))
			Expect(protoMessages[0].Role).To(Equal("user"))
			Expect(protoMessages[0].Content).To(Equal("Hello"))
			Expect(protoMessages[1].Role).To(Equal("assistant"))
			Expect(protoMessages[1].Content).To(Equal("Hi there!"))
			Expect(protoMessages[2].Role).To(Equal("user"))
			Expect(protoMessages[2].Content).To(Equal("How are you?"))
		})

		// Empty input produces an empty (zero-length) output.
		It("should handle empty messages slice", func() {
			messages := Messages{}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(0))
		})

		// All optional fields set at once: Name and ToolCalls are carried
		// together with the content.
		It("should handle message with all optional fields", func() {
			messages := Messages{
				{
					Role:    "assistant",
					Content: "I'll help you",
					Name:    "test_tool",
					ToolCalls: []ToolCall{
						{
							Index: 0,
							ID:    "call_789",
							Type:  "function",
							FunctionCall: FunctionCall{
								Name:      "test_function",
								Arguments: `{"param": "value"}`,
							},
						},
					},
				},
			}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(1))
			Expect(protoMessages[0].Role).To(Equal("assistant"))
			Expect(protoMessages[0].Content).To(Equal("I'll help you"))
			Expect(protoMessages[0].Name).To(Equal("test_tool"))
			Expect(protoMessages[0].ToolCalls).NotTo(BeEmpty())
			var toolCalls []ToolCall
			err := json.Unmarshal([]byte(protoMessages[0].ToolCalls), &toolCalls)
			Expect(err).NotTo(HaveOccurred())
			Expect(toolCalls).To(HaveLen(1))
		})

		// Empty string content is distinct from nil but maps to "" as well.
		It("should handle message with empty string content", func() {
			messages := Messages{
				{
					Role:    "user",
					Content: "",
				},
			}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(1))
			Expect(protoMessages[0].Role).To(Equal("user"))
			Expect(protoMessages[0].Content).To(Equal(""))
		})

		// Non-text parts (e.g. images) in array content are skipped; only
		// text parts contribute to the proto Content string.
		It("should handle message with array content containing non-text parts", func() {
			messages := Messages{
				{
					Role: "user",
					Content: []interface{}{
						map[string]interface{}{
							"type": "text",
							"text": "Hello",
						},
						map[string]interface{}{
							"type": "image",
							"url":  "https://example.com/image.jpg",
						},
					},
				},
			}
			protoMessages := messages.ToProto()
			Expect(protoMessages).To(HaveLen(1))
			Expect(protoMessages[0].Role).To(Equal("user"))
			// Should only extract text parts
			Expect(protoMessages[0].Content).To(Equal("Hello"))
		})
	})
})
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/schema/localai.go
core/schema/localai.go
package schema

import (
	"encoding/json"
	"time"

	gopsutil "github.com/shirou/gopsutil/v3/process"
)

// BackendMonitorRequest asks for resource metrics of a loaded model's backend.
type BackendMonitorRequest struct {
	BasicModelRequest
}

// TokenMetricsRequest asks for token metrics of a model.
type TokenMetricsRequest struct {
	BasicModelRequest
}

// BackendMonitorResponse reports process-level resource usage for a backend.
type BackendMonitorResponse struct {
	MemoryInfo    *gopsutil.MemoryInfoStat
	MemoryPercent float32
	CPUPercent    float64
}

// GalleryResponse identifies an asynchronous gallery operation and where to
// poll its status.
type GalleryResponse struct {
	ID        string `json:"uuid"`
	StatusURL string `json:"status"`
}

// VideoRequest is the request body for video generation.
type VideoRequest struct {
	BasicModelRequest
	Prompt         string  `json:"prompt" yaml:"prompt"`
	NegativePrompt string  `json:"negative_prompt" yaml:"negative_prompt"`
	StartImage     string  `json:"start_image" yaml:"start_image"`
	EndImage       string  `json:"end_image" yaml:"end_image"`
	Width          int32   `json:"width" yaml:"width"`
	Height         int32   `json:"height" yaml:"height"`
	NumFrames      int32   `json:"num_frames" yaml:"num_frames"`
	FPS            int32   `json:"fps" yaml:"fps"`
	Seconds        string  `json:"seconds,omitempty" yaml:"seconds,omitempty"`
	Size           string  `json:"size,omitempty" yaml:"size,omitempty"`
	InputReference string  `json:"input_reference,omitempty" yaml:"input_reference,omitempty"`
	Seed           int32   `json:"seed" yaml:"seed"`
	CFGScale       float32 `json:"cfg_scale" yaml:"cfg_scale"`
	Step           int32   `json:"step" yaml:"step"`
	ResponseFormat string  `json:"response_format" yaml:"response_format"`
}

// @Description TTS request body
type TTSRequest struct {
	BasicModelRequest
	Input    string `json:"input" yaml:"input"`                                   // text input
	Voice    string `json:"voice" yaml:"voice"`                                   // voice audio file or speaker id
	Backend  string `json:"backend" yaml:"backend"`
	Language string `json:"language,omitempty" yaml:"language,omitempty"`         // (optional) language to use with TTS model
	Format   string `json:"response_format,omitempty" yaml:"response_format,omitempty"` // (optional) output format
}

// @Description VAD request body
type VADRequest struct {
	BasicModelRequest
	Audio []float32 `json:"audio" yaml:"audio"` // raw audio samples to run voice-activity detection on
}

// VADSegment is one detected speech segment, in the backend's time units
// (presumably seconds — confirm against the VAD backend).
type VADSegment struct {
	Start float32 `json:"start" yaml:"start"`
	End   float32 `json:"end" yaml:"end"`
}

// VADResponse lists the detected speech segments.
type VADResponse struct {
	Segments []VADSegment `json:"segments" yaml:"segments"`
}

// StoreCommon holds fields shared by all vector-store requests.
type StoreCommon struct {
	Backend string `json:"backend,omitempty" yaml:"backend,omitempty"`
}

// StoresSet upserts key/value pairs into a vector store.
type StoresSet struct {
	Store string `json:"store,omitempty" yaml:"store,omitempty"`

	Keys   [][]float32 `json:"keys" yaml:"keys"`
	Values []string    `json:"values" yaml:"values"`

	StoreCommon
}

// StoresDelete removes the given keys from a vector store.
type StoresDelete struct {
	Store string `json:"store,omitempty" yaml:"store,omitempty"`

	Keys [][]float32 `json:"keys"`

	StoreCommon
}

// StoresGet fetches the values for the given keys.
type StoresGet struct {
	Store string `json:"store,omitempty" yaml:"store,omitempty"`

	Keys [][]float32 `json:"keys" yaml:"keys"`

	StoreCommon
}

// StoresGetResponse returns key/value pairs from a store lookup.
type StoresGetResponse struct {
	Keys   [][]float32 `json:"keys" yaml:"keys"`
	Values []string    `json:"values" yaml:"values"`
}

// StoresFind is a top-k similarity search against a vector store.
type StoresFind struct {
	Store string `json:"store,omitempty" yaml:"store,omitempty"`

	Key  []float32 `json:"key" yaml:"key"`
	Topk int       `json:"topk" yaml:"topk"`

	StoreCommon
}

// StoresFindResponse returns the nearest keys with their values and
// similarity scores, index-aligned across the three slices.
type StoresFindResponse struct {
	Keys         [][]float32 `json:"keys" yaml:"keys"`
	Values       []string    `json:"values" yaml:"values"`
	Similarities []float32   `json:"similarities" yaml:"similarities"`
}

// NodeData describes a P2P node and when it was last seen.
type NodeData struct {
	Name          string
	ID            string
	TunnelAddress string
	ServiceID     string
	LastSeen      time.Time
}

// IsOnline reports whether the node was seen recently.
func (d NodeData) IsOnline() bool {
	now := time.Now()
	// if the node was seen in the last 40 seconds, it's online
	return now.Sub(d.LastSeen) < 40*time.Second
}

// P2PNodesResponse lists known P2P nodes, split into worker and federated sets.
type P2PNodesResponse struct {
	Nodes          []NodeData `json:"nodes" yaml:"nodes"`
	FederatedNodes []NodeData `json:"federated_nodes" yaml:"federated_nodes"`
}

// SysInfoModel identifies a loaded model.
type SysInfoModel struct {
	ID string `json:"id"`
}

// SystemInformationResponse reports available backends and loaded models.
type SystemInformationResponse struct {
	Backends []string       `json:"backends"`
	Models   []SysInfoModel `json:"loaded_models"`
}

// DetectionRequest is the request body for object detection on an image.
type DetectionRequest struct {
	BasicModelRequest
	Image string `json:"image"`
}

// DetectionResponse lists the detections found in the image.
type DetectionResponse struct {
	Detections []Detection `json:"detections"`
}

// Detection is one detected object: a bounding box plus its class name.
type Detection struct {
	X         float32 `json:"x"`
	Y         float32 `json:"y"`
	Width     float32 `json:"width"`
	Height    float32 `json:"height"`
	ClassName string  `json:"class_name"`
}

// ImportModelRequest asks to import a model from a URI; Preferences is kept
// as raw JSON so the importer can interpret it lazily.
type ImportModelRequest struct {
	URI         string          `json:"uri"`
	Preferences json.RawMessage `json:"preferences,omitempty"`
}

// SettingsResponse is the response type for settings API operations
type SettingsResponse struct {
	Success bool   `json:"success"`
	Error   string `json:"error,omitempty"`
	Message string `json:"message,omitempty"`
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/render.go
core/http/render.go
package http

import (
	"embed"
	"fmt"
	"html/template"
	"io"
	"io/fs"
	"net/http"
	"strings"

	"github.com/Masterminds/sprig/v3"
	"github.com/labstack/echo/v4"
	"github.com/microcosm-cc/bluemonday"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/core/schema"
	"github.com/russross/blackfriday"
)

//go:embed views/*
var viewsfs embed.FS

// TemplateRenderer is a custom template renderer for Echo
type TemplateRenderer struct {
	templates *template.Template
}

// Render renders a template document
func (t *TemplateRenderer) Render(w io.Writer, name string, data interface{}, c echo.Context) error {
	return t.templates.ExecuteTemplate(w, name, data)
}

// notFoundHandler serves a 404 as JSON when the client looks like an API
// consumer (JSON Content-Type, or an Accept header without text/html), and
// as the rendered "views/404" HTML page otherwise.
func notFoundHandler(c echo.Context) error {
	// Check if the request accepts JSON
	contentType := c.Request().Header.Get("Content-Type")
	accept := c.Request().Header.Get("Accept")
	if strings.Contains(contentType, "application/json") || !strings.Contains(accept, "text/html") {
		// The client expects a JSON response
		return c.JSON(http.StatusNotFound, schema.ErrorResponse{
			Error: &schema.APIError{Message: "Resource not found", Code: http.StatusNotFound},
		})
	} else {
		// The client expects an HTML response
		return c.Render(http.StatusNotFound, "views/404", map[string]interface{}{
			"BaseURL": middleware.BaseURL(c),
		})
	}
}

// renderEngine builds the TemplateRenderer by parsing every .html file under
// the embedded views/ tree. Each file is registered under its path without
// the .html extension (e.g. "views/index.html" -> "views/index"). Templates
// gain sprig's function map plus the MDToHTML markdown helper.
func renderEngine() *TemplateRenderer {
	// Parse all templates from embedded filesystem
	tmpl := template.New("").Funcs(sprig.FuncMap())
	tmpl = tmpl.Funcs(template.FuncMap{
		"MDToHTML": markDowner,
	})

	// Recursively walk through embedded filesystem and parse all HTML templates
	err := fs.WalkDir(viewsfs, "views", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if !d.IsDir() && strings.HasSuffix(path, ".html") {
			data, err := viewsfs.ReadFile(path)
			if err == nil {
				// Remove .html extension to get template name (e.g., "views/index.html" -> "views/index")
				templateName := strings.TrimSuffix(path, ".html")
				_, err := tmpl.New(templateName).Parse(string(data))
				if err != nil {
					// If parsing fails, try parsing without explicit name (for templates with {{define}})
					// NOTE(review): the fallback's error is discarded, so a template
					// that fails both parses is silently skipped — confirm this
					// best-effort behavior is intended.
					tmpl.Parse(string(data))
				}
			}
		}
		return nil
	})
	if err != nil {
		// Log error but continue - templates might still work
		fmt.Printf("Error walking views directory: %v\n", err)
	}

	return &TemplateRenderer{
		templates: tmpl,
	}
}

// markDowner renders markdown to sanitized HTML for use inside templates:
// blackfriday converts the markdown, then bluemonday's UGC policy strips
// unsafe markup before it is marked as trusted template.HTML.
// NOTE(review): with more than one argument, Sprintf("%s", args...) formats
// only the first and appends a %!(EXTRA ...) marker — callers appear to pass
// a single value; confirm.
func markDowner(args ...interface{}) template.HTML {
	s := blackfriday.MarkdownCommon([]byte(fmt.Sprintf("%s", args...)))
	return template.HTML(bluemonday.UGCPolicy().Sanitize(string(s)))
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/openai_videos_test.go
core/http/openai_videos_test.go
package http_test

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"github.com/mudler/LocalAI/core/application"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/pkg/system"

	"github.com/mudler/LocalAI/pkg/grpc"
	pb "github.com/mudler/LocalAI/pkg/grpc/proto"

	"fmt"

	. "github.com/mudler/LocalAI/core/http"
	"github.com/labstack/echo/v4"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// API key used to authenticate against the test server.
const testAPIKey = "joshua"

// fakeAI is a no-op backend implementation registered under an embedded://
// URI so the HTTP layer can be tested without a real model backend. Every
// method returns the zero value and a nil error.
type fakeAI struct{}

func (f *fakeAI) Busy() bool                                { return false }
func (f *fakeAI) Lock()                                     {}
func (f *fakeAI) Unlock()                                   {}
func (f *fakeAI) Locking() bool                             { return false }
func (f *fakeAI) Predict(*pb.PredictOptions) (string, error) { return "", nil }
func (f *fakeAI) PredictStream(*pb.PredictOptions, chan string) error { return nil }
func (f *fakeAI) Load(*pb.ModelOptions) error               { return nil }
func (f *fakeAI) Embeddings(*pb.PredictOptions) ([]float32, error) { return nil, nil }
func (f *fakeAI) GenerateImage(*pb.GenerateImageRequest) error { return nil }
func (f *fakeAI) GenerateVideo(*pb.GenerateVideoRequest) error { return nil }
func (f *fakeAI) Detect(*pb.DetectOptions) (pb.DetectResponse, error) {
	return pb.DetectResponse{}, nil
}
func (f *fakeAI) AudioTranscription(*pb.TranscriptRequest) (pb.TranscriptResult, error) {
	return pb.TranscriptResult{}, nil
}
func (f *fakeAI) TTS(*pb.TTSRequest) error                  { return nil }
func (f *fakeAI) SoundGeneration(*pb.SoundGenerationRequest) error { return nil }
func (f *fakeAI) TokenizeString(*pb.PredictOptions) (pb.TokenizationResponse, error) {
	return pb.TokenizationResponse{}, nil
}
func (f *fakeAI) Status() (pb.StatusResponse, error) {
	return pb.StatusResponse{}, nil
}
func (f *fakeAI) StoresSet(*pb.StoresSetOptions) error      { return nil }
func (f *fakeAI) StoresDelete(*pb.StoresDeleteOptions) error { return nil }
func (f *fakeAI) StoresGet(*pb.StoresGetOptions) (pb.StoresGetResult, error) {
	return pb.StoresGetResult{}, nil
}
func (f *fakeAI) StoresFind(*pb.StoresFindOptions) (pb.StoresFindResult, error) {
	return pb.StoresFindResult{}, nil
}
func (f *fakeAI) VAD(*pb.VADRequest) (pb.VADResponse, error) {
	return pb.VADResponse{}, nil
}

var _ = Describe("OpenAI /v1/videos (embedded backend)", func() {
	var tmpdir string
	var appServer *application.Application
	var app *echo.Echo
	var ctx context.Context
	var cancel context.CancelFunc

	// Each spec gets a fresh temp model dir, a fresh application wired to the
	// fakeAI embedded backend, and an API key requirement.
	BeforeEach(func() {
		var err error
		tmpdir, err = os.MkdirTemp("", "")
		Expect(err).ToNot(HaveOccurred())
		modelDir := filepath.Join(tmpdir, "models")
		err = os.Mkdir(modelDir, 0750)
		Expect(err).ToNot(HaveOccurred())
		ctx, cancel = context.WithCancel(context.Background())

		systemState, err := system.GetSystemState(
			system.WithModelPath(modelDir),
		)
		Expect(err).ToNot(HaveOccurred())

		// Register the stub backend under an embedded:// URI.
		grpc.Provide("embedded://fake", &fakeAI{})

		appServer, err = application.New(
			config.WithContext(ctx),
			config.WithSystemState(systemState),
			config.WithApiKeys([]string{testAPIKey}),
			config.WithGeneratedContentDir(tmpdir),
			config.WithExternalBackend("fake", "embedded://fake"),
		)
		Expect(err).ToNot(HaveOccurred())
	})

	// Tear down: cancel the app context, shut the HTTP server down with a
	// bounded timeout, and remove the temp dir.
	AfterEach(func() {
		cancel()
		if app != nil {
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			_ = app.Shutdown(ctx)
		}
		_ = os.RemoveAll(tmpdir)
	})

	It("accepts OpenAI-style video create and delegates to backend", func() {
		var err error
		app, err = API(appServer)
		Expect(err).ToNot(HaveOccurred())

		// Start the server in the background; ErrServerClosed is expected on shutdown.
		go func() {
			if err := app.Start("127.0.0.1:9091"); err != nil && err != http.ErrServerClosed {
				// Log error if needed
			}
		}()

		// wait for server
		client := &http.Client{Timeout: 5 * time.Second}
		Eventually(func() error {
			req, _ := http.NewRequest("GET", "http://127.0.0.1:9091/v1/models", nil)
			req.Header.Set("Authorization", "Bearer "+testAPIKey)
			resp, err := client.Do(req)
			if err != nil {
				return err
			}
			defer resp.Body.Close()
			if resp.StatusCode >= 400 {
				return fmt.Errorf("bad status: %d", resp.StatusCode)
			}
			return nil
		}, "30s", "500ms").Should(Succeed())

		// POST an OpenAI-style video generation request against the fake backend.
		body := map[string]interface{}{
			"model":   "fake-model",
			"backend": "fake",
			"prompt":  "a test video",
			"size":    "256x256",
			"seconds": "1",
		}
		payload, err := json.Marshal(body)
		Expect(err).ToNot(HaveOccurred())

		req, err := http.NewRequest("POST", "http://127.0.0.1:9091/v1/videos", bytes.NewBuffer(payload))
		Expect(err).ToNot(HaveOccurred())
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("Authorization", "Bearer "+testAPIKey)

		resp, err := client.Do(req)
		Expect(err).ToNot(HaveOccurred())
		defer resp.Body.Close()
		Expect(resp.StatusCode).To(Equal(200))

		dat, err := io.ReadAll(resp.Body)
		Expect(err).ToNot(HaveOccurred())

		// The response must carry a data array whose first entry links to the
		// generated .mp4 under /generated-videos/.
		var out map[string]interface{}
		err = json.Unmarshal(dat, &out)
		Expect(err).ToNot(HaveOccurred())
		data, ok := out["data"].([]interface{})
		Expect(ok).To(BeTrue())
		Expect(len(data)).To(BeNumerically(">", 0))
		first := data[0].(map[string]interface{})
		url, ok := first["url"].(string)
		Expect(ok).To(BeTrue())
		Expect(url).To(ContainSubstring("/generated-videos/"))
		Expect(url).To(ContainSubstring(".mp4"))
	})
})
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/app_test.go
core/http/app_test.go
package http_test import ( "bytes" "context" "encoding/json" "fmt" "io" "net/http" "os" "path/filepath" "runtime" "time" "github.com/mudler/LocalAI/core/application" "github.com/mudler/LocalAI/core/config" . "github.com/mudler/LocalAI/core/http" "github.com/mudler/LocalAI/core/schema" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/gallery" "github.com/mudler/LocalAI/pkg/downloader" "github.com/mudler/LocalAI/pkg/system" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "gopkg.in/yaml.v3" "github.com/mudler/xlog" openaigo "github.com/otiai10/openaigo" "github.com/sashabaranov/go-openai" "github.com/sashabaranov/go-openai/jsonschema" ) const apiKey = "joshua" const bearerKey = "Bearer " + apiKey const testPrompt = `### System: You are an AI assistant that follows instruction extremely well. Help as much as you can. ### Instruction: Say hello. ### Response:` type modelApplyRequest struct { ID string `json:"id"` URL string `json:"url"` ConfigURL string `json:"config_url"` Name string `json:"name"` Overrides map[string]interface{} `json:"overrides"` } func getModelStatus(url string) (response map[string]interface{}) { // Create the HTTP request req, err := http.NewRequest("GET", url, nil) req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", bearerKey) if err != nil { fmt.Println("Error creating request:", err) return } client := &http.Client{} resp, err := client.Do(req) if err != nil { fmt.Println("Error sending request:", err) return } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { fmt.Println("Error reading response body:", err) return } // Unmarshal the response into a map[string]interface{} err = json.Unmarshal(body, &response) if err != nil { fmt.Println("Error unmarshaling JSON response:", err) return } return } func getModels(url string) ([]gallery.GalleryModel, error) { response := []gallery.GalleryModel{} uri := downloader.URI(url) // TODO: No tests currently seem to exercise 
file:// urls. Fix? err := uri.ReadWithAuthorizationAndCallback(context.TODO(), "", bearerKey, func(url string, i []byte) error { // Unmarshal YAML data into a struct return json.Unmarshal(i, &response) }) return response, err } func postModelApplyRequest(url string, request modelApplyRequest) (response map[string]interface{}) { //url := "http://localhost:AI/models/apply" // Create the request payload payload, err := json.Marshal(request) if err != nil { fmt.Println("Error marshaling JSON:", err) return } // Create the HTTP request req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload)) if err != nil { fmt.Println("Error creating request:", err) return } req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", bearerKey) // Make the request client := &http.Client{} resp, err := client.Do(req) if err != nil { fmt.Println("Error making request:", err) return } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { fmt.Println("Error reading response body:", err) return } // Unmarshal the response into a map[string]interface{} err = json.Unmarshal(body, &response) if err != nil { fmt.Println("Error unmarshaling JSON response:", err) return } return } func postRequestJSON[B any](url string, bodyJson *B) error { payload, err := json.Marshal(bodyJson) if err != nil { return err } GinkgoWriter.Printf("POST %s: %s\n", url, string(payload)) req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload)) if err != nil { return err } req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", bearerKey) client := &http.Client{} resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode < 200 || resp.StatusCode >= 400 { return fmt.Errorf("unexpected status code: %d, body: %s", resp.StatusCode, string(body)) } return nil } func postRequestResponseJSON[B1 any, B2 any](url string, reqJson *B1, 
respJson *B2) error { payload, err := json.Marshal(reqJson) if err != nil { return err } GinkgoWriter.Printf("POST %s: %s\n", url, string(payload)) req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload)) if err != nil { return err } req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", bearerKey) client := &http.Client{} resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode < 200 || resp.StatusCode >= 400 { return fmt.Errorf("unexpected status code: %d, body: %s", resp.StatusCode, string(body)) } return json.Unmarshal(body, respJson) } func putRequestJSON[B any](url string, bodyJson *B) error { payload, err := json.Marshal(bodyJson) if err != nil { return err } GinkgoWriter.Printf("PUT %s: %s\n", url, string(payload)) req, err := http.NewRequest("PUT", url, bytes.NewBuffer(payload)) if err != nil { return err } req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", bearerKey) client := &http.Client{} resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode < 200 || resp.StatusCode >= 400 { return fmt.Errorf("unexpected status code: %d, body: %s", resp.StatusCode, string(body)) } return nil } func postInvalidRequest(url string) (error, int) { req, err := http.NewRequest("POST", url, bytes.NewBufferString("invalid request")) if err != nil { return err, -1 } req.Header.Set("Content-Type", "application/json") client := &http.Client{} resp, err := client.Do(req) if err != nil { return err, -1 } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return err, -1 } if resp.StatusCode < 200 || resp.StatusCode >= 400 { return fmt.Errorf("unexpected status code: %d, body: %s", resp.StatusCode, string(body)), resp.StatusCode } return nil, resp.StatusCode } func 
getRequest(url string, header http.Header) (error, int, []byte) { req, err := http.NewRequest("GET", url, nil) if err != nil { return err, -1, nil } req.Header = header client := &http.Client{} resp, err := client.Do(req) if err != nil { return err, -1, nil } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return err, -1, nil } return nil, resp.StatusCode, body } const bertEmbeddingsURL = `https://gist.githubusercontent.com/mudler/0a080b166b87640e8644b09c2aee6e3b/raw/f0e8c26bb72edc16d9fbafbfd6638072126ff225/bert-embeddings-gallery.yaml` var _ = Describe("API test", func() { var app *echo.Echo var client *openai.Client var client2 *openaigo.Client var c context.Context var cancel context.CancelFunc var tmpdir string var modelDir string commonOpts := []config.AppOption{ config.WithDebug(true), } Context("API with ephemeral models", func() { BeforeEach(func(sc SpecContext) { var err error tmpdir, err = os.MkdirTemp("", "") Expect(err).ToNot(HaveOccurred()) backendPath := os.Getenv("BACKENDS_PATH") modelDir = filepath.Join(tmpdir, "models") err = os.Mkdir(modelDir, 0750) Expect(err).ToNot(HaveOccurred()) c, cancel = context.WithCancel(context.Background()) g := []gallery.GalleryModel{ { Metadata: gallery.Metadata{ Name: "bert", URL: bertEmbeddingsURL, }, }, { Metadata: gallery.Metadata{ Name: "bert2", URL: bertEmbeddingsURL, AdditionalFiles: []gallery.File{{Filename: "foo.yaml", URI: bertEmbeddingsURL}}, }, Overrides: map[string]interface{}{"foo": "bar"}, }, } out, err := yaml.Marshal(g) Expect(err).ToNot(HaveOccurred()) err = os.WriteFile(filepath.Join(modelDir, "gallery_simple.yaml"), out, 0600) Expect(err).ToNot(HaveOccurred()) galleries := []config.Gallery{ { Name: "test", URL: "file://" + filepath.Join(modelDir, "gallery_simple.yaml"), }, } systemState, err := system.GetSystemState( system.WithBackendPath(backendPath), system.WithModelPath(modelDir), ) Expect(err).ToNot(HaveOccurred()) application, err := application.New( 
append(commonOpts, config.WithContext(c), config.WithSystemState(systemState), config.WithGalleries(galleries), config.WithApiKeys([]string{apiKey}), )...) Expect(err).ToNot(HaveOccurred()) app, err = API(application) Expect(err).ToNot(HaveOccurred()) go func() { if err := app.Start("127.0.0.1:9090"); err != nil && err != http.ErrServerClosed { xlog.Error("server error", "error", err) } }() defaultConfig := openai.DefaultConfig(apiKey) defaultConfig.BaseURL = "http://127.0.0.1:9090/v1" client2 = openaigo.NewClient("") client2.BaseURL = defaultConfig.BaseURL // Wait for API to be ready client = openai.NewClientWithConfig(defaultConfig) Eventually(func() error { _, err := client.ListModels(context.TODO()) return err }, "2m").ShouldNot(HaveOccurred()) }) AfterEach(func(sc SpecContext) { cancel() if app != nil { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() err := app.Shutdown(ctx) Expect(err).ToNot(HaveOccurred()) } err := os.RemoveAll(tmpdir) Expect(err).ToNot(HaveOccurred()) _, err = os.ReadDir(tmpdir) Expect(err).To(HaveOccurred()) }) Context("Auth Tests", func() { It("Should fail if the api key is missing", func() { err, sc := postInvalidRequest("http://127.0.0.1:9090/models/available") Expect(err).ToNot(BeNil()) Expect(sc).To(Equal(401)) }) }) Context("URL routing Tests", func() { It("Should support reverse-proxy when unauthenticated", func() { err, sc, body := getRequest("http://127.0.0.1:9090/myprefix/", http.Header{ "X-Forwarded-Proto": {"https"}, "X-Forwarded-Host": {"example.org"}, "X-Forwarded-Prefix": {"/myprefix/"}, }) Expect(err).To(BeNil(), "error") Expect(sc).To(Equal(401), "status code") Expect(string(body)).To(ContainSubstring(`<base href="https://example.org/myprefix/" />`), "body") }) It("Should support reverse-proxy when authenticated", func() { err, sc, body := getRequest("http://127.0.0.1:9090/myprefix/", http.Header{ "Authorization": {bearerKey}, "X-Forwarded-Proto": {"https"}, "X-Forwarded-Host": 
{"example.org"}, "X-Forwarded-Prefix": {"/myprefix/"}, }) Expect(err).To(BeNil(), "error") Expect(sc).To(Equal(200), "status code") Expect(string(body)).To(ContainSubstring(`<base href="https://example.org/myprefix/" />`), "body") }) }) Context("Applying models", func() { It("applies models from a gallery", func() { models, err := getModels("http://127.0.0.1:9090/models/available") Expect(err).To(BeNil()) Expect(len(models)).To(Equal(2), fmt.Sprint(models)) Expect(models[0].Installed).To(BeFalse(), fmt.Sprint(models)) Expect(models[1].Installed).To(BeFalse(), fmt.Sprint(models)) response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ ID: "test@bert2", }) Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) uuid := response["uuid"].(string) resp := map[string]interface{}{} Eventually(func() bool { response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) fmt.Println(response) resp = response return response["processed"].(bool) }, "360s", "10s").Should(Equal(true)) Expect(resp["message"]).ToNot(ContainSubstring("error")) dat, err := os.ReadFile(filepath.Join(modelDir, "bert2.yaml")) Expect(err).ToNot(HaveOccurred()) _, err = os.ReadFile(filepath.Join(modelDir, "foo.yaml")) Expect(err).ToNot(HaveOccurred()) content := map[string]interface{}{} err = yaml.Unmarshal(dat, &content) Expect(err).ToNot(HaveOccurred()) Expect(content["usage"]).To(ContainSubstring("You can test this model with curl like this")) Expect(content["foo"]).To(Equal("bar")) models, err = getModels("http://127.0.0.1:9090/models/available") Expect(err).To(BeNil()) Expect(len(models)).To(Equal(2), fmt.Sprint(models)) Expect(models[0].Name).To(Or(Equal("bert"), Equal("bert2"))) Expect(models[1].Name).To(Or(Equal("bert"), Equal("bert2"))) for _, m := range models { if m.Name == "bert2" { Expect(m.Installed).To(BeTrue()) } else { Expect(m.Installed).To(BeFalse()) } } }) It("overrides models", func() { response := 
postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ URL: bertEmbeddingsURL, Name: "bert", Overrides: map[string]interface{}{ "backend": "llama", }, }) Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) uuid := response["uuid"].(string) Eventually(func() bool { response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) return response["processed"].(bool) }, "360s", "10s").Should(Equal(true)) dat, err := os.ReadFile(filepath.Join(modelDir, "bert.yaml")) Expect(err).ToNot(HaveOccurred()) content := map[string]interface{}{} err = yaml.Unmarshal(dat, &content) Expect(err).ToNot(HaveOccurred()) Expect(content["backend"]).To(Equal("llama")) }) It("apply models without overrides", func() { response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ URL: bertEmbeddingsURL, Name: "bert", Overrides: map[string]interface{}{}, }) Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) uuid := response["uuid"].(string) Eventually(func() bool { response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) return response["processed"].(bool) }, "360s", "10s").Should(Equal(true)) dat, err := os.ReadFile(filepath.Join(modelDir, "bert.yaml")) Expect(err).ToNot(HaveOccurred()) content := map[string]interface{}{} err = yaml.Unmarshal(dat, &content) Expect(err).ToNot(HaveOccurred()) Expect(content["usage"]).To(ContainSubstring("You can test this model with curl like this")) }) }) Context("Importing models from URI", func() { var testYamlFile string BeforeEach(func() { // Create a test YAML config file yamlContent := `name: test-import-model backend: llama-cpp description: Test model imported from file URI parameters: model: path/to/model.gguf temperature: 0.7 ` testYamlFile = filepath.Join(tmpdir, "test-import.yaml") err := os.WriteFile(testYamlFile, []byte(yamlContent), 0644) Expect(err).ToNot(HaveOccurred()) }) AfterEach(func() { err := os.Remove(testYamlFile) 
Expect(err).ToNot(HaveOccurred()) }) It("should import model from file:// URI pointing to local YAML config", func() { importReq := schema.ImportModelRequest{ URI: "file://" + testYamlFile, Preferences: json.RawMessage(`{}`), } var response schema.GalleryResponse err := postRequestResponseJSON("http://127.0.0.1:9090/models/import-uri", &importReq, &response) Expect(err).ToNot(HaveOccurred()) Expect(response.ID).ToNot(BeEmpty()) uuid := response.ID resp := map[string]interface{}{} Eventually(func() bool { response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) resp = response return response["processed"].(bool) }, "360s", "10s").Should(Equal(true)) // Check that the model was imported successfully Expect(resp["message"]).ToNot(ContainSubstring("error")) Expect(resp["error"]).To(BeNil()) // Verify the model config file was created dat, err := os.ReadFile(filepath.Join(modelDir, "test-import-model.yaml")) Expect(err).ToNot(HaveOccurred()) content := map[string]interface{}{} err = yaml.Unmarshal(dat, &content) Expect(err).ToNot(HaveOccurred()) Expect(content["name"]).To(Equal("test-import-model")) Expect(content["backend"]).To(Equal("llama-cpp")) }) It("should return error when file:// URI points to non-existent file", func() { nonExistentFile := filepath.Join(tmpdir, "nonexistent.yaml") importReq := schema.ImportModelRequest{ URI: "file://" + nonExistentFile, Preferences: json.RawMessage(`{}`), } var response schema.GalleryResponse err := postRequestResponseJSON("http://127.0.0.1:9090/models/import-uri", &importReq, &response) // The endpoint should return an error immediately Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("failed to discover model config")) }) }) Context("Importing models from URI can't point to absolute paths", func() { var testYamlFile string BeforeEach(func() { // Create a test YAML config file yamlContent := `name: test-import-model backend: llama-cpp description: Test model imported from file URI 
parameters: model: /path/to/model.gguf temperature: 0.7 ` testYamlFile = filepath.Join(tmpdir, "test-import.yaml") err := os.WriteFile(testYamlFile, []byte(yamlContent), 0644) Expect(err).ToNot(HaveOccurred()) }) AfterEach(func() { err := os.Remove(testYamlFile) Expect(err).ToNot(HaveOccurred()) }) It("should fail to import model from file:// URI pointing to local YAML config", func() { importReq := schema.ImportModelRequest{ URI: "file://" + testYamlFile, Preferences: json.RawMessage(`{}`), } var response schema.GalleryResponse err := postRequestResponseJSON("http://127.0.0.1:9090/models/import-uri", &importReq, &response) Expect(err).ToNot(HaveOccurred()) Expect(response.ID).ToNot(BeEmpty()) uuid := response.ID resp := map[string]interface{}{} Eventually(func() bool { response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) resp = response return response["processed"].(bool) }, "360s", "10s").Should(Equal(true)) // Check that the model was imported successfully Expect(resp["message"]).To(ContainSubstring("error")) Expect(resp["error"]).ToNot(BeNil()) }) }) }) Context("Model gallery", func() { BeforeEach(func() { var err error tmpdir, err = os.MkdirTemp("", "") backendPath := os.Getenv("BACKENDS_PATH") Expect(err).ToNot(HaveOccurred()) modelDir = filepath.Join(tmpdir, "models") backendAssetsDir := filepath.Join(tmpdir, "backend-assets") err = os.Mkdir(backendAssetsDir, 0750) Expect(err).ToNot(HaveOccurred()) c, cancel = context.WithCancel(context.Background()) galleries := []config.Gallery{ { Name: "localai", URL: "https://raw.githubusercontent.com/mudler/LocalAI/refs/heads/master/gallery/index.yaml", }, } systemState, err := system.GetSystemState( system.WithBackendPath(backendPath), system.WithModelPath(modelDir), ) Expect(err).ToNot(HaveOccurred()) application, err := application.New( append(commonOpts, config.WithContext(c), config.WithGeneratedContentDir(tmpdir), config.WithSystemState(systemState), config.WithGalleries(galleries), )..., ) 
Expect(err).ToNot(HaveOccurred()) app, err = API(application) Expect(err).ToNot(HaveOccurred()) go func() { if err := app.Start("127.0.0.1:9090"); err != nil && err != http.ErrServerClosed { xlog.Error("server error", "error", err) } }() defaultConfig := openai.DefaultConfig("") defaultConfig.BaseURL = "http://127.0.0.1:9090/v1" client2 = openaigo.NewClient("") client2.BaseURL = defaultConfig.BaseURL // Wait for API to be ready client = openai.NewClientWithConfig(defaultConfig) Eventually(func() error { _, err := client.ListModels(context.TODO()) return err }, "2m").ShouldNot(HaveOccurred()) }) AfterEach(func() { cancel() if app != nil { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() err := app.Shutdown(ctx) Expect(err).ToNot(HaveOccurred()) } err := os.RemoveAll(tmpdir) Expect(err).ToNot(HaveOccurred()) _, err = os.ReadDir(tmpdir) Expect(err).To(HaveOccurred()) }) It("runs gguf models (chat)", Label("llama-gguf"), func() { if runtime.GOOS != "linux" { Skip("test supported only on linux") } modelName := "qwen3-1.7b" response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ ID: "localai@" + modelName, }) Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) uuid := response["uuid"].(string) Eventually(func() bool { response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) return response["processed"].(bool) }, "900s", "10s").Should(Equal(true)) By("testing chat") resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: modelName, Messages: []openai.ChatCompletionMessage{ { Role: "user", Content: "How much is 2+2?", }, }}) Expect(err).ToNot(HaveOccurred()) Expect(len(resp.Choices)).To(Equal(1)) Expect(resp.Choices[0].Message.Content).To(Or(ContainSubstring("4"), ContainSubstring("four"))) By("testing functions") resp2, err := client.CreateChatCompletion( context.TODO(), openai.ChatCompletionRequest{ Model: modelName, Messages: 
[]openai.ChatCompletionMessage{ { Role: "user", Content: "What is the weather like in San Francisco (celsius)?", }, }, Functions: []openai.FunctionDefinition{ openai.FunctionDefinition{ Name: "get_current_weather", Description: "Get the current weather", Parameters: jsonschema.Definition{ Type: jsonschema.Object, Properties: map[string]jsonschema.Definition{ "location": { Type: jsonschema.String, Description: "The city and state, e.g. San Francisco, CA", }, "unit": { Type: jsonschema.String, Enum: []string{"celcius", "fahrenheit"}, }, }, Required: []string{"location"}, }, }, }, }) Expect(err).ToNot(HaveOccurred()) Expect(len(resp2.Choices)).To(Equal(1)) Expect(resp2.Choices[0].Message.FunctionCall).ToNot(BeNil()) Expect(resp2.Choices[0].Message.FunctionCall.Name).To(Equal("get_current_weather"), resp2.Choices[0].Message.FunctionCall.Name) var res map[string]string err = json.Unmarshal([]byte(resp2.Choices[0].Message.FunctionCall.Arguments), &res) Expect(err).ToNot(HaveOccurred()) Expect(res["location"]).To(ContainSubstring("San Francisco"), fmt.Sprint(res)) Expect(res["unit"]).To(Equal("celcius"), fmt.Sprint(res)) Expect(string(resp2.Choices[0].FinishReason)).To(Equal("function_call"), fmt.Sprint(resp2.Choices[0].FinishReason)) }) It("installs and is capable to run tts", Label("tts"), func() { if runtime.GOOS != "linux" { Skip("test supported only on linux") } response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ ID: "localai@voice-en-us-kathleen-low", }) Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) uuid := response["uuid"].(string) Eventually(func() bool { response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) fmt.Println(response) return response["processed"].(bool) }, "360s", "10s").Should(Equal(true)) // An HTTP Post to the /tts endpoint should return a wav audio file resp, err := http.Post("http://127.0.0.1:9090/tts", "application/json", bytes.NewBuffer([]byte(`{"input": "Hello world", 
"model": "voice-en-us-kathleen-low"}`))) Expect(err).ToNot(HaveOccurred(), fmt.Sprint(resp)) dat, err := io.ReadAll(resp.Body) Expect(err).ToNot(HaveOccurred(), fmt.Sprint(resp)) Expect(resp.StatusCode).To(Equal(200), fmt.Sprint(string(dat))) Expect(resp.Header.Get("Content-Type")).To(Or(Equal("audio/x-wav"), Equal("audio/vnd.wave"))) }) It("installs and is capable to generate images", Label("stablediffusion"), func() { if runtime.GOOS != "linux" { Skip("test supported only on linux") } response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ ID: "localai@sd-1.5-ggml", Name: "stablediffusion", }) Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) uuid := response["uuid"].(string) Eventually(func() bool { response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) fmt.Println(response) return response["processed"].(bool) }, "1200s", "10s").Should(Equal(true)) resp, err := http.Post( "http://127.0.0.1:9090/v1/images/generations", "application/json", bytes.NewBuffer([]byte(`{ "prompt": "a lovely cat", "step": 1, "seed":9000, "size": "256x256", "n":2}`))) // The response should contain an URL Expect(err).ToNot(HaveOccurred(), fmt.Sprint(resp)) dat, err := io.ReadAll(resp.Body) Expect(err).ToNot(HaveOccurred(), "error reading /image/generations response") imgUrlResp := &schema.OpenAIResponse{} err = json.Unmarshal(dat, imgUrlResp) Expect(err).ToNot(HaveOccurred(), fmt.Sprint(dat)) Expect(imgUrlResp.Data).ToNot(Or(BeNil(), BeZero())) imgUrl := imgUrlResp.Data[0].URL Expect(imgUrl).To(ContainSubstring("http://127.0.0.1:9090/"), imgUrl) Expect(imgUrl).To(ContainSubstring(".png"), imgUrl) imgResp, err := http.Get(imgUrl) Expect(err).To(BeNil()) Expect(imgResp).ToNot(BeNil()) Expect(imgResp.StatusCode).To(Equal(200)) Expect(imgResp.ContentLength).To(BeNumerically(">", 0)) imgData := make([]byte, 512) count, err := io.ReadFull(imgResp.Body, imgData) Expect(err).To(Or(BeNil(), MatchError(io.EOF))) 
Expect(count).To(BeNumerically(">", 0)) Expect(count).To(BeNumerically("<=", 512)) Expect(http.DetectContentType(imgData)).To(Equal("image/png")) }) }) Context("API query", func() { BeforeEach(func() { modelPath := os.Getenv("MODELS_PATH") backendPath := os.Getenv("BACKENDS_PATH") c, cancel = context.WithCancel(context.Background()) var err error systemState, err := system.GetSystemState( system.WithBackendPath(backendPath), system.WithModelPath(modelPath), ) Expect(err).ToNot(HaveOccurred()) application, err := application.New( append(commonOpts, config.WithExternalBackend("transformers", os.Getenv("HUGGINGFACE_GRPC")), config.WithContext(c), config.WithSystemState(systemState), )...) Expect(err).ToNot(HaveOccurred()) app, err = API(application) Expect(err).ToNot(HaveOccurred()) go func() { if err := app.Start("127.0.0.1:9090"); err != nil && err != http.ErrServerClosed { xlog.Error("server error", "error", err) } }() defaultConfig := openai.DefaultConfig("") defaultConfig.BaseURL = "http://127.0.0.1:9090/v1" client2 = openaigo.NewClient("") client2.BaseURL = defaultConfig.BaseURL // Wait for API to be ready client = openai.NewClientWithConfig(defaultConfig) Eventually(func() error { _, err := client.ListModels(context.TODO()) return err }, "2m").ShouldNot(HaveOccurred()) }) AfterEach(func() { cancel() if app != nil { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() err := app.Shutdown(ctx) Expect(err).ToNot(HaveOccurred()) } }) It("returns the models list", func() { models, err := client.ListModels(context.TODO()) Expect(err).ToNot(HaveOccurred()) Expect(len(models.Models)).To(Equal(7)) // If "config.yaml" should be included, this should be 8? 
}) It("can generate completions via ggml", func() { if runtime.GOOS != "linux" { Skip("test supported only on linux") } resp, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "testmodel.ggml", Prompt: testPrompt}) Expect(err).ToNot(HaveOccurred()) Expect(len(resp.Choices)).To(Equal(1)) Expect(resp.Choices[0].Text).ToNot(BeEmpty()) }) It("can generate chat completions via ggml", func() { if runtime.GOOS != "linux" { Skip("test supported only on linux") } resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: "testmodel.ggml", Messages: []openai.ChatCompletionMessage{openai.ChatCompletionMessage{Role: "user", Content: testPrompt}}}) Expect(err).ToNot(HaveOccurred()) Expect(len(resp.Choices)).To(Equal(1)) Expect(resp.Choices[0].Message.Content).ToNot(BeEmpty()) }) It("returns logprobs in chat completions when requested", func() { if runtime.GOOS != "linux" { Skip("test only on linux") } topLogprobsVal := 3 response, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{ Model: "testmodel.ggml", LogProbs: true, TopLogProbs: topLogprobsVal, Messages: []openai.ChatCompletionMessage{{Role: "user", Content: testPrompt}}}) Expect(err).ToNot(HaveOccurred()) Expect(len(response.Choices)).To(Equal(1)) Expect(response.Choices[0].Message).ToNot(BeNil()) Expect(response.Choices[0].Message.Content).ToNot(BeEmpty()) // Verify logprobs are present and have correct structure Expect(response.Choices[0].LogProbs).ToNot(BeNil()) Expect(response.Choices[0].LogProbs.Content).ToNot(BeEmpty()) Expect(len(response.Choices[0].LogProbs.Content)).To(BeNumerically(">", 1)) foundatLeastToken := "" foundAtLeastBytes := []byte{} foundAtLeastTopLogprobBytes := []byte{} foundatLeastTopLogprob := "" // Verify logprobs content structure matches OpenAI format for _, logprobContent := range response.Choices[0].LogProbs.Content { // Bytes can be empty for certain tokens (special tokens, etc.), so we don't require 
it if len(logprobContent.Bytes) > 0 { foundAtLeastBytes = logprobContent.Bytes } if len(logprobContent.Token) > 0 { foundatLeastToken = logprobContent.Token } Expect(logprobContent.LogProb).To(BeNumerically("<=", 0)) // Logprobs are always <= 0 Expect(len(logprobContent.TopLogProbs)).To(BeNumerically(">", 1)) // If top_logprobs is requested, verify top_logprobs array respects the limit if len(logprobContent.TopLogProbs) > 0 { // Should respect top_logprobs limit (3 in this test) Expect(len(logprobContent.TopLogProbs)).To(BeNumerically("<=", topLogprobsVal)) for _, topLogprob := range logprobContent.TopLogProbs { if len(topLogprob.Bytes) > 0 { foundAtLeastTopLogprobBytes = topLogprob.Bytes } if len(topLogprob.Token) > 0 { foundatLeastTopLogprob = topLogprob.Token } Expect(topLogprob.LogProb).To(BeNumerically("<=", 0)) } } } Expect(foundAtLeastBytes).ToNot(BeEmpty()) Expect(foundAtLeastTopLogprobBytes).ToNot(BeEmpty()) Expect(foundatLeastToken).ToNot(BeEmpty()) Expect(foundatLeastTopLogprob).ToNot(BeEmpty()) }) It("applies logit_bias to chat completions when requested", func() { if runtime.GOOS != "linux" { Skip("test only on linux") } // logit_bias is a map of token IDs (as strings) to bias values (-100 to 100) // According to OpenAI API: modifies the likelihood of specified tokens appearing in the completion logitBias := map[string]int{ "15043": 1, // Bias token ID 15043 (example token ID) with bias value 1 } response, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{ Model: "testmodel.ggml", Messages: []openai.ChatCompletionMessage{{Role: "user", Content: testPrompt}}, LogitBias: logitBias, }) Expect(err).ToNot(HaveOccurred()) Expect(len(response.Choices)).To(Equal(1)) Expect(response.Choices[0].Message).ToNot(BeNil()) Expect(response.Choices[0].Message.Content).ToNot(BeEmpty()) // If logit_bias is applied, the response should be generated successfully // We can't easily verify the bias effect without knowing the actual token IDs for 
the model, // but the fact that the request succeeds confirms the API accepts and processes logit_bias }) It("returns errors", func() {
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
true
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/http_suite_test.go
core/http/http_suite_test.go
package http_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// TestLocalAI is the single entry point that plugs the Ginkgo spec
// runner into the standard `go test` machinery and executes every
// spec registered in this package.
func TestLocalAI(t *testing.T) {
	// Route Gomega assertion failures into Ginkgo's failure handler
	// so failed expectations abort the current spec cleanly.
	RegisterFailHandler(Fail)
	RunSpecs(t, "LocalAI HTTP test suite")
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/explorer.go
core/http/explorer.go
package http

import (
	"io/fs"
	"net/http"

	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/explorer"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/core/http/routes"
	"github.com/mudler/xlog"
)

// Explorer builds the Echo application that serves the model-explorer
// UI, backed by the given database.
//
// The returned server has the template renderer installed, the
// path-prefix stripping middleware registered, the explorer routes
// mounted, the embedded favicon and static assets exposed, and a
// catch-all 404 handler registered last.
func Explorer(db *explorer.Database) *echo.Echo {
	router := echo.New()

	// Template renderer plus quieter startup output.
	router.Renderer = renderEngine()
	router.HideBanner = true

	// StripPathPrefix must run before routing (Pre), since it rewrites
	// the request path.
	router.Pre(middleware.StripPathPrefix())

	routes.RegisterExplorerRoutes(router, db)

	// Serve the embedded favicon straight out of the bundled assets.
	router.GET("/favicon.svg", func(c echo.Context) error {
		data, err := embedDirStatic.ReadFile("static/favicon.svg")
		if err != nil {
			return c.NoContent(http.StatusNotFound)
		}
		c.Response().Header().Set("Content-Type", "image/svg+xml")
		return c.Blob(http.StatusOK, "image/svg+xml", data)
	})

	// Expose the embedded static files, re-rooted at "static" so URLs
	// do not need the embed directory prefix. On failure keep serving
	// without static assets, but record the problem.
	if staticFS, err := fs.Sub(embedDirStatic, "static"); err != nil {
		xlog.Error("failed to create static filesystem", "error", err)
	} else {
		router.StaticFS("/static", staticFS)
	}

	// Custom 404 handler — keep this registered last so it only
	// catches requests no earlier route matched.
	router.GET("/*", notFoundHandler)

	return router
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/app.go
core/http/app.go
package http import ( "embed" "errors" "fmt" "io/fs" "net/http" "os" "path/filepath" "strings" "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" "github.com/mudler/LocalAI/core/http/endpoints/localai" httpMiddleware "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/http/routes" "github.com/mudler/LocalAI/core/application" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/core/services" "github.com/mudler/xlog" ) // Embed a directory // //go:embed static/* var embedDirStatic embed.FS // @title LocalAI API // @version 2.0.0 // @description The LocalAI Rest API. // @termsOfService // @contact.name LocalAI // @contact.url https://localai.io // @license.name MIT // @license.url https://raw.githubusercontent.com/mudler/LocalAI/master/LICENSE // @BasePath / // @securityDefinitions.apikey BearerAuth // @in header // @name Authorization func API(application *application.Application) (*echo.Echo, error) { e := echo.New() // Set body limit if application.ApplicationConfig().UploadLimitMB > 0 { e.Use(middleware.BodyLimit(fmt.Sprintf("%dM", application.ApplicationConfig().UploadLimitMB))) } // Set error handler if !application.ApplicationConfig().OpaqueErrors { e.HTTPErrorHandler = func(err error, c echo.Context) { code := http.StatusInternalServerError var he *echo.HTTPError if errors.As(err, &he) { code = he.Code } // Handle 404 errors with HTML rendering when appropriate if code == http.StatusNotFound { notFoundHandler(c) return } // Send custom error page c.JSON(code, schema.ErrorResponse{ Error: &schema.APIError{Message: err.Error(), Code: code}, }) } } else { e.HTTPErrorHandler = func(err error, c echo.Context) { code := http.StatusInternalServerError var he *echo.HTTPError if errors.As(err, &he) { code = he.Code } c.NoContent(code) } } // Set renderer e.Renderer = renderEngine() // Hide banner e.HideBanner = true e.HidePort = true // Middleware - StripPathPrefix must be registered early as it uses 
Rewrite which runs before routing e.Pre(httpMiddleware.StripPathPrefix()) e.Pre(middleware.RemoveTrailingSlash()) if application.ApplicationConfig().MachineTag != "" { e.Use(func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { c.Response().Header().Set("Machine-Tag", application.ApplicationConfig().MachineTag) return next(c) } }) } // Custom logger middleware using xlog e.Use(func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { req := c.Request() res := c.Response() err := next(c) xlog.Info("HTTP request", "method", req.Method, "path", req.URL.Path, "status", res.Status) return err } }) // Recover middleware if !application.ApplicationConfig().Debug { e.Use(middleware.Recover()) } // Metrics middleware if !application.ApplicationConfig().DisableMetrics { metricsService, err := services.NewLocalAIMetricsService() if err != nil { return nil, err } if metricsService != nil { e.Use(localai.LocalAIMetricsAPIMiddleware(metricsService)) e.Server.RegisterOnShutdown(func() { metricsService.Shutdown() }) } } // Health Checks should always be exempt from auth, so register these first routes.HealthRoutes(e) // Get key auth middleware keyAuthMiddleware, err := httpMiddleware.GetKeyAuthConfig(application.ApplicationConfig()) if err != nil { return nil, fmt.Errorf("failed to create key auth config: %w", err) } // Favicon handler e.GET("/favicon.svg", func(c echo.Context) error { data, err := embedDirStatic.ReadFile("static/favicon.svg") if err != nil { return c.NoContent(http.StatusNotFound) } c.Response().Header().Set("Content-Type", "image/svg+xml") return c.Blob(http.StatusOK, "image/svg+xml", data) }) // Static files - use fs.Sub to create a filesystem rooted at "static" staticFS, err := fs.Sub(embedDirStatic, "static") if err != nil { return nil, fmt.Errorf("failed to create static filesystem: %w", err) } e.StaticFS("/static", staticFS) // Generated content directories if 
application.ApplicationConfig().GeneratedContentDir != "" { os.MkdirAll(application.ApplicationConfig().GeneratedContentDir, 0750) audioPath := filepath.Join(application.ApplicationConfig().GeneratedContentDir, "audio") imagePath := filepath.Join(application.ApplicationConfig().GeneratedContentDir, "images") videoPath := filepath.Join(application.ApplicationConfig().GeneratedContentDir, "videos") os.MkdirAll(audioPath, 0750) os.MkdirAll(imagePath, 0750) os.MkdirAll(videoPath, 0750) e.Static("/generated-audio", audioPath) e.Static("/generated-images", imagePath) e.Static("/generated-videos", videoPath) } // Auth is applied to _all_ endpoints. No exceptions. Filtering out endpoints to bypass is the role of the Skipper property of the KeyAuth Configuration e.Use(keyAuthMiddleware) // CORS middleware if application.ApplicationConfig().CORS { corsConfig := middleware.CORSConfig{} if application.ApplicationConfig().CORSAllowOrigins != "" { corsConfig.AllowOrigins = strings.Split(application.ApplicationConfig().CORSAllowOrigins, ",") } e.Use(middleware.CORSWithConfig(corsConfig)) } // CSRF middleware if application.ApplicationConfig().CSRF { xlog.Debug("Enabling CSRF middleware. 
Tokens are now required for state-modifying requests") e.Use(middleware.CSRF()) } requestExtractor := httpMiddleware.NewRequestExtractor(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()) routes.RegisterElevenLabsRoutes(e, requestExtractor, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()) // Create opcache for tracking UI operations (used by both UI and LocalAI routes) var opcache *services.OpCache if !application.ApplicationConfig().DisableWebUI { opcache = services.NewOpCache(application.GalleryService()) } routes.RegisterLocalAIRoutes(e, requestExtractor, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService(), opcache, application.TemplatesEvaluator(), application) routes.RegisterOpenAIRoutes(e, requestExtractor, application) if !application.ApplicationConfig().DisableWebUI { routes.RegisterUIAPIRoutes(e, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService(), opcache, application) routes.RegisterUIRoutes(e, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService()) } routes.RegisterJINARoutes(e, requestExtractor, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()) // Note: 404 handling is done via HTTPErrorHandler above, no need for catch-all route // Log startup message e.Server.RegisterOnShutdown(func() { xlog.Info("LocalAI API server shutting down") }) return e, nil }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/openai_mapping_test.go
core/http/openai_mapping_test.go
package http_test import ( "encoding/json" openai "github.com/mudler/LocalAI/core/http/endpoints/openai" "github.com/mudler/LocalAI/core/schema" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) var _ = Describe("MapOpenAIToVideo", func() { It("maps size and seconds correctly", func() { cases := []struct { name string input *schema.OpenAIRequest raw map[string]interface{} expectsW int32 expectsH int32 expectsF int32 expectsN int32 }{ { name: "size in input", input: &schema.OpenAIRequest{ PredictionOptions: schema.PredictionOptions{ BasicModelRequest: schema.BasicModelRequest{Model: "m"}, }, Size: "256x128", }, expectsW: 256, expectsH: 128, }, { name: "size in raw and seconds as string", input: &schema.OpenAIRequest{PredictionOptions: schema.PredictionOptions{BasicModelRequest: schema.BasicModelRequest{Model: "m"}}}, raw: map[string]interface{}{"size": "720x480", "seconds": "2"}, expectsW: 720, expectsH: 480, expectsF: 30, expectsN: 60, }, { name: "seconds as number and fps override", input: &schema.OpenAIRequest{PredictionOptions: schema.PredictionOptions{BasicModelRequest: schema.BasicModelRequest{Model: "m"}}}, raw: map[string]interface{}{"seconds": 3.0, "fps": 24.0}, expectsF: 24, expectsN: 72, }, } for _, c := range cases { By(c.name) vr := openai.MapOpenAIToVideo(c.input, c.raw) if c.expectsW != 0 { Expect(vr.Width).To(Equal(c.expectsW)) } if c.expectsH != 0 { Expect(vr.Height).To(Equal(c.expectsH)) } if c.expectsF != 0 { Expect(vr.FPS).To(Equal(c.expectsF)) } if c.expectsN != 0 { Expect(vr.NumFrames).To(Equal(c.expectsN)) } b, err := json.Marshal(vr) Expect(err).ToNot(HaveOccurred()) _ = b } }) })
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/middleware/request.go
core/http/middleware/request.go
package middleware import ( "context" "encoding/json" "fmt" "net/http" "strconv" "strings" "github.com/google/uuid" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/core/services" "github.com/mudler/LocalAI/core/templates" "github.com/mudler/LocalAI/pkg/functions" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/LocalAI/pkg/utils" "github.com/mudler/xlog" ) type correlationIDKeyType string // CorrelationIDKey to track request across process boundary const CorrelationIDKey correlationIDKeyType = "correlationID" type RequestExtractor struct { modelConfigLoader *config.ModelConfigLoader modelLoader *model.ModelLoader applicationConfig *config.ApplicationConfig } func NewRequestExtractor(modelConfigLoader *config.ModelConfigLoader, modelLoader *model.ModelLoader, applicationConfig *config.ApplicationConfig) *RequestExtractor { return &RequestExtractor{ modelConfigLoader: modelConfigLoader, modelLoader: modelLoader, applicationConfig: applicationConfig, } } const CONTEXT_LOCALS_KEY_MODEL_NAME = "MODEL_NAME" const CONTEXT_LOCALS_KEY_LOCALAI_REQUEST = "LOCALAI_REQUEST" const CONTEXT_LOCALS_KEY_MODEL_CONFIG = "MODEL_CONFIG" // TODO: Refactor to not return error if unchanged func (re *RequestExtractor) setModelNameFromRequest(c echo.Context) { model, ok := c.Get(CONTEXT_LOCALS_KEY_MODEL_NAME).(string) if ok && model != "" { return } model = c.Param("model") if model == "" { model = c.QueryParam("model") } // Check FormValue for multipart/form-data requests (e.g., /v1/images/inpainting) if model == "" { model = c.FormValue("model") } if model == "" { // Set model from bearer token, if available auth := c.Request().Header.Get("Authorization") bearer := strings.TrimPrefix(auth, "Bearer ") if bearer != "" && bearer != auth { exists, err := services.CheckIfModelExists(re.modelConfigLoader, re.modelLoader, bearer, services.ALWAYS_INCLUDE) if err == nil && exists { model = bearer } } } 
c.Set(CONTEXT_LOCALS_KEY_MODEL_NAME, model) } func (re *RequestExtractor) BuildConstantDefaultModelNameMiddleware(defaultModelName string) echo.MiddlewareFunc { return func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { re.setModelNameFromRequest(c) localModelName, ok := c.Get(CONTEXT_LOCALS_KEY_MODEL_NAME).(string) if !ok || localModelName == "" { c.Set(CONTEXT_LOCALS_KEY_MODEL_NAME, defaultModelName) xlog.Debug("context local model name not found, setting to default", "defaultModelName", defaultModelName) } return next(c) } } } func (re *RequestExtractor) BuildFilteredFirstAvailableDefaultModel(filterFn config.ModelConfigFilterFn) echo.MiddlewareFunc { return func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { re.setModelNameFromRequest(c) localModelName := c.Get(CONTEXT_LOCALS_KEY_MODEL_NAME).(string) if localModelName != "" { // Don't overwrite existing values return next(c) } modelNames, err := services.ListModels(re.modelConfigLoader, re.modelLoader, filterFn, services.SKIP_IF_CONFIGURED) if err != nil { xlog.Error("non-fatal error calling ListModels during SetDefaultModelNameToFirstAvailable()", "error", err) return next(c) } if len(modelNames) == 0 { xlog.Warn("SetDefaultModelNameToFirstAvailable used with no matching models installed") // This is non-fatal - making it so was breaking the case of direct installation of raw models // return errors.New("this endpoint requires at least one model to be installed") return next(c) } c.Set(CONTEXT_LOCALS_KEY_MODEL_NAME, modelNames[0]) xlog.Debug("context local model name not found, setting to the first model", "first model name", modelNames[0]) return next(c) } } } // TODO: If context and cancel above belong on all methods, move that part of above into here! 
// Otherwise, it's in its own method below for now func (re *RequestExtractor) SetModelAndConfig(initializer func() schema.LocalAIRequest) echo.MiddlewareFunc { return func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { input := initializer() if input == nil { return echo.NewHTTPError(http.StatusBadRequest, "unable to initialize body") } if err := c.Bind(input); err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("failed parsing request body: %v", err)) } // If this request doesn't have an associated model name, fetch it from earlier in the middleware chain if input.ModelName(nil) == "" { localModelName, ok := c.Get(CONTEXT_LOCALS_KEY_MODEL_NAME).(string) if ok && localModelName != "" { xlog.Debug("overriding empty model name in request body with value found earlier in middleware chain", "context localModelName", localModelName) input.ModelName(&localModelName) } } cfg, err := re.modelConfigLoader.LoadModelConfigFileByNameDefaultOptions(input.ModelName(nil), re.applicationConfig) if err != nil { xlog.Warn("Model Configuration File not found", "model", input.ModelName(nil), "error", err) } else if cfg.Model == "" && input.ModelName(nil) != "" { xlog.Debug("config does not include model, using input", "input.ModelName", input.ModelName(nil)) cfg.Model = input.ModelName(nil) } c.Set(CONTEXT_LOCALS_KEY_LOCALAI_REQUEST, input) c.Set(CONTEXT_LOCALS_KEY_MODEL_CONFIG, cfg) return next(c) } } } func (re *RequestExtractor) SetOpenAIRequest(c echo.Context) error { input, ok := c.Get(CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenAIRequest) if !ok || input.Model == "" { return echo.ErrBadRequest } cfg, ok := c.Get(CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig) if !ok || cfg == nil { return echo.ErrBadRequest } // Extract or generate the correlation ID correlationID := c.Request().Header.Get("X-Correlation-ID") if correlationID == "" { correlationID = uuid.New().String() } 
c.Response().Header().Set("X-Correlation-ID", correlationID) // Use the request context directly - Echo properly supports context cancellation! // No need for workarounds like handleConnectionCancellation reqCtx := c.Request().Context() c1, cancel := context.WithCancel(re.applicationConfig.Context) // Cancel when request context is cancelled (client disconnects) go func() { select { case <-reqCtx.Done(): cancel() case <-c1.Done(): // Already cancelled } }() // Add the correlation ID to the new context ctxWithCorrelationID := context.WithValue(c1, CorrelationIDKey, correlationID) input.Context = ctxWithCorrelationID input.Cancel = cancel err := mergeOpenAIRequestAndModelConfig(cfg, input) if err != nil { return err } if cfg.Model == "" { xlog.Debug("replacing empty cfg.Model with input value", "input.Model", input.Model) cfg.Model = input.Model } c.Set(CONTEXT_LOCALS_KEY_LOCALAI_REQUEST, input) c.Set(CONTEXT_LOCALS_KEY_MODEL_CONFIG, cfg) return nil } func mergeOpenAIRequestAndModelConfig(config *config.ModelConfig, input *schema.OpenAIRequest) error { if input.Echo { config.Echo = input.Echo } if input.TopK != nil { config.TopK = input.TopK } if input.TopP != nil { config.TopP = input.TopP } if input.Backend != "" { config.Backend = input.Backend } if input.ClipSkip != 0 { config.Diffusers.ClipSkip = input.ClipSkip } if input.NegativePromptScale != 0 { config.NegativePromptScale = input.NegativePromptScale } if input.NegativePrompt != "" { config.NegativePrompt = input.NegativePrompt } if input.RopeFreqBase != 0 { config.RopeFreqBase = input.RopeFreqBase } if input.RopeFreqScale != 0 { config.RopeFreqScale = input.RopeFreqScale } if input.Grammar != "" { config.Grammar = input.Grammar } if input.Temperature != nil { config.Temperature = input.Temperature } if input.Maxtokens != nil { config.Maxtokens = input.Maxtokens } if input.ResponseFormat != nil { switch responseFormat := input.ResponseFormat.(type) { case string: config.ResponseFormat = responseFormat case 
map[string]interface{}: config.ResponseFormatMap = responseFormat } } switch stop := input.Stop.(type) { case string: if stop != "" { config.StopWords = append(config.StopWords, stop) } case []interface{}: for _, pp := range stop { if s, ok := pp.(string); ok { config.StopWords = append(config.StopWords, s) } } } if len(input.Tools) > 0 { for _, tool := range input.Tools { input.Functions = append(input.Functions, tool.Function) } } if input.ToolsChoice != nil { var toolChoice functions.Tool switch content := input.ToolsChoice.(type) { case string: _ = json.Unmarshal([]byte(content), &toolChoice) case map[string]interface{}: dat, _ := json.Marshal(content) _ = json.Unmarshal(dat, &toolChoice) } input.FunctionCall = map[string]interface{}{ "name": toolChoice.Function.Name, } } // Decode each request's message content imgIndex, vidIndex, audioIndex := 0, 0, 0 for i, m := range input.Messages { nrOfImgsInMessage := 0 nrOfVideosInMessage := 0 nrOfAudiosInMessage := 0 switch content := m.Content.(type) { case string: input.Messages[i].StringContent = content case []interface{}: dat, _ := json.Marshal(content) c := []schema.Content{} json.Unmarshal(dat, &c) textContent := "" // we will template this at the end CONTENT: for _, pp := range c { switch pp.Type { case "text": textContent += pp.Text //input.Messages[i].StringContent = pp.Text case "video", "video_url": // Decode content as base64 either if it's an URL or base64 text base64, err := utils.GetContentURIAsBase64(pp.VideoURL.URL) if err != nil { xlog.Error("Failed encoding video", "error", err) continue CONTENT } input.Messages[i].StringVideos = append(input.Messages[i].StringVideos, base64) // TODO: make sure that we only return base64 stuff vidIndex++ nrOfVideosInMessage++ case "audio_url", "audio": // Decode content as base64 either if it's an URL or base64 text base64, err := utils.GetContentURIAsBase64(pp.AudioURL.URL) if err != nil { xlog.Error("Failed encoding audio", "error", err) continue CONTENT } 
input.Messages[i].StringAudios = append(input.Messages[i].StringAudios, base64) // TODO: make sure that we only return base64 stuff audioIndex++ nrOfAudiosInMessage++ case "input_audio": // TODO: make sure that we only return base64 stuff input.Messages[i].StringAudios = append(input.Messages[i].StringAudios, pp.InputAudio.Data) audioIndex++ nrOfAudiosInMessage++ case "image_url", "image": // Decode content as base64 either if it's an URL or base64 text base64, err := utils.GetContentURIAsBase64(pp.ImageURL.URL) if err != nil { xlog.Error("Failed encoding image", "error", err) continue CONTENT } input.Messages[i].StringImages = append(input.Messages[i].StringImages, base64) // TODO: make sure that we only return base64 stuff imgIndex++ nrOfImgsInMessage++ } } input.Messages[i].StringContent, _ = templates.TemplateMultiModal(config.TemplateConfig.Multimodal, templates.MultiModalOptions{ TotalImages: imgIndex, TotalVideos: vidIndex, TotalAudios: audioIndex, ImagesInMessage: nrOfImgsInMessage, VideosInMessage: nrOfVideosInMessage, AudiosInMessage: nrOfAudiosInMessage, }, textContent) } } if input.RepeatPenalty != 0 { config.RepeatPenalty = input.RepeatPenalty } if input.FrequencyPenalty != 0 { config.FrequencyPenalty = input.FrequencyPenalty } if input.PresencePenalty != 0 { config.PresencePenalty = input.PresencePenalty } if input.Keep != 0 { config.Keep = input.Keep } if input.Batch != 0 { config.Batch = input.Batch } if input.IgnoreEOS { config.IgnoreEOS = input.IgnoreEOS } if input.Seed != nil { config.Seed = input.Seed } if input.TypicalP != nil { config.TypicalP = input.TypicalP } xlog.Debug("input.Input", "input", fmt.Sprintf("%+v", input.Input)) switch inputs := input.Input.(type) { case string: if inputs != "" { config.InputStrings = append(config.InputStrings, inputs) } case []any: for _, pp := range inputs { switch i := pp.(type) { case string: config.InputStrings = append(config.InputStrings, i) case []any: tokens := []int{} inputStrings := []string{} for 
_, ii := range i { switch ii := ii.(type) { case int: tokens = append(tokens, ii) case float64: tokens = append(tokens, int(ii)) case string: inputStrings = append(inputStrings, ii) default: xlog.Error("Unknown input type", "type", fmt.Sprintf("%T", ii)) } } config.InputToken = append(config.InputToken, tokens) config.InputStrings = append(config.InputStrings, inputStrings...) } } } // Can be either a string or an object switch fnc := input.FunctionCall.(type) { case string: if fnc != "" { config.SetFunctionCallString(fnc) } case map[string]interface{}: var name string n, exists := fnc["name"] if exists { nn, e := n.(string) if e { name = nn } } config.SetFunctionCallNameString(name) } switch p := input.Prompt.(type) { case string: config.PromptStrings = append(config.PromptStrings, p) case []interface{}: for _, pp := range p { if s, ok := pp.(string); ok { config.PromptStrings = append(config.PromptStrings, s) } } } // If a quality was defined as number, convert it to step if input.Quality != "" { q, err := strconv.Atoi(input.Quality) if err == nil { config.Step = q } } if valid, _ := config.Validate(); valid { return nil } return fmt.Errorf("unable to validate configuration after merging") }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/middleware/middleware_suite_test.go
core/http/middleware/middleware_suite_test.go
package middleware_test import ( "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) func TestMiddleware(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Middleware test suite") }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/middleware/trace.go
core/http/middleware/trace.go
package middleware import ( "bytes" "github.com/emirpasic/gods/v2/queues/circularbuffer" "io" "net/http" "sort" "sync" "time" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/application" "github.com/mudler/xlog" ) type APIExchangeRequest struct { Method string `json:"method"` Path string `json:"path"` Headers *http.Header `json:"headers"` Body *[]byte `json:"body"` } type APIExchangeResponse struct { Status int `json:"status"` Headers *http.Header `json:"headers"` Body *[]byte `json:"body"` } type APIExchange struct { Timestamp time.Time `json:"timestamp"` Request APIExchangeRequest `json:"request"` Response APIExchangeResponse `json:"response"` } var traceBuffer *circularbuffer.Queue[APIExchange] var mu sync.Mutex var logChan = make(chan APIExchange, 100) type bodyWriter struct { http.ResponseWriter body *bytes.Buffer } func (w *bodyWriter) Write(b []byte) (int, error) { w.body.Write(b) return w.ResponseWriter.Write(b) } func (w *bodyWriter) Flush() { if flusher, ok := w.ResponseWriter.(http.Flusher); ok { flusher.Flush() } } // TraceMiddleware intercepts and logs JSON API requests and responses func TraceMiddleware(app *application.Application) echo.MiddlewareFunc { if app.ApplicationConfig().EnableTracing && traceBuffer == nil { traceBuffer = circularbuffer.New[APIExchange](app.ApplicationConfig().TracingMaxItems) go func() { for exchange := range logChan { mu.Lock() traceBuffer.Enqueue(exchange) mu.Unlock() } }() } return func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { if !app.ApplicationConfig().EnableTracing { return next(c) } if c.Request().Header.Get("Content-Type") != "application/json" { return next(c) } body, err := io.ReadAll(c.Request().Body) if err != nil { xlog.Error("Failed to read request body") return err } // Restore the body for downstream handlers c.Request().Body = io.NopCloser(bytes.NewBuffer(body)) startTime := time.Now() // Wrap response writer to capture body resBody := new(bytes.Buffer) mw 
:= &bodyWriter{ ResponseWriter: c.Response().Writer, body: resBody, } c.Response().Writer = mw err = next(c) if err != nil { c.Response().Writer = mw.ResponseWriter // Restore original writer if error return err } // Create exchange log requestHeaders := c.Request().Header.Clone() requestBody := make([]byte, len(body)) copy(requestBody, body) responseHeaders := c.Response().Header().Clone() responseBody := make([]byte, resBody.Len()) copy(responseBody, resBody.Bytes()) exchange := APIExchange{ Timestamp: startTime, Request: APIExchangeRequest{ Method: c.Request().Method, Path: c.Path(), Headers: &requestHeaders, Body: &requestBody, }, Response: APIExchangeResponse{ Status: c.Response().Status, Headers: &responseHeaders, Body: &responseBody, }, } select { case logChan <- exchange: default: xlog.Warn("Trace channel full, dropping trace") } return nil } } } // GetTraces returns a copy of the logged API exchanges for display func GetTraces() []APIExchange { mu.Lock() traces := traceBuffer.Values() mu.Unlock() sort.Slice(traces, func(i, j int) bool { return traces[i].Timestamp.Before(traces[j].Timestamp) }) return traces } // ClearTraces clears the in-memory logs func ClearTraces() { mu.Lock() traceBuffer.Clear() mu.Unlock() }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/middleware/strippathprefix.go
core/http/middleware/strippathprefix.go
package middleware import ( "strings" "github.com/labstack/echo/v4" ) // StripPathPrefix returns middleware that strips a path prefix from the request path. // The path prefix is obtained from the X-Forwarded-Prefix HTTP request header. // This must be registered as Pre middleware (using e.Pre()) to modify the path before routing. func StripPathPrefix() echo.MiddlewareFunc { return func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { prefixes := c.Request().Header.Values("X-Forwarded-Prefix") originalPath := c.Request().URL.Path for _, prefix := range prefixes { if prefix != "" { normalizedPrefix := prefix if !strings.HasSuffix(prefix, "/") { normalizedPrefix = prefix + "/" } if strings.HasPrefix(originalPath, normalizedPrefix) { // Update the request path by stripping the normalized prefix newPath := originalPath[len(normalizedPrefix):] if newPath == "" { newPath = "/" } // Ensure path starts with / for proper routing if !strings.HasPrefix(newPath, "/") { newPath = "/" + newPath } // Update the URL path - Echo's router uses URL.Path for routing c.Request().URL.Path = newPath c.Request().URL.RawPath = "" // Update RequestURI to match the new path (needed for proper routing) if c.Request().URL.RawQuery != "" { c.Request().RequestURI = newPath + "?" + c.Request().URL.RawQuery } else { c.Request().RequestURI = newPath } // Store original path for BaseURL utility c.Set("_original_path", originalPath) break } else if originalPath == prefix || originalPath == prefix+"/" { // Redirect to prefix with trailing slash (use 302 to match test expectations) return c.Redirect(302, normalizedPrefix) } } } return next(c) } } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/middleware/auth.go
core/http/middleware/auth.go
package middleware

import (
	"crypto/subtle"
	"errors"
	"net/http"
	"strings"

	"github.com/labstack/echo/v4"
	"github.com/labstack/echo/v4/middleware"

	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/schema"
)

// ErrMissingOrMalformedAPIKey is returned when no usable API key could be
// extracted from the request, or the extracted key failed validation.
var ErrMissingOrMalformedAPIKey = errors.New("missing or malformed API Key")

// GetKeyAuthConfig returns Echo's KeyAuth middleware configuration
func GetKeyAuthConfig(applicationConfig *config.ApplicationConfig) (echo.MiddlewareFunc, error) {
	// Create validator function
	validator := getApiKeyValidationFunction(applicationConfig)

	// Create error handler
	errorHandler := getApiKeyErrorHandler(applicationConfig)

	// Create Next function (skip middleware for certain requests)
	skipper := getApiKeyRequiredFilterFunction(applicationConfig)

	// Wrap it with our custom key lookup that checks multiple sources
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			// No keys configured: authentication is effectively disabled.
			if len(applicationConfig.ApiKeys) == 0 {
				return next(c)
			}
			// Skip if skipper says so
			if skipper != nil && skipper(c) {
				return next(c)
			}
			// Try to extract key from multiple sources
			key, err := extractKeyFromMultipleSources(c)
			if err != nil {
				return errorHandler(err, c)
			}
			// Validate the key
			valid, err := validator(key, c)
			if err != nil || !valid {
				return errorHandler(ErrMissingOrMalformedAPIKey, c)
			}
			// Store key in context for later use
			c.Set("api_key", key)
			return next(c)
		}
	}, nil
}

// extractKeyFromMultipleSources checks multiple sources for the API key
// in order: Authorization header, x-api-key header, xi-api-key header, token cookie
func extractKeyFromMultipleSources(c echo.Context) (string, error) {
	// Check Authorization header first
	auth := c.Request().Header.Get("Authorization")
	if auth != "" {
		// Check for Bearer scheme
		if strings.HasPrefix(auth, "Bearer ") {
			return strings.TrimPrefix(auth, "Bearer "), nil
		}
		// If no Bearer prefix, return as-is (for backward compatibility)
		return auth, nil
	}
	// Check x-api-key header
	if key := c.Request().Header.Get("x-api-key"); key != "" {
		return key, nil
	}
	// Check xi-api-key header
	if key := c.Request().Header.Get("xi-api-key"); key != "" {
		return key, nil
	}
	// Check token cookie
	cookie, err := c.Cookie("token")
	if err == nil && cookie != nil && cookie.Value != "" {
		return cookie.Value, nil
	}
	return "", ErrMissingOrMalformedAPIKey
}

// getApiKeyErrorHandler builds the handler invoked when key extraction or
// validation fails. The response shape depends on configuration and on the
// request's Content-Type (JSON error vs. rendered login page).
func getApiKeyErrorHandler(applicationConfig *config.ApplicationConfig) func(error, echo.Context) error {
	return func(err error, c echo.Context) error {
		if errors.Is(err, ErrMissingOrMalformedAPIKey) {
			if len(applicationConfig.ApiKeys) == 0 {
				return nil // if no keys are set up, any error we get here is not an error.
			}
			c.Response().Header().Set("WWW-Authenticate", "Bearer")
			// OpaqueErrors hides all detail from the client.
			if applicationConfig.OpaqueErrors {
				return c.NoContent(http.StatusUnauthorized)
			}
			// Check if the request content type is JSON
			contentType := c.Request().Header.Get("Content-Type")
			if strings.Contains(contentType, "application/json") {
				return c.JSON(http.StatusUnauthorized, schema.ErrorResponse{
					Error: &schema.APIError{
						Message: "An authentication key is required",
						Code:    401,
						Type:    "invalid_request_error",
					},
				})
			}
			// Non-JSON clients (browsers) get the login page instead.
			return c.Render(http.StatusUnauthorized, "views/login", map[string]interface{}{
				"BaseURL": BaseURL(c),
			})
		}
		if applicationConfig.OpaqueErrors {
			return c.NoContent(http.StatusInternalServerError)
		}
		return err
	}
}

// getApiKeyValidationFunction returns the key validator. When
// UseSubtleKeyComparison is set, keys are compared in constant time to
// mitigate timing side channels; otherwise a plain equality check is used.
func getApiKeyValidationFunction(applicationConfig *config.ApplicationConfig) func(string, echo.Context) (bool, error) {
	if applicationConfig.UseSubtleKeyComparison {
		return func(key string, c echo.Context) (bool, error) {
			if len(applicationConfig.ApiKeys) == 0 {
				return true, nil // If no keys are setup, accept everything
			}
			for _, validKey := range applicationConfig.ApiKeys {
				if subtle.ConstantTimeCompare([]byte(key), []byte(validKey)) == 1 {
					return true, nil
				}
			}
			return false, ErrMissingOrMalformedAPIKey
		}
	}
	return func(key string, c echo.Context) (bool, error) {
		if len(applicationConfig.ApiKeys) == 0 {
			return true, nil // If no keys are setup, accept everything
		}
		for _, validKey := range applicationConfig.ApiKeys {
			if key == validKey {
				return true, nil
			}
		}
		return false, ErrMissingOrMalformedAPIKey
	}
}

// getApiKeyRequiredFilterFunction returns a Skipper that exempts requests
// from authentication: paths configured in PathWithoutAuth (prefix match),
// and, when enabled, GET requests matching HttpGetExemptedEndpoints.
func getApiKeyRequiredFilterFunction(applicationConfig *config.ApplicationConfig) middleware.Skipper {
	return func(c echo.Context) bool {
		path := c.Request().URL.Path
		for _, p := range applicationConfig.PathWithoutAuth {
			if strings.HasPrefix(path, p) {
				return true
			}
		}
		// Handle GET request exemptions if enabled
		if applicationConfig.DisableApiKeyRequirementForHttpGet {
			if c.Request().Method != http.MethodGet {
				return false
			}
			for _, rx := range applicationConfig.HttpGetExemptedEndpoints {
				// Match against the registered route pattern, not the raw URL.
				if rx.MatchString(c.Path()) {
					return true
				}
			}
		}
		return false
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/middleware/baseurl_test.go
core/http/middleware/baseurl_test.go
package middleware

import (
	"net/http/httptest"

	"github.com/labstack/echo/v4"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Tests for BaseURL: it must reconstruct the externally visible base URL,
// including any reverse-proxy path prefix recorded by StripPathPrefix.
var _ = Describe("BaseURL", func() {
	Context("without prefix", func() {
		It("should return base URL without prefix", func() {
			app := echo.New()
			actualURL := ""
			// Register route - use the actual request path so routing works
			routePath := "/hello/world"
			app.GET(routePath, func(c echo.Context) error {
				actualURL = BaseURL(c)
				return nil
			})
			req := httptest.NewRequest("GET", "/hello/world", nil)
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(200), "response status code")
			// httptest.NewRequest defaults the host to example.com.
			Expect(actualURL).To(Equal("http://example.com/"), "base URL")
		})
	})
	Context("with prefix", func() {
		It("should return base URL with prefix", func() {
			app := echo.New()
			actualURL := ""
			// Register route with the stripped path (after middleware removes prefix)
			routePath := "/hello/world"
			app.GET(routePath, func(c echo.Context) error {
				// Simulate what StripPathPrefix middleware does - store original path
				c.Set("_original_path", "/myprefix/hello/world")
				// Modify the request path to simulate prefix stripping
				c.Request().URL.Path = "/hello/world"
				actualURL = BaseURL(c)
				return nil
			})
			// Make request with stripped path (middleware would have already processed it)
			req := httptest.NewRequest("GET", "/hello/world", nil)
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(200), "response status code")
			Expect(actualURL).To(Equal("http://example.com/myprefix/"), "base URL")
		})
	})
})
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/middleware/baseurl.go
core/http/middleware/baseurl.go
package middleware

import (
	"strings"

	"github.com/labstack/echo/v4"
)

// BaseURL returns the base URL for the given HTTP request context.
// It takes into account that the app may be exposed by a reverse-proxy under a different protocol, host and path.
// The returned URL is guaranteed to end with `/`.
// The method should be used in conjunction with the StripPathPrefix middleware.
func BaseURL(c echo.Context) string {
	// Route pattern as registered with the router (post-stripping).
	path := c.Path()
	origPath := c.Request().URL.Path
	// Check if StripPathPrefix middleware stored the original path
	if storedPath, ok := c.Get("_original_path").(string); ok && storedPath != "" {
		origPath = storedPath
	}
	// Check X-Forwarded-Proto for scheme; fall back to the TLS state of the
	// direct connection.
	scheme := "http"
	if c.Request().Header.Get("X-Forwarded-Proto") == "https" {
		scheme = "https"
	} else if c.Request().TLS != nil {
		scheme = "https"
	}
	// Check X-Forwarded-Host for host
	host := c.Request().Host
	if forwardedHost := c.Request().Header.Get("X-Forwarded-Host"); forwardedHost != "" {
		host = forwardedHost
	}
	// If the original path ends with the routed path, the leading remainder is
	// the proxy prefix that was stripped; re-attach it to the base URL.
	if path != origPath && strings.HasSuffix(origPath, path) && len(path) > 0 {
		prefixLen := len(origPath) - len(path)
		if prefixLen > 0 && prefixLen <= len(origPath) {
			pathPrefix := origPath[:prefixLen]
			if !strings.HasSuffix(pathPrefix, "/") {
				pathPrefix += "/"
			}
			return scheme + "://" + host + pathPrefix
		}
	}
	return scheme + "://" + host + "/"
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/middleware/strippathprefix_test.go
core/http/middleware/strippathprefix_test.go
package middleware

import (
	"net/http/httptest"

	"github.com/labstack/echo/v4"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Tests for the StripPathPrefix Pre-middleware: stripping behavior for the
// X-Forwarded-Prefix header, including multi-value headers, missing trailing
// slashes, non-matching prefixes, and the redirect for bare-prefix requests.
var _ = Describe("StripPathPrefix", func() {
	var app *echo.Echo
	var actualPath string
	var appInitialized bool

	BeforeEach(func() {
		actualPath = ""
		// The app is built once and reused across specs; only the captured
		// path is reset between them.
		if !appInitialized {
			app = echo.New()
			app.Pre(StripPathPrefix())
			app.GET("/hello/world", func(c echo.Context) error {
				actualPath = c.Request().URL.Path
				return nil
			})
			app.GET("/", func(c echo.Context) error {
				actualPath = c.Request().URL.Path
				return nil
			})
			appInitialized = true
		}
	})

	Context("without prefix", func() {
		It("should not modify path when no header is present", func() {
			req := httptest.NewRequest("GET", "/hello/world", nil)
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(200), "response status code")
			Expect(actualPath).To(Equal("/hello/world"), "rewritten path")
		})
		It("should not modify root path when no header is present", func() {
			req := httptest.NewRequest("GET", "/", nil)
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(200), "response status code")
			Expect(actualPath).To(Equal("/"), "rewritten path")
		})
		It("should not modify path when header does not match", func() {
			req := httptest.NewRequest("GET", "/hello/world", nil)
			req.Header["X-Forwarded-Prefix"] = []string{"/otherprefix/"}
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(200), "response status code")
			Expect(actualPath).To(Equal("/hello/world"), "rewritten path")
		})
	})

	Context("with prefix", func() {
		It("should return 404 when prefix does not match header", func() {
			req := httptest.NewRequest("GET", "/prefix/hello/world", nil)
			req.Header["X-Forwarded-Prefix"] = []string{"/otherprefix/"}
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(404), "response status code")
		})
		It("should strip matching prefix from path", func() {
			req := httptest.NewRequest("GET", "/myprefix/hello/world", nil)
			req.Header["X-Forwarded-Prefix"] = []string{"/myprefix/"}
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(200), "response status code")
			Expect(actualPath).To(Equal("/hello/world"), "rewritten path")
		})
		It("should strip prefix when it matches the first header value", func() {
			req := httptest.NewRequest("GET", "/myprefix/hello/world", nil)
			req.Header["X-Forwarded-Prefix"] = []string{"/myprefix/", "/otherprefix/"}
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(200), "response status code")
			Expect(actualPath).To(Equal("/hello/world"), "rewritten path")
		})
		It("should strip prefix when it matches the second header value", func() {
			req := httptest.NewRequest("GET", "/myprefix/hello/world", nil)
			req.Header["X-Forwarded-Prefix"] = []string{"/otherprefix/", "/myprefix/"}
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(200), "response status code")
			Expect(actualPath).To(Equal("/hello/world"), "rewritten path")
		})
		It("should strip prefix when header does not end with slash", func() {
			req := httptest.NewRequest("GET", "/myprefix/hello/world", nil)
			req.Header["X-Forwarded-Prefix"] = []string{"/myprefix"}
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(200), "response status code")
			Expect(actualPath).To(Equal("/hello/world"), "rewritten path")
		})
		It("should return 404 when prefix does not match header without trailing slash", func() {
			req := httptest.NewRequest("GET", "/myprefix-suffix/hello/world", nil)
			req.Header["X-Forwarded-Prefix"] = []string{"/myprefix"}
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(404), "response status code")
		})
		It("should redirect when prefix does not end with a slash", func() {
			req := httptest.NewRequest("GET", "/myprefix", nil)
			req.Header["X-Forwarded-Prefix"] = []string{"/myprefix"}
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			Expect(rec.Code).To(Equal(302), "response status code")
			Expect(rec.Header().Get("Location")).To(Equal("/myprefix/"), "redirect location")
		})
	})
})
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/edit_model.go
core/http/endpoints/localai/edit_model.go
package localai

import (
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/config"
	httpUtils "github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/internal"
	"github.com/mudler/LocalAI/pkg/utils"
	"gopkg.in/yaml.v3"
)

// GetEditModelPage renders the edit model page with current configuration
func GetEditModelPage(cl *config.ModelConfigLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		modelName := c.Param("name")
		if modelName == "" {
			response := ModelResponse{
				Success: false,
				Error:   "Model name is required",
			}
			return c.JSON(http.StatusBadRequest, response)
		}
		modelConfig, exists := cl.GetModelConfig(modelName)
		if !exists {
			response := ModelResponse{
				Success: false,
				Error:   "Model configuration not found",
			}
			return c.JSON(http.StatusNotFound, response)
		}
		modelConfigFile := modelConfig.GetModelConfigFile()
		if modelConfigFile == "" {
			response := ModelResponse{
				Success: false,
				Error:   "Model configuration file not found",
			}
			return c.JSON(http.StatusNotFound, response)
		}
		// The raw YAML is shown to the user in the editor.
		configData, err := os.ReadFile(modelConfigFile)
		if err != nil {
			response := ModelResponse{
				Success: false,
				Error:   "Failed to read configuration file: " + err.Error(),
			}
			return c.JSON(http.StatusInternalServerError, response)
		}
		// Render the edit page with the current configuration
		// NOTE(review): ConfigJSON is declared but never populated here —
		// presumably filled client-side or unused by the template; confirm.
		templateData := struct {
			Title      string
			ModelName  string
			Config     *config.ModelConfig
			ConfigJSON string
			ConfigYAML string
			BaseURL    string
			Version    string
		}{
			Title:      "LocalAI - Edit Model " + modelName,
			ModelName:  modelName,
			Config:     &modelConfig,
			ConfigYAML: string(configData),
			BaseURL:    httpUtils.BaseURL(c),
			Version:    internal.PrintableVersion(),
		}
		return c.Render(http.StatusOK, "views/model-editor", templateData)
	}
}

// EditModelEndpoint handles updating existing model configurations
func EditModelEndpoint(cl *config.ModelConfigLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		modelName := c.Param("name")
		if modelName == "" {
			response := ModelResponse{
				Success: false,
				Error:   "Model name is required",
			}
			return c.JSON(http.StatusBadRequest, response)
		}
		modelConfig, exists := cl.GetModelConfig(modelName)
		if !exists {
			response := ModelResponse{
				Success: false,
				Error:   "Existing model configuration not found",
			}
			return c.JSON(http.StatusNotFound, response)
		}
		// Get the raw body
		body, err := io.ReadAll(c.Request().Body)
		if err != nil {
			response := ModelResponse{
				Success: false,
				Error:   "Failed to read request body: " + err.Error(),
			}
			return c.JSON(http.StatusBadRequest, response)
		}
		if len(body) == 0 {
			response := ModelResponse{
				Success: false,
				Error:   "Request body is empty",
			}
			return c.JSON(http.StatusBadRequest, response)
		}
		// Check content to see if it's a valid model config
		var req config.ModelConfig
		// Parse YAML
		if err := yaml.Unmarshal(body, &req); err != nil {
			response := ModelResponse{
				Success: false,
				Error:   "Failed to parse YAML: " + err.Error(),
			}
			return c.JSON(http.StatusBadRequest, response)
		}
		// Validate required fields
		if req.Name == "" {
			response := ModelResponse{
				Success: false,
				Error:   "Name is required",
			}
			return c.JSON(http.StatusBadRequest, response)
		}
		// Validate the configuration
		if valid, _ := req.Validate(); !valid {
			response := ModelResponse{
				Success: false,
				Error:   "Validation failed",
				Details: []string{"Configuration validation failed. Please check your YAML syntax and required fields."},
			}
			return c.JSON(http.StatusBadRequest, response)
		}
		// Load the existing configuration
		// Path-traversal guard: the target file must live under the models path.
		configPath := modelConfig.GetModelConfigFile()
		if err := utils.VerifyPath(configPath, appConfig.SystemState.Model.ModelsPath); err != nil {
			response := ModelResponse{
				Success: false,
				Error:   "Model configuration not trusted: " + err.Error(),
			}
			return c.JSON(http.StatusNotFound, response)
		}
		// Write new content to file
		if err := os.WriteFile(configPath, body, 0644); err != nil {
			response := ModelResponse{
				Success: false,
				Error:   "Failed to write configuration file: " + err.Error(),
			}
			return c.JSON(http.StatusInternalServerError, response)
		}
		// Reload configurations so the in-memory registry reflects the new file
		if err := cl.LoadModelConfigsFromPath(appConfig.SystemState.Model.ModelsPath, appConfig.ToConfigLoaderOptions()...); err != nil {
			response := ModelResponse{
				Success: false,
				Error:   "Failed to reload configurations: " + err.Error(),
			}
			return c.JSON(http.StatusInternalServerError, response)
		}
		// Preload the model
		if err := cl.Preload(appConfig.SystemState.Model.ModelsPath); err != nil {
			response := ModelResponse{
				Success: false,
				Error:   "Failed to preload model: " + err.Error(),
			}
			return c.JSON(http.StatusInternalServerError, response)
		}
		// Return success response
		response := ModelResponse{
			Success:  true,
			Message:  fmt.Sprintf("Model '%s' updated successfully", modelName),
			Filename: configPath,
			Config:   req,
		}
		return c.JSON(200, response)
	}
}

// ReloadModelsEndpoint handles reloading model configurations from disk
func ReloadModelsEndpoint(cl *config.ModelConfigLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		// Reload configurations
		if err := cl.LoadModelConfigsFromPath(appConfig.SystemState.Model.ModelsPath, appConfig.ToConfigLoaderOptions()...); err != nil {
			response := ModelResponse{
				Success: false,
				Error:   "Failed to reload configurations: " + err.Error(),
			}
			return c.JSON(http.StatusInternalServerError, response)
		}
		// Preload the models
		if err := cl.Preload(appConfig.SystemState.Model.ModelsPath); err != nil {
			response := ModelResponse{
				Success: false,
				Error:   "Failed to preload models: " + err.Error(),
			}
			return c.JSON(http.StatusInternalServerError, response)
		}
		// Return success response
		response := ModelResponse{
			Success: true,
			Message: "Model configurations reloaded successfully",
		}
		return c.JSON(http.StatusOK, response)
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/get_token_metrics.go
core/http/endpoints/localai/get_token_metrics.go
package localai import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/backend" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/xlog" "github.com/mudler/LocalAI/pkg/model" ) // TODO: This is not yet in use. Needs middleware rework, since it is not referenced. // TokenMetricsEndpoint is an endpoint to get TokensProcessed Per Second for Active SlotID // // @Summary Get TokenMetrics for Active Slot. // @Accept json // @Produce audio/x-wav // @Success 200 {string} binary "generated audio/wav file" // @Router /v1/tokenMetrics [get] // @Router /tokenMetrics [get] func TokenMetricsEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input := new(schema.TokenMetricsRequest) // Get input data from the request body if err := c.Bind(input); err != nil { return err } modelFile, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_NAME).(string) if !ok || modelFile != "" { modelFile = input.Model xlog.Warn("Model not found in context", "model", input.Model) } cfg, err := cl.LoadModelConfigFileByNameDefaultOptions(modelFile, appConfig) if err != nil { xlog.Error("Error loading model config", "error", err) modelFile = input.Model xlog.Warn("Model not found in context", "model", input.Model) } else { modelFile = cfg.Model } xlog.Debug("Token Metrics for model", "model", modelFile) response, err := backend.TokenMetrics(modelFile, ml, appConfig, *cfg) if err != nil { return err } return c.JSON(200, response) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/edit_model_test.go
core/http/endpoints/localai/edit_model_test.go
package localai_test

import (
	"bytes"
	"encoding/json"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"

	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/config"
	. "github.com/mudler/LocalAI/core/http/endpoints/localai"
	"github.com/mudler/LocalAI/pkg/system"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// testRenderer is a simple renderer for tests that returns JSON
type testRenderer struct{}

// Render implements echo.Renderer by JSON-encoding the template data,
// so assertions can inspect the payload without real HTML templates.
func (t *testRenderer) Render(w io.Writer, name string, data interface{}, c echo.Context) error {
	// For tests, just return the data as JSON
	return json.NewEncoder(w).Encode(data)
}

var _ = Describe("Edit Model test", func() {
	var tempDir string

	BeforeEach(func() {
		var err error
		// Each spec gets its own scratch models directory.
		tempDir, err = os.MkdirTemp("", "localai-test")
		Expect(err).ToNot(HaveOccurred())
	})

	AfterEach(func() {
		os.RemoveAll(tempDir)
	})

	Context("Edit Model endpoint", func() {
		It("should edit a model", func() {
			systemState, err := system.GetSystemState(
				system.WithModelPath(filepath.Join(tempDir)),
			)
			Expect(err).ToNot(HaveOccurred())
			applicationConfig := config.NewApplicationConfig(
				config.WithSystemState(systemState),
			)
			//modelLoader := model.NewModelLoader(systemState, true)
			modelConfigLoader := config.NewModelConfigLoader(systemState.Model.ModelsPath)
			// Define Echo app and register all routes upfront
			app := echo.New()
			// Set up a simple renderer for the test
			app.Renderer = &testRenderer{}
			app.POST("/import-model", ImportModelEndpoint(modelConfigLoader, applicationConfig))
			app.GET("/edit-model/:name", GetEditModelPage(modelConfigLoader, applicationConfig))

			// First create a model via the import endpoint...
			requestBody := bytes.NewBufferString(`{"name": "foo", "backend": "foo", "model": "foo"}`)
			req := httptest.NewRequest("POST", "/import-model", requestBody)
			req.Header.Set("Content-Type", "application/json")
			rec := httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			body, err := io.ReadAll(rec.Body)
			Expect(err).ToNot(HaveOccurred())
			Expect(string(body)).To(ContainSubstring("Model configuration created successfully"))
			Expect(rec.Code).To(Equal(http.StatusOK))

			// ...then fetch its edit page and verify the config round-trips.
			req = httptest.NewRequest("GET", "/edit-model/foo", nil)
			rec = httptest.NewRecorder()
			app.ServeHTTP(rec, req)
			body, err = io.ReadAll(rec.Body)
			Expect(err).ToNot(HaveOccurred())
			// The response contains the model configuration with backend field
			Expect(string(body)).To(ContainSubstring(`"backend":"foo"`))
			Expect(string(body)).To(ContainSubstring(`"name":"foo"`))
			Expect(rec.Code).To(Equal(http.StatusOK))
		})
	})
})
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/metrics.go
core/http/endpoints/localai/metrics.go
package localai

import (
	"time"

	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/services"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// LocalAIMetricsEndpoint returns the metrics endpoint for LocalAI
// @Summary Prometheus metrics endpoint
// @Produce plain
// @Success 200 {string} string "Prometheus exposition-format metrics"
// @Router /metrics [get]
func LocalAIMetricsEndpoint() echo.HandlerFunc {
	return echo.WrapHandler(promhttp.Handler())
}

// apiMiddlewareConfig bundles the metrics service with a Filter that decides
// which requests to exclude from instrumentation.
type apiMiddlewareConfig struct {
	Filter         func(c echo.Context) bool
	metricsService *services.LocalAIMetricsService
}

// LocalAIMetricsAPIMiddleware returns middleware that records the duration of
// every API call (by method and route pattern) into the metrics service.
// Requests to /metrics itself are skipped to avoid self-instrumentation.
func LocalAIMetricsAPIMiddleware(metrics *services.LocalAIMetricsService) echo.MiddlewareFunc {
	cfg := apiMiddlewareConfig{
		metricsService: metrics,
		Filter: func(c echo.Context) bool {
			return c.Path() == "/metrics"
		},
	}
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			if cfg.Filter != nil && cfg.Filter(c) {
				return next(c)
			}
			path := c.Path()
			method := c.Request().Method
			start := time.Now()
			err := next(c)
			// Elapsed wall-clock time in (fractional) seconds.
			elapsed := float64(time.Since(start)) / float64(time.Second)
			cfg.metricsService.ObserveAPICall(method, path, elapsed)
			return err
		}
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/p2p.go
core/http/endpoints/localai/p2p.go
package localai

import (
	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/p2p"
	"github.com/mudler/LocalAI/core/schema"
)

// ShowP2PNodes returns the P2P Nodes
// @Summary Returns available P2P nodes
// @Success 200 {object} []schema.P2PNodesResponse "Response"
// @Router /api/p2p [get]
func ShowP2PNodes(appConfig *config.ApplicationConfig) echo.HandlerFunc {
	// Render index
	return func(c echo.Context) error {
		// Worker and federated nodes are discovered on separate network IDs
		// derived from the configured P2P network name.
		return c.JSON(200, schema.P2PNodesResponse{
			Nodes:          p2p.GetAvailableNodes(p2p.NetworkID(appConfig.P2PNetworkID, p2p.WorkerID)),
			FederatedNodes: p2p.GetAvailableNodes(p2p.NetworkID(appConfig.P2PNetworkID, p2p.FederatedID)),
		})
	}
}

// ShowP2PToken returns the P2P token
// @Summary Show the P2P token
// @Success 200 {string} string "Response"
// @Router /api/p2p/token [get]
func ShowP2PToken(appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		return c.String(200, appConfig.P2PToken)
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/tts.go
core/http/endpoints/localai/tts.go
package localai

import (
	"path/filepath"

	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/backend"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/pkg/model"

	"github.com/mudler/LocalAI/core/schema"
	"github.com/mudler/xlog"

	"github.com/mudler/LocalAI/pkg/utils"
)

// TTSEndpoint is the OpenAI Speech API endpoint https://platform.openai.com/docs/api-reference/audio/createSpeech
//
// @Summary Generates audio from the input text.
// @Accept json
// @Produce audio/x-wav
// @Param request body schema.TTSRequest true "query params"
// @Success 200 {string} binary "generated audio/wav file"
// @Router /v1/audio/speech [post]
// @Router /tts [post]
func TTSEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		// Request and resolved model config are injected by upstream middleware.
		input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.TTSRequest)
		if !ok || input.Model == "" {
			return echo.ErrBadRequest
		}
		cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
		if !ok || cfg == nil {
			return echo.ErrBadRequest
		}

		xlog.Debug("LocalAI TTS Request received", "model", input.Model)

		// Request-level overrides take precedence over the stored config.
		if cfg.Backend == "" && input.Backend != "" {
			cfg.Backend = input.Backend
		}
		if input.Language != "" {
			cfg.Language = input.Language
		}
		if input.Voice != "" {
			cfg.Voice = input.Voice
		}

		filePath, _, err := backend.ModelTTS(input.Input, cfg.Voice, cfg.Language, ml, appConfig, *cfg)
		if err != nil {
			return err
		}

		// Convert generated file to target format
		filePath, err = utils.AudioConvert(filePath, input.Format)
		if err != nil {
			return err
		}

		return c.Attachment(filePath, filepath.Base(filePath))
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/types.go
core/http/endpoints/localai/types.go
package localai // ModelResponse represents the common response structure for model operations type ModelResponse struct { Success bool `json:"success"` Message string `json:"message"` Filename string `json:"filename,omitempty"` Config interface{} `json:"config,omitempty"` Error string `json:"error,omitempty"` Details []string `json:"details,omitempty"` }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/localai_suite_test.go
core/http/endpoints/localai/localai_suite_test.go
package localai_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// TestLocalAIEndpoints is the Ginkgo entry point: it wires Gomega's failure
// handler into Ginkgo and runs every spec registered in this package.
func TestLocalAIEndpoints(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "LocalAI Endpoints test suite")
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/vad.go
core/http/endpoints/localai/vad.go
package localai

import (
	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/backend"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/core/schema"
	"github.com/mudler/LocalAI/pkg/model"
	"github.com/mudler/xlog"
)

// VADEndpoint is Voice-Activation-Detection endpoint
// @Summary Detect voice fragments in an audio stream
// @Accept json
// @Param request body schema.VADRequest true "query params"
// @Success 200 {object} proto.VADResponse "Response"
// @Router /vad [post]
func VADEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		// Request and resolved model config are injected by upstream middleware.
		input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.VADRequest)
		if !ok || input.Model == "" {
			return echo.ErrBadRequest
		}
		cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
		if !ok || cfg == nil {
			return echo.ErrBadRequest
		}

		xlog.Debug("LocalAI VAD Request received", "model", input.Model)

		// The request context propagates client cancellation to the backend.
		resp, err := backend.VAD(input, c.Request().Context(), ml, appConfig, *cfg)
		if err != nil {
			return err
		}

		return c.JSON(200, resp)
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/detection.go
core/http/endpoints/localai/detection.go
package localai import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/backend" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/LocalAI/pkg/utils" "github.com/mudler/xlog" ) // DetectionEndpoint is the LocalAI Detection endpoint https://localai.io/docs/api-reference/detection // @Summary Detects objects in the input image. // @Param request body schema.DetectionRequest true "query params" // @Success 200 {object} schema.DetectionResponse "Response" // @Router /v1/detection [post] func DetectionEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.DetectionRequest) if !ok || input.Model == "" { return echo.ErrBadRequest } cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig) if !ok || cfg == nil { return echo.ErrBadRequest } xlog.Debug("Detection", "image", input.Image, "modelFile", "modelFile", "backend", cfg.Backend) image, err := utils.GetContentURIAsBase64(input.Image) if err != nil { return err } res, err := backend.Detection(image, ml, appConfig, *cfg) if err != nil { return err } response := schema.DetectionResponse{ Detections: make([]schema.Detection, len(res.Detections)), } for i, detection := range res.Detections { response.Detections[i] = schema.Detection{ X: detection.X, Y: detection.Y, Width: detection.Width, Height: detection.Height, ClassName: detection.ClassName, } } return c.JSON(200, response) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/mcp.go
core/http/endpoints/localai/mcp.go
package localai

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"time"

	"github.com/labstack/echo/v4"
	"github.com/mudler/LocalAI/core/config"
	mcpTools "github.com/mudler/LocalAI/core/http/endpoints/mcp"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/core/schema"
	"github.com/mudler/LocalAI/core/templates"
	"github.com/mudler/LocalAI/pkg/model"
	"github.com/mudler/cogito"
	"github.com/mudler/xlog"
)

// MCP SSE Event Types
//
// Each struct below is marshalled to JSON and emitted as one SSE "data:"
// frame by the streaming branch of MCPEndpoint. The Type field is the
// discriminator clients should switch on.

// MCPReasoningEvent carries a chunk of intermediate model reasoning.
type MCPReasoningEvent struct {
	Type    string `json:"type"`
	Content string `json:"content"`
}

// MCPToolCallEvent announces that the agent decided to invoke a tool,
// including the arguments chosen and the reasoning behind the call.
type MCPToolCallEvent struct {
	Type      string                 `json:"type"`
	Name      string                 `json:"name"`
	Arguments map[string]interface{} `json:"arguments"`
	Reasoning string                 `json:"reasoning"`
}

// MCPToolResultEvent carries the textual result of a completed tool call.
type MCPToolResultEvent struct {
	Type   string `json:"type"`
	Name   string `json:"name"`
	Result string `json:"result"`
}

// MCPStatusEvent carries a free-form progress/status message.
type MCPStatusEvent struct {
	Type    string `json:"type"`
	Message string `json:"message"`
}

// MCPAssistantEvent carries the final assistant response content.
type MCPAssistantEvent struct {
	Type    string `json:"type"`
	Content string `json:"content"`
}

// MCPErrorEvent reports a failure during tool execution or generation.
type MCPErrorEvent struct {
	Type    string `json:"type"`
	Message string `json:"message"`
}

// MCPEndpoint is the endpoint for MCP chat completions. Supports SSE mode, but it is not compatible with the OpenAI apis.
// @Summary Stream MCP chat completions with reasoning, tool calls, and results // @Param request body schema.OpenAIRequest true "query params" // @Success 200 {object} schema.OpenAIResponse "Response" // @Router /v1/mcp/chat/completions [post] func MCPEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator *templates.Evaluator, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { ctx := c.Request().Context() created := int(time.Now().Unix()) // Handle Correlation id := c.Request().Header.Get("X-Correlation-ID") if id == "" { id = fmt.Sprintf("mcp-%d", time.Now().UnixNano()) } input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenAIRequest) if !ok || input.Model == "" { return echo.ErrBadRequest } config, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig) if !ok || config == nil { return echo.ErrBadRequest } if config.MCP.Servers == "" && config.MCP.Stdio == "" { return fmt.Errorf("no MCP servers configured") } // Get MCP config from model config remote, stdio, err := config.MCP.MCPConfigFromYAML() if err != nil { return fmt.Errorf("failed to get MCP config: %w", err) } // Check if we have tools in cache, or we have to have an initial connection sessions, err := mcpTools.SessionsFromMCPConfig(config.Name, remote, stdio) if err != nil { return fmt.Errorf("failed to get MCP sessions: %w", err) } if len(sessions) == 0 { return fmt.Errorf("no working MCP servers found") } // Build fragment from messages fragment := cogito.NewEmptyFragment() for _, message := range input.Messages { fragment = fragment.AddMessage(message.Role, message.StringContent) } _, port, err := net.SplitHostPort(appConfig.APIAddress) if err != nil { return err } apiKey := "" if len(appConfig.ApiKeys) > 0 { apiKey = appConfig.ApiKeys[0] } ctxWithCancellation, cancel := context.WithCancel(ctx) defer cancel() // TODO: instead of connecting to the API, we should just wire this internally // and 
act like completion.go. // We can do this as cogito expects an interface and we can create one that // we satisfy to just call internally ComputeChoices defaultLLM := cogito.NewOpenAILLM(config.Name, apiKey, "http://127.0.0.1:"+port) // Build cogito options using the consolidated method cogitoOpts := config.BuildCogitoOptions() cogitoOpts = append( cogitoOpts, cogito.WithContext(ctxWithCancellation), cogito.WithMCPs(sessions...), ) // Check if streaming is requested toStream := input.Stream if !toStream { // Non-streaming mode: execute synchronously and return JSON response cogitoOpts = append( cogitoOpts, cogito.WithStatusCallback(func(s string) { xlog.Debug("[model agent] Status", "model", config.Name, "status", s) }), cogito.WithReasoningCallback(func(s string) { xlog.Debug("[model agent] Reasoning", "model", config.Name, "reasoning", s) }), cogito.WithToolCallBack(func(t *cogito.ToolChoice, state *cogito.SessionState) cogito.ToolCallDecision { xlog.Debug("[model agent] Tool call", "model", config.Name, "tool", t.Name, "reasoning", t.Reasoning, "arguments", t.Arguments) return cogito.ToolCallDecision{ Approved: true, } }), cogito.WithToolCallResultCallback(func(t cogito.ToolStatus) { xlog.Debug("[model agent] Tool call result", "model", config.Name, "tool", t.Name, "result", t.Result, "tool_arguments", t.ToolArguments) }), ) f, err := cogito.ExecuteTools( defaultLLM, fragment, cogitoOpts..., ) if err != nil && !errors.Is(err, cogito.ErrNoToolSelected) { return err } f, err = defaultLLM.Ask(ctxWithCancellation, f) if err != nil { return err } resp := &schema.OpenAIResponse{ ID: id, Created: created, Model: input.Model, // we have to return what the user sent here, due to OpenAI spec. 
Choices: []schema.Choice{{Message: &schema.Message{Role: "assistant", Content: &f.LastMessage().Content}}}, Object: "chat.completion", } jsonResult, _ := json.Marshal(resp) xlog.Debug("Response", "response", string(jsonResult)) // Return the prediction in the response body return c.JSON(200, resp) } // Streaming mode: use SSE // Set up SSE headers c.Response().Header().Set("Content-Type", "text/event-stream") c.Response().Header().Set("Cache-Control", "no-cache") c.Response().Header().Set("Connection", "keep-alive") c.Response().Header().Set("X-Correlation-ID", id) // Create channel for streaming events events := make(chan interface{}) ended := make(chan error, 1) // Set up callbacks for streaming statusCallback := func(s string) { events <- MCPStatusEvent{ Type: "status", Message: s, } } reasoningCallback := func(s string) { events <- MCPReasoningEvent{ Type: "reasoning", Content: s, } } toolCallCallback := func(t *cogito.ToolChoice, state *cogito.SessionState) cogito.ToolCallDecision { events <- MCPToolCallEvent{ Type: "tool_call", Name: t.Name, Arguments: t.Arguments, Reasoning: t.Reasoning, } return cogito.ToolCallDecision{ Approved: true, } } toolCallResultCallback := func(t cogito.ToolStatus) { events <- MCPToolResultEvent{ Type: "tool_result", Name: t.Name, Result: t.Result, } } cogitoOpts = append(cogitoOpts, cogito.WithStatusCallback(statusCallback), cogito.WithReasoningCallback(reasoningCallback), cogito.WithToolCallBack(toolCallCallback), cogito.WithToolCallResultCallback(toolCallResultCallback), ) // Execute tools in a goroutine go func() { defer close(events) f, err := cogito.ExecuteTools( defaultLLM, fragment, cogitoOpts..., ) if err != nil && !errors.Is(err, cogito.ErrNoToolSelected) { events <- MCPErrorEvent{ Type: "error", Message: fmt.Sprintf("Failed to execute tools: %v", err), } ended <- err return } // Get final response f, err = defaultLLM.Ask(ctxWithCancellation, f) if err != nil { events <- MCPErrorEvent{ Type: "error", Message: 
fmt.Sprintf("Failed to get response: %v", err), } ended <- err return } // Stream final assistant response content := f.LastMessage().Content events <- MCPAssistantEvent{ Type: "assistant", Content: content, } ended <- nil }() // Stream events to client LOOP: for { select { case <-ctx.Done(): // Context was cancelled (client disconnected or request cancelled) xlog.Debug("Request context cancelled, stopping stream") cancel() break LOOP case event := <-events: if event == nil { // Channel closed break LOOP } eventData, err := json.Marshal(event) if err != nil { xlog.Debug("Failed to marshal event", "error", err) continue } xlog.Debug("Sending event", "event", string(eventData)) _, err = fmt.Fprintf(c.Response().Writer, "data: %s\n\n", string(eventData)) if err != nil { xlog.Debug("Sending event failed", "error", err) cancel() return err } c.Response().Flush() case err := <-ended: if err == nil { // Send done signal fmt.Fprintf(c.Response().Writer, "data: [DONE]\n\n") c.Response().Flush() break LOOP } xlog.Error("Stream ended with error", "error", err) errorEvent := MCPErrorEvent{ Type: "error", Message: err.Error(), } errorData, marshalErr := json.Marshal(errorEvent) if marshalErr != nil { fmt.Fprintf(c.Response().Writer, "data: {\"type\":\"error\",\"message\":\"Internal error\"}\n\n") } else { fmt.Fprintf(c.Response().Writer, "data: %s\n\n", string(errorData)) } fmt.Fprintf(c.Response().Writer, "data: [DONE]\n\n") c.Response().Flush() return nil } } xlog.Debug("Stream ended") return nil } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/settings.go
core/http/endpoints/localai/settings.go
package localai

import (
	"encoding/json"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"github.com/labstack/echo/v4"
	"github.com/mudler/LocalAI/core/application"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/p2p"
	"github.com/mudler/LocalAI/core/schema"
	"github.com/mudler/xlog"
)

// GetSettingsEndpoint returns current settings with precedence (env > file > defaults)
func GetSettingsEndpoint(app *application.Application) echo.HandlerFunc {
	return func(c echo.Context) error {
		appConfig := app.ApplicationConfig()
		settings := appConfig.ToRuntimeSettings()
		return c.JSON(http.StatusOK, settings)
	}
}

// validateDurationSetting returns a non-empty error message when value is set
// but does not parse as a time.Duration. A nil value (setting not provided)
// is valid. The message format matches the previous inline checks.
func validateDurationSetting(name string, value *string) string {
	if value == nil {
		return ""
	}
	if _, err := time.ParseDuration(*value); err != nil {
		return "Invalid " + name + " format: " + err.Error()
	}
	return ""
}

// UpdateSettingsEndpoint updates settings, saves to file, and applies immediately
func UpdateSettingsEndpoint(app *application.Application) echo.HandlerFunc {
	return func(c echo.Context) error {
		appConfig := app.ApplicationConfig()
		startupConfig := app.StartupConfig()
		if startupConfig == nil {
			startupConfig = appConfig
		}

		body, err := io.ReadAll(c.Request().Body)
		if err != nil {
			return c.JSON(http.StatusBadRequest, schema.SettingsResponse{
				Success: false,
				Error:   "Failed to read request body: " + err.Error(),
			})
		}

		var settings config.RuntimeSettings
		if err := json.Unmarshal(body, &settings); err != nil {
			return c.JSON(http.StatusBadRequest, schema.SettingsResponse{
				Success: false,
				Error:   "Failed to parse JSON: " + err.Error(),
			})
		}

		// Validate all duration-typed settings up front, before anything is
		// persisted or applied (previously four copies of the same check).
		for _, d := range []struct {
			name  string
			value *string
		}{
			{"watchdog_idle_timeout", settings.WatchdogIdleTimeout},
			{"watchdog_busy_timeout", settings.WatchdogBusyTimeout},
			{"watchdog_interval", settings.WatchdogInterval},
			{"lru_eviction_retry_interval", settings.LRUEvictionRetryInterval},
		} {
			if msg := validateDurationSetting(d.name, d.value); msg != "" {
				return c.JSON(http.StatusBadRequest, schema.SettingsResponse{
					Success: false,
					Error:   msg,
				})
			}
		}

		// Save to file so the settings survive a restart.
		if appConfig.DynamicConfigsDir == "" {
			return c.JSON(http.StatusBadRequest, schema.SettingsResponse{
				Success: false,
				Error:   "DynamicConfigsDir is not set",
			})
		}
		settingsFile := filepath.Join(appConfig.DynamicConfigsDir, "runtime_settings.json")
		settingsJSON, err := json.MarshalIndent(settings, "", " ")
		if err != nil {
			return c.JSON(http.StatusInternalServerError, schema.SettingsResponse{
				Success: false,
				Error:   "Failed to marshal settings: " + err.Error(),
			})
		}
		if err := os.WriteFile(settingsFile, settingsJSON, 0600); err != nil {
			return c.JSON(http.StatusInternalServerError, schema.SettingsResponse{
				Success: false,
				Error:   "Failed to write settings file: " + err.Error(),
			})
		}

		// Apply settings using centralized method; reports whether the
		// watchdog needs a restart to pick up the change.
		watchdogChanged := appConfig.ApplyRuntimeSettings(&settings)

		// Handle API keys specially (merge with startup keys)
		if settings.ApiKeys != nil {
			envKeys := startupConfig.ApiKeys
			runtimeKeys := *settings.ApiKeys
			appConfig.ApiKeys = append(envKeys, runtimeKeys...)
		}

		// Update watchdog dynamically for settings that don't require restart
		if settings.ForceEvictionWhenBusy != nil {
			currentWD := app.ModelLoader().GetWatchDog()
			if currentWD != nil {
				currentWD.SetForceEvictionWhenBusy(*settings.ForceEvictionWhenBusy)
				xlog.Info("Updated watchdog force eviction when busy setting", "forceEvictionWhenBusy", *settings.ForceEvictionWhenBusy)
			}
		}

		// Update ModelLoader LRU eviction retry settings dynamically.
		// Durations were validated above, so ParseDuration cannot fail here
		// for user-provided values; the error branch keeps the app default.
		maxRetries := appConfig.LRUEvictionMaxRetries
		retryInterval := appConfig.LRUEvictionRetryInterval
		if settings.LRUEvictionMaxRetries != nil {
			maxRetries = *settings.LRUEvictionMaxRetries
		}
		if settings.LRUEvictionRetryInterval != nil {
			if dur, err := time.ParseDuration(*settings.LRUEvictionRetryInterval); err == nil {
				retryInterval = dur
			}
		}
		if settings.LRUEvictionMaxRetries != nil || settings.LRUEvictionRetryInterval != nil {
			app.ModelLoader().SetLRUEvictionRetrySettings(maxRetries, retryInterval)
			xlog.Info("Updated LRU eviction retry settings", "maxRetries", maxRetries, "retryInterval", retryInterval)
		}

		// Check if agent job retention changed
		agentJobChanged := settings.AgentJobRetentionDays != nil

		// Restart watchdog if settings changed
		if watchdogChanged {
			if settings.WatchdogEnabled != nil && !*settings.WatchdogEnabled {
				if err := app.StopWatchdog(); err != nil {
					xlog.Error("Failed to stop watchdog", "error", err)
					return c.JSON(http.StatusInternalServerError, schema.SettingsResponse{
						Success: false,
						Error:   "Settings saved but failed to stop watchdog: " + err.Error(),
					})
				}
			} else {
				if err := app.RestartWatchdog(); err != nil {
					xlog.Error("Failed to restart watchdog", "error", err)
					return c.JSON(http.StatusInternalServerError, schema.SettingsResponse{
						Success: false,
						Error:   "Settings saved but failed to restart watchdog: " + err.Error(),
					})
				}
			}
		}

		// Restart agent job service if retention days changed
		if agentJobChanged {
			if err := app.RestartAgentJobService(); err != nil {
				xlog.Error("Failed to restart agent job service", "error", err)
				return c.JSON(http.StatusInternalServerError, schema.SettingsResponse{
					Success: false,
					Error:   "Settings saved but failed to restart agent job service: " + err.Error(),
				})
			}
		}

		// Restart P2P if P2P settings changed. An empty token means "stop";
		// the magic token "0" requests generation of a fresh token.
		p2pChanged := settings.P2PToken != nil || settings.P2PNetworkID != nil || settings.Federated != nil
		if p2pChanged {
			if settings.P2PToken != nil && *settings.P2PToken == "" {
				if err := app.StopP2P(); err != nil {
					xlog.Error("Failed to stop P2P", "error", err)
					return c.JSON(http.StatusInternalServerError, schema.SettingsResponse{
						Success: false,
						Error:   "Settings saved but failed to stop P2P: " + err.Error(),
					})
				}
			} else {
				if settings.P2PToken != nil && *settings.P2PToken == "0" {
					token := p2p.GenerateToken(60, 60)
					settings.P2PToken = &token
					appConfig.P2PToken = token
				}
				if err := app.RestartP2P(); err != nil {
					xlog.Error("Failed to restart P2P", "error", err)
					return c.JSON(http.StatusInternalServerError, schema.SettingsResponse{
						Success: false,
						Error:   "Settings saved but failed to restart P2P: " + err.Error(),
					})
				}
			}
		}

		return c.JSON(http.StatusOK, schema.SettingsResponse{
			Success: true,
			Message: "Settings updated successfully",
		})
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/system.go
core/http/endpoints/localai/system.go
package localai import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/model" ) // SystemInformations returns the system informations // @Summary Show the LocalAI instance information // @Success 200 {object} schema.SystemInformationResponse "Response" // @Router /system [get] func SystemInformations(ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { availableBackends := []string{} loadedModels := ml.ListLoadedModels() for b := range appConfig.ExternalGRPCBackends { availableBackends = append(availableBackends, b) } for b := range ml.GetAllExternalBackends(nil) { availableBackends = append(availableBackends, b) } sysmodels := []schema.SysInfoModel{} for _, m := range loadedModels { sysmodels = append(sysmodels, schema.SysInfoModel{ID: m.ID}) } return c.JSON(200, schema.SystemInformationResponse{ Backends: availableBackends, Models: sysmodels, }, ) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/welcome.go
core/http/endpoints/localai/welcome.go
package localai

import (
	"strings"

	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/gallery"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/core/services"
	"github.com/mudler/LocalAI/internal"
	"github.com/mudler/LocalAI/pkg/model"
)

// WelcomeEndpoint serves the landing/manage pages (HTML) or an instance
// summary (JSON), depending on the request's Accept/Content-Type headers.
func WelcomeEndpoint(appConfig *config.ApplicationConfig, cl *config.ModelConfigLoader, ml *model.ModelLoader, opcache *services.OpCache) echo.HandlerFunc {
	return func(c echo.Context) error {
		configs := cl.GetAllModelsConfigs()

		installedBackends, err := gallery.ListSystemBackends(appConfig.SystemState)
		if err != nil {
			return err
		}

		// Attach the local gallery metadata for every configured model that
		// has one; models without gallery metadata are simply skipped.
		galleryConfigs := map[string]*gallery.ModelConfig{}
		for _, cfg := range configs {
			if gc, err := gallery.GetLocalModelConfiguration(ml.ModelPath, cfg.Name); err == nil {
				galleryConfigs[cfg.Name] = gc
			}
		}

		// Index of models currently loaded in memory, keyed by model ID.
		loadedModelsMap := map[string]bool{}
		for _, loaded := range ml.ListLoadedModels() {
			loadedModelsMap[loaded.ID] = true
		}

		modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)

		// Get model statuses to display in the UI the operation in progress
		processingModels, taskTypes := opcache.GetStatus()

		summary := map[string]interface{}{
			"Title":                  "LocalAI API - " + internal.PrintableVersion(),
			"Version":                internal.PrintableVersion(),
			"BaseURL":                middleware.BaseURL(c),
			"Models":                 modelsWithoutConfig,
			"ModelsConfig":           configs,
			"GalleryConfig":          galleryConfigs,
			"ApplicationConfig":      appConfig,
			"ProcessingModels":       processingModels,
			"TaskTypes":              taskTypes,
			"LoadedModels":           loadedModelsMap,
			"InstalledBackends":      installedBackends,
			"DisableRuntimeSettings": appConfig.DisableRuntimeSettings,
		}

		// Default to HTML if Accept header is empty (browser behavior);
		// only return JSON if explicitly requested or Content-Type is JSON.
		contentType := c.Request().Header.Get("Content-Type")
		accept := c.Request().Header.Get("Accept")
		wantsJSON := strings.Contains(contentType, "application/json") ||
			(accept != "" && !strings.Contains(accept, "text/html"))
		if wantsJSON {
			return c.JSON(200, summary)
		}

		// Pick the manage template for the /manage route, the index otherwise.
		templateName := "views/index"
		if strings.HasSuffix(c.Request().URL.Path, "/manage") || c.Request().URL.Path == "/manage" {
			templateName = "views/manage"
		}
		return c.Render(200, templateName, summary)
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/gallery.go
core/http/endpoints/localai/gallery.go
package localai

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
	"github.com/labstack/echo/v4"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/gallery"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/core/schema"
	"github.com/mudler/LocalAI/core/services"
	"github.com/mudler/LocalAI/pkg/system"
	"github.com/mudler/xlog"
)

// ModelGalleryEndpointService groups the HTTP handlers that manage model
// installation/removal through the configured model galleries.
type ModelGalleryEndpointService struct {
	galleries        []config.Gallery
	backendGalleries []config.Gallery
	modelPath        string
	galleryApplier   *services.GalleryService
}

// GalleryModel is the request payload for ApplyModelGalleryEndpoint: a
// gallery model reference plus the gallery element ID to install.
type GalleryModel struct {
	ID string `json:"id"`
	gallery.GalleryModel
}

// CreateModelGalleryEndpointService wires the gallery endpoints to the given
// galleries, system state, and the asynchronous gallery applier service.
func CreateModelGalleryEndpointService(galleries []config.Gallery, backendGalleries []config.Gallery, systemState *system.SystemState, galleryApplier *services.GalleryService) ModelGalleryEndpointService {
	return ModelGalleryEndpointService{
		galleries:        galleries,
		backendGalleries: backendGalleries,
		modelPath:        systemState.Model.ModelsPath,
		galleryApplier:   galleryApplier,
	}
}

// GetOpStatusEndpoint returns the job status
// @Summary Returns the job status
// @Success 200 {object} services.GalleryOpStatus "Response"
// @Router /models/jobs/{uuid} [get]
func (mgs *ModelGalleryEndpointService) GetOpStatusEndpoint() echo.HandlerFunc {
	return func(c echo.Context) error {
		status := mgs.galleryApplier.GetStatus(c.Param("uuid"))
		if status == nil {
			// Include the requested ID so the caller can tell which job is unknown.
			return fmt.Errorf("could not find any status for ID %q", c.Param("uuid"))
		}
		return c.JSON(200, status)
	}
}

// GetAllStatusEndpoint returns all the jobs status progress
// @Summary Returns all the jobs status progress
// @Success 200 {object} map[string]services.GalleryOpStatus "Response"
// @Router /models/jobs [get]
func (mgs *ModelGalleryEndpointService) GetAllStatusEndpoint() echo.HandlerFunc {
	return func(c echo.Context) error {
		return c.JSON(200, mgs.galleryApplier.GetAllStatus())
	}
}

// ApplyModelGalleryEndpoint installs a new model to a LocalAI instance from the model gallery
// @Summary Install models to LocalAI.
// @Param request body GalleryModel true "query params"
// @Success 200 {object} schema.GalleryResponse "Response"
// @Router /models/apply [post]
func (mgs *ModelGalleryEndpointService) ApplyModelGalleryEndpoint() echo.HandlerFunc {
	return func(c echo.Context) error {
		input := new(GalleryModel)
		// Get input data from the request body
		if err := c.Bind(input); err != nil {
			return err
		}

		uuid, err := uuid.NewUUID()
		if err != nil {
			return err
		}
		// Queue the installation; progress is tracked under the job UUID.
		mgs.galleryApplier.ModelGalleryChannel <- services.GalleryOp[gallery.GalleryModel, gallery.ModelConfig]{
			Req:                input.GalleryModel,
			ID:                 uuid.String(),
			GalleryElementName: input.ID,
			Galleries:          mgs.galleries,
			BackendGalleries:   mgs.backendGalleries,
		}

		return c.JSON(200, schema.GalleryResponse{ID: uuid.String(), StatusURL: fmt.Sprintf("%smodels/jobs/%s", middleware.BaseURL(c), uuid.String())})
	}
}

// DeleteModelGalleryEndpoint lets delete models from a LocalAI instance
// @Summary delete models to LocalAI.
// @Param name path string true "Model name"
// @Success 200 {object} schema.GalleryResponse "Response"
// @Router /models/delete/{name} [post]
func (mgs *ModelGalleryEndpointService) DeleteModelGalleryEndpoint() echo.HandlerFunc {
	return func(c echo.Context) error {
		modelName := c.Param("name")

		// FIX: the UUID was previously generated only AFTER the op had been
		// queued and was never attached to it, so the returned StatusURL
		// referenced a job ID the gallery service had never registered and
		// status polling for deletions could never succeed. Mint the ID
		// first and attach it to the op, mirroring ApplyModelGalleryEndpoint.
		uuid, err := uuid.NewUUID()
		if err != nil {
			return err
		}

		mgs.galleryApplier.ModelGalleryChannel <- services.GalleryOp[gallery.GalleryModel, gallery.ModelConfig]{
			Delete:             true,
			GalleryElementName: modelName,
			ID:                 uuid.String(),
		}

		return c.JSON(200, schema.GalleryResponse{ID: uuid.String(), StatusURL: fmt.Sprintf("%smodels/jobs/%s", middleware.BaseURL(c), uuid.String())})
	}
}

// ListModelFromGalleryEndpoint list the available models for installation from the active galleries
// @Summary List installable models.
// @Success 200 {object} []gallery.GalleryModel "Response"
// @Router /models/available [get]
func (mgs *ModelGalleryEndpointService) ListModelFromGalleryEndpoint(systemState *system.SystemState) echo.HandlerFunc {
	return func(c echo.Context) error {
		models, err := gallery.AvailableGalleryModels(mgs.galleries, systemState)
		if err != nil {
			xlog.Error("could not list models from galleries", "error", err)
			return err
		}

		xlog.Debug("Available models from galleries", "modelCount", len(models), "galleryCount", len(mgs.galleries))

		// Only the metadata is returned to the client.
		m := []gallery.Metadata{}
		for _, mm := range models {
			m = append(m, mm.Metadata)
		}

		xlog.Debug("Models", "models", m)
		dat, err := json.Marshal(m)
		if err != nil {
			return fmt.Errorf("could not marshal models: %w", err)
		}
		return c.Blob(200, "application/json", dat)
	}
}

// ListModelGalleriesEndpoint list the available galleries configured in LocalAI
// @Summary List all Galleries
// @Success 200 {object} []config.Gallery "Response"
// @Router /models/galleries [get]
// NOTE: This is different (and much simpler!) than above! This JUST lists the model galleries that have been loaded, not their contents!
func (mgs *ModelGalleryEndpointService) ListModelGalleriesEndpoint() echo.HandlerFunc {
	return func(c echo.Context) error {
		xlog.Debug("Listing model galleries", "galleries", mgs.galleries)
		dat, err := json.Marshal(mgs.galleries)
		if err != nil {
			return err
		}
		return c.Blob(200, "application/json", dat)
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/tokenize.go
core/http/endpoints/localai/tokenize.go
package localai import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/backend" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/model" ) // TokenizeEndpoint exposes a REST API to tokenize the content // @Summary Tokenize the input. // @Param request body schema.TokenizeRequest true "Request" // @Success 200 {object} schema.TokenizeResponse "Response" // @Router /v1/tokenize [post] func TokenizeEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.TokenizeRequest) if !ok || input.Model == "" { return echo.ErrBadRequest } cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig) if !ok || cfg == nil { return echo.ErrBadRequest } tokenResponse, err := backend.ModelTokenize(input.Content, ml, *cfg, appConfig) if err != nil { return err } return c.JSON(200, tokenResponse) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/video.go
core/http/endpoints/localai/video.go
package localai

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/core/schema"

	"github.com/mudler/LocalAI/core/backend"
	model "github.com/mudler/LocalAI/pkg/model"
	"github.com/mudler/xlog"
)

// downloadFile fetches url into a temporary file and returns its path.
// The caller is responsible for removing the file.
func downloadFile(url string) (string, error) {
	// Get the data
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// FIX: previously any response (404/500/...) was saved as if it were the
	// requested content; reject non-2xx statuses explicitly.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return "", fmt.Errorf("unexpected status code %d while downloading %q", resp.StatusCode, url)
	}

	// Create the file
	out, err := os.CreateTemp("", "video")
	if err != nil {
		return "", err
	}
	defer out.Close()

	// Write the body to file
	_, err = io.Copy(out, resp.Body)
	return out.Name(), err
}

// VideoEndpoint generates a video from a prompt (and optional start/end
// images), returning either a URL under /generated-videos or base64 data.
//
// @Summary Creates a video given a prompt.
// @Param request body schema.VideoRequest true "query params"
// @Success 200 {object} schema.OpenAIResponse "Response"
// @Router /video [post]
func VideoEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.VideoRequest)
		if !ok || input.Model == "" {
			xlog.Error("Video Endpoint - Invalid Input")
			return echo.ErrBadRequest
		}

		config, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
		if !ok || config == nil {
			xlog.Error("Video Endpoint - Invalid Config")
			return echo.ErrBadRequest
		}

		// Materialize the optional start image into a local temp file; it may
		// arrive as an HTTP(S) URL or as inline base64 data.
		src := ""
		if input.StartImage != "" {
			var fileData []byte
			var err error
			// check if input.File is an URL, if so download it and save it
			// to a temporary file
			if strings.HasPrefix(input.StartImage, "http://") || strings.HasPrefix(input.StartImage, "https://") {
				out, err := downloadFile(input.StartImage)
				if err != nil {
					return fmt.Errorf("failed downloading file:%w", err)
				}
				defer os.RemoveAll(out)

				fileData, err = os.ReadFile(out)
				if err != nil {
					return fmt.Errorf("failed reading file:%w", err)
				}
			} else {
				// base 64 decode the file and write it somewhere
				// that we will cleanup
				fileData, err = base64.StdEncoding.DecodeString(input.StartImage)
				if err != nil {
					return err
				}
			}

			// Create a temporary file
			outputFile, err := os.CreateTemp(appConfig.GeneratedContentDir, "b64")
			if err != nil {
				return err
			}
			// FIX: the data was previously funneled through a bufio.Writer
			// that was never flushed, so up to the buffer size of trailing
			// bytes (or the whole image, if small) was silently dropped.
			// Write directly to the file instead.
			_, err = outputFile.Write(fileData)
			if err != nil {
				outputFile.Close()
				return err
			}
			outputFile.Close()
			src = outputFile.Name()
			defer os.RemoveAll(src)
		}

		xlog.Debug("Parameter Config", "config", config)

		// Map legacy/empty backend names to the current default backend.
		switch config.Backend {
		case "stablediffusion":
			config.Backend = model.StableDiffusionGGMLBackend
		case "":
			config.Backend = model.StableDiffusionGGMLBackend
		}

		width := input.Width
		height := input.Height
		if width == 0 {
			width = 512
		}
		if height == 0 {
			height = 512
		}

		b64JSON := input.ResponseFormat == "b64_json"

		// b64 responses use a throwaway temp location; URL responses must land
		// in the served generated-videos directory.
		tempDir := ""
		if !b64JSON {
			tempDir = filepath.Join(appConfig.GeneratedContentDir, "videos")
		}
		// Create a temporary file
		outputFile, err := os.CreateTemp(tempDir, "b64")
		if err != nil {
			return err
		}
		outputFile.Close()

		// TODO: use mime type to determine the extension
		output := outputFile.Name() + ".mp4"

		// Rename the temporary file
		err = os.Rename(outputFile.Name(), output)
		if err != nil {
			return err
		}

		baseURL := middleware.BaseURL(c)

		fn, err := backend.VideoGeneration(
			height,
			width,
			input.Prompt,
			input.NegativePrompt,
			src,
			input.EndImage,
			output,
			input.NumFrames,
			input.FPS,
			input.Seed,
			input.CFGScale,
			input.Step,
			ml,
			*config,
			appConfig,
		)
		if err != nil {
			return err
		}
		if err := fn(); err != nil {
			return err
		}

		item := &schema.Item{}

		if b64JSON {
			defer os.RemoveAll(output)
			data, err := os.ReadFile(output)
			if err != nil {
				return err
			}
			item.B64JSON = base64.StdEncoding.EncodeToString(data)
		} else {
			base := filepath.Base(output)
			item.URL, err = url.JoinPath(baseURL, "generated-videos", base)
			if err != nil {
				return err
			}
		}

		id := uuid.New().String()
		created := int(time.Now().Unix())
		resp := &schema.OpenAIResponse{
			ID:      id,
			Created: created,
			Data:    []schema.Item{*item},
		}

		jsonResult, _ := json.Marshal(resp)
		xlog.Debug("Response", "response", string(jsonResult))

		// Return the prediction in the response body
		return c.JSON(200, resp)
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/stores.go
core/http/endpoints/localai/stores.go
package localai import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/backend" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/LocalAI/pkg/store" ) func StoresSetEndpoint(sl *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input := new(schema.StoresSet) if err := c.Bind(input); err != nil { return err } sb, err := backend.StoreBackend(sl, appConfig, input.Store, input.Backend) if err != nil { return err } vals := make([][]byte, len(input.Values)) for i, v := range input.Values { vals[i] = []byte(v) } err = store.SetCols(c.Request().Context(), sb, input.Keys, vals) if err != nil { return err } return c.NoContent(200) } } func StoresDeleteEndpoint(sl *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input := new(schema.StoresDelete) if err := c.Bind(input); err != nil { return err } sb, err := backend.StoreBackend(sl, appConfig, input.Store, input.Backend) if err != nil { return err } if err := store.DeleteCols(c.Request().Context(), sb, input.Keys); err != nil { return err } return c.NoContent(200) } } func StoresGetEndpoint(sl *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input := new(schema.StoresGet) if err := c.Bind(input); err != nil { return err } sb, err := backend.StoreBackend(sl, appConfig, input.Store, input.Backend) if err != nil { return err } keys, vals, err := store.GetCols(c.Request().Context(), sb, input.Keys) if err != nil { return err } res := schema.StoresGetResponse{ Keys: keys, Values: make([]string, len(vals)), } for i, v := range vals { res.Values[i] = string(v) } return c.JSON(200, res) } } func StoresFindEndpoint(sl *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input := 
new(schema.StoresFind) if err := c.Bind(input); err != nil { return err } sb, err := backend.StoreBackend(sl, appConfig, input.Store, input.Backend) if err != nil { return err } keys, vals, similarities, err := store.Find(c.Request().Context(), sb, input.Key, input.Topk) if err != nil { return err } res := schema.StoresFindResponse{ Keys: keys, Values: make([]string, len(vals)), Similarities: similarities, } for i, v := range vals { res.Values[i] = string(v) } return c.JSON(200, res) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/import_model.go
core/http/endpoints/localai/import_model.go
package localai import ( "encoding/json" "fmt" "io" "net/http" "os" "path/filepath" "strings" "github.com/google/uuid" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/gallery" "github.com/mudler/LocalAI/core/gallery/importers" httpUtils "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/core/services" "github.com/mudler/LocalAI/pkg/utils" "gopkg.in/yaml.v3" ) // ImportModelURIEndpoint handles creating new model configurations from a URI func ImportModelURIEndpoint(cl *config.ModelConfigLoader, appConfig *config.ApplicationConfig, galleryService *services.GalleryService, opcache *services.OpCache) echo.HandlerFunc { return func(c echo.Context) error { input := new(schema.ImportModelRequest) if err := c.Bind(input); err != nil { return err } modelConfig, err := importers.DiscoverModelConfig(input.URI, input.Preferences) if err != nil { return fmt.Errorf("failed to discover model config: %w", err) } uuid, err := uuid.NewUUID() if err != nil { return err } // Determine gallery ID for tracking - use model name if available, otherwise use URI galleryID := input.URI if modelConfig.Name != "" { galleryID = modelConfig.Name } // Register operation in opcache if available (for UI progress tracking) if opcache != nil { opcache.Set(galleryID, uuid.String()) } galleryService.ModelGalleryChannel <- services.GalleryOp[gallery.GalleryModel, gallery.ModelConfig]{ Req: gallery.GalleryModel{ Overrides: map[string]interface{}{}, }, ID: uuid.String(), GalleryElementName: galleryID, GalleryElement: &modelConfig, BackendGalleries: appConfig.BackendGalleries, } return c.JSON(200, schema.GalleryResponse{ ID: uuid.String(), StatusURL: fmt.Sprintf("%smodels/jobs/%s", httpUtils.BaseURL(c), uuid.String()), }) } } // ImportModelEndpoint handles creating new model configurations func ImportModelEndpoint(cl *config.ModelConfigLoader, appConfig *config.ApplicationConfig) 
echo.HandlerFunc { return func(c echo.Context) error { // Get the raw body body, err := io.ReadAll(c.Request().Body) if err != nil { response := ModelResponse{ Success: false, Error: "Failed to read request body: " + err.Error(), } return c.JSON(http.StatusBadRequest, response) } if len(body) == 0 { response := ModelResponse{ Success: false, Error: "Request body is empty", } return c.JSON(http.StatusBadRequest, response) } // Check content type to determine how to parse contentType := c.Request().Header.Get("Content-Type") var modelConfig config.ModelConfig if strings.Contains(contentType, "application/json") { // Parse JSON if err := json.Unmarshal(body, &modelConfig); err != nil { response := ModelResponse{ Success: false, Error: "Failed to parse JSON: " + err.Error(), } return c.JSON(http.StatusBadRequest, response) } } else if strings.Contains(contentType, "application/x-yaml") || strings.Contains(contentType, "text/yaml") { // Parse YAML if err := yaml.Unmarshal(body, &modelConfig); err != nil { response := ModelResponse{ Success: false, Error: "Failed to parse YAML: " + err.Error(), } return c.JSON(http.StatusBadRequest, response) } } else { // Try to auto-detect format if len(body) > 0 && strings.TrimSpace(string(body))[0] == '{' { // Looks like JSON if err := json.Unmarshal(body, &modelConfig); err != nil { response := ModelResponse{ Success: false, Error: "Failed to parse JSON: " + err.Error(), } return c.JSON(http.StatusBadRequest, response) } } else { // Assume YAML if err := yaml.Unmarshal(body, &modelConfig); err != nil { response := ModelResponse{ Success: false, Error: "Failed to parse YAML: " + err.Error(), } return c.JSON(http.StatusBadRequest, response) } } } // Validate required fields if modelConfig.Name == "" { response := ModelResponse{ Success: false, Error: "Name is required", } return c.JSON(http.StatusBadRequest, response) } // Set defaults modelConfig.SetDefaults(appConfig.ToConfigLoaderOptions()...) 
// Validate the configuration if valid, _ := modelConfig.Validate(); !valid { response := ModelResponse{ Success: false, Error: "Invalid configuration", } return c.JSON(http.StatusBadRequest, response) } // Create the configuration file configPath := filepath.Join(appConfig.SystemState.Model.ModelsPath, modelConfig.Name+".yaml") if err := utils.VerifyPath(modelConfig.Name+".yaml", appConfig.SystemState.Model.ModelsPath); err != nil { response := ModelResponse{ Success: false, Error: "Model path not trusted: " + err.Error(), } return c.JSON(http.StatusBadRequest, response) } // Marshal to YAML for storage yamlData, err := yaml.Marshal(&modelConfig) if err != nil { response := ModelResponse{ Success: false, Error: "Failed to marshal configuration: " + err.Error(), } return c.JSON(http.StatusInternalServerError, response) } // Write the file if err := os.WriteFile(configPath, yamlData, 0644); err != nil { response := ModelResponse{ Success: false, Error: "Failed to write configuration file: " + err.Error(), } return c.JSON(http.StatusInternalServerError, response) } // Reload configurations if err := cl.LoadModelConfigsFromPath(appConfig.SystemState.Model.ModelsPath, appConfig.ToConfigLoaderOptions()...); err != nil { response := ModelResponse{ Success: false, Error: "Failed to reload configurations: " + err.Error(), } return c.JSON(http.StatusInternalServerError, response) } // Preload the model if err := cl.Preload(appConfig.SystemState.Model.ModelsPath); err != nil { response := ModelResponse{ Success: false, Error: "Failed to preload model: " + err.Error(), } return c.JSON(http.StatusInternalServerError, response) } // Return success response response := ModelResponse{ Success: true, Message: "Model configuration created successfully", Filename: filepath.Base(configPath), } return c.JSON(200, response) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/backend.go
core/http/endpoints/localai/backend.go
package localai import ( "encoding/json" "fmt" "github.com/google/uuid" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/gallery" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/core/services" "github.com/mudler/LocalAI/pkg/system" "github.com/mudler/xlog" ) type BackendEndpointService struct { galleries []config.Gallery backendPath string backendSystemPath string backendApplier *services.GalleryService } type GalleryBackend struct { ID string `json:"id"` } func CreateBackendEndpointService(galleries []config.Gallery, systemState *system.SystemState, backendApplier *services.GalleryService) BackendEndpointService { return BackendEndpointService{ galleries: galleries, backendPath: systemState.Backend.BackendsPath, backendSystemPath: systemState.Backend.BackendsSystemPath, backendApplier: backendApplier, } } // GetOpStatusEndpoint returns the job status // @Summary Returns the job status // @Success 200 {object} services.GalleryOpStatus "Response" // @Router /backends/jobs/{uuid} [get] func (mgs *BackendEndpointService) GetOpStatusEndpoint() echo.HandlerFunc { return func(c echo.Context) error { status := mgs.backendApplier.GetStatus(c.Param("uuid")) if status == nil { return fmt.Errorf("could not find any status for ID") } return c.JSON(200, status) } } // GetAllStatusEndpoint returns all the jobs status progress // @Summary Returns all the jobs status progress // @Success 200 {object} map[string]services.GalleryOpStatus "Response" // @Router /backends/jobs [get] func (mgs *BackendEndpointService) GetAllStatusEndpoint() echo.HandlerFunc { return func(c echo.Context) error { return c.JSON(200, mgs.backendApplier.GetAllStatus()) } } // ApplyBackendEndpoint installs a new backend to a LocalAI instance // @Summary Install backends to LocalAI. 
// @Param request body GalleryBackend true "query params" // @Success 200 {object} schema.BackendResponse "Response" // @Router /backends/apply [post] func (mgs *BackendEndpointService) ApplyBackendEndpoint() echo.HandlerFunc { return func(c echo.Context) error { input := new(GalleryBackend) // Get input data from the request body if err := c.Bind(input); err != nil { return err } uuid, err := uuid.NewUUID() if err != nil { return err } mgs.backendApplier.BackendGalleryChannel <- services.GalleryOp[gallery.GalleryBackend, any]{ ID: uuid.String(), GalleryElementName: input.ID, Galleries: mgs.galleries, } return c.JSON(200, schema.BackendResponse{ID: uuid.String(), StatusURL: fmt.Sprintf("%sbackends/jobs/%s", middleware.BaseURL(c), uuid.String())}) } } // DeleteBackendEndpoint lets delete backends from a LocalAI instance // @Summary delete backends from LocalAI. // @Param name path string true "Backend name" // @Success 200 {object} schema.BackendResponse "Response" // @Router /backends/delete/{name} [post] func (mgs *BackendEndpointService) DeleteBackendEndpoint() echo.HandlerFunc { return func(c echo.Context) error { backendName := c.Param("name") mgs.backendApplier.BackendGalleryChannel <- services.GalleryOp[gallery.GalleryBackend, any]{ Delete: true, GalleryElementName: backendName, Galleries: mgs.galleries, } uuid, err := uuid.NewUUID() if err != nil { return err } return c.JSON(200, schema.BackendResponse{ID: uuid.String(), StatusURL: fmt.Sprintf("%sbackends/jobs/%s", middleware.BaseURL(c), uuid.String())}) } } // ListBackendsEndpoint list the available backends configured in LocalAI // @Summary List all Backends // @Success 200 {object} []gallery.GalleryBackend "Response" // @Router /backends [get] func (mgs *BackendEndpointService) ListBackendsEndpoint(systemState *system.SystemState) echo.HandlerFunc { return func(c echo.Context) error { backends, err := gallery.ListSystemBackends(systemState) if err != nil { return err } return c.JSON(200, 
backends.GetAll()) } } // ListModelGalleriesEndpoint list the available galleries configured in LocalAI // @Summary List all Galleries // @Success 200 {object} []config.Gallery "Response" // @Router /backends/galleries [get] // NOTE: This is different (and much simpler!) than above! This JUST lists the model galleries that have been loaded, not their contents! func (mgs *BackendEndpointService) ListBackendGalleriesEndpoint() echo.HandlerFunc { return func(c echo.Context) error { xlog.Debug("Listing backend galleries", "galleries", mgs.galleries) dat, err := json.Marshal(mgs.galleries) if err != nil { return err } return c.Blob(200, "application/json", dat) } } // ListAvailableBackendsEndpoint list the available backends in the galleries configured in LocalAI // @Summary List all available Backends // @Success 200 {object} []gallery.GalleryBackend "Response" // @Router /backends/available [get] func (mgs *BackendEndpointService) ListAvailableBackendsEndpoint(systemState *system.SystemState) echo.HandlerFunc { return func(c echo.Context) error { backends, err := gallery.AvailableBackends(mgs.galleries, systemState) if err != nil { return err } return c.JSON(200, backends) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/agent_jobs.go
core/http/endpoints/localai/agent_jobs.go
package localai import ( "fmt" "net/http" "strconv" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/application" "github.com/mudler/LocalAI/core/schema" ) // CreateTaskEndpoint creates a new agent task // @Summary Create a new agent task // @Description Create a new reusable agent task with prompt template and configuration // @Tags agent-jobs // @Accept json // @Produce json // @Param task body schema.Task true "Task definition" // @Success 201 {object} map[string]string "Task created" // @Failure 400 {object} map[string]string "Invalid request" // @Failure 500 {object} map[string]string "Internal server error" // @Router /api/agent/tasks [post] func CreateTaskEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { var task schema.Task if err := c.Bind(&task); err != nil { return c.JSON(http.StatusBadRequest, map[string]string{"error": "Invalid request body: " + err.Error()}) } id, err := app.AgentJobService().CreateTask(task) if err != nil { return c.JSON(http.StatusBadRequest, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusCreated, map[string]string{"id": id}) } } // UpdateTaskEndpoint updates an existing task // @Summary Update an agent task // @Description Update an existing agent task // @Tags agent-jobs // @Accept json // @Produce json // @Param id path string true "Task ID" // @Param task body schema.Task true "Updated task definition" // @Success 200 {object} map[string]string "Task updated" // @Failure 400 {object} map[string]string "Invalid request" // @Failure 404 {object} map[string]string "Task not found" // @Router /api/agent/tasks/{id} [put] func UpdateTaskEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { id := c.Param("id") var task schema.Task if err := c.Bind(&task); err != nil { return c.JSON(http.StatusBadRequest, map[string]string{"error": "Invalid request body: " + err.Error()}) } if err := app.AgentJobService().UpdateTask(id, 
task); err != nil { if err.Error() == "task not found: "+id { return c.JSON(http.StatusNotFound, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusBadRequest, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusOK, map[string]string{"message": "Task updated"}) } } // DeleteTaskEndpoint deletes a task // @Summary Delete an agent task // @Description Delete an agent task by ID // @Tags agent-jobs // @Produce json // @Param id path string true "Task ID" // @Success 200 {object} map[string]string "Task deleted" // @Failure 404 {object} map[string]string "Task not found" // @Router /api/agent/tasks/{id} [delete] func DeleteTaskEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { id := c.Param("id") if err := app.AgentJobService().DeleteTask(id); err != nil { if err.Error() == "task not found: "+id { return c.JSON(http.StatusNotFound, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusOK, map[string]string{"message": "Task deleted"}) } } // ListTasksEndpoint lists all tasks // @Summary List all agent tasks // @Description Get a list of all agent tasks // @Tags agent-jobs // @Produce json // @Success 200 {array} schema.Task "List of tasks" // @Router /api/agent/tasks [get] func ListTasksEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { tasks := app.AgentJobService().ListTasks() return c.JSON(http.StatusOK, tasks) } } // GetTaskEndpoint gets a task by ID // @Summary Get an agent task // @Description Get an agent task by ID // @Tags agent-jobs // @Produce json // @Param id path string true "Task ID" // @Success 200 {object} schema.Task "Task details" // @Failure 404 {object} map[string]string "Task not found" // @Router /api/agent/tasks/{id} [get] func GetTaskEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { 
id := c.Param("id") task, err := app.AgentJobService().GetTask(id) if err != nil { return c.JSON(http.StatusNotFound, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusOK, task) } } // ExecuteJobEndpoint executes a job // @Summary Execute an agent job // @Description Create and execute a new agent job // @Tags agent-jobs // @Accept json // @Produce json // @Param request body schema.JobExecutionRequest true "Job execution request" // @Success 201 {object} schema.JobExecutionResponse "Job created" // @Failure 400 {object} map[string]string "Invalid request" // @Router /api/agent/jobs/execute [post] func ExecuteJobEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { var req schema.JobExecutionRequest if err := c.Bind(&req); err != nil { return c.JSON(http.StatusBadRequest, map[string]string{"error": "Invalid request body: " + err.Error()}) } if req.Parameters == nil { req.Parameters = make(map[string]string) } // Build multimedia struct from request var multimedia *schema.MultimediaAttachment if len(req.Images) > 0 || len(req.Videos) > 0 || len(req.Audios) > 0 || len(req.Files) > 0 { multimedia = &schema.MultimediaAttachment{ Images: req.Images, Videos: req.Videos, Audios: req.Audios, Files: req.Files, } } jobID, err := app.AgentJobService().ExecuteJob(req.TaskID, req.Parameters, "api", multimedia) if err != nil { return c.JSON(http.StatusBadRequest, map[string]string{"error": err.Error()}) } baseURL := c.Scheme() + "://" + c.Request().Host return c.JSON(http.StatusCreated, schema.JobExecutionResponse{ JobID: jobID, Status: "pending", URL: baseURL + "/api/agent/jobs/" + jobID, }) } } // GetJobEndpoint gets a job by ID // @Summary Get an agent job // @Description Get an agent job by ID // @Tags agent-jobs // @Produce json // @Param id path string true "Job ID" // @Success 200 {object} schema.Job "Job details" // @Failure 404 {object} map[string]string "Job not found" // @Router /api/agent/jobs/{id} [get] func 
GetJobEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { id := c.Param("id") job, err := app.AgentJobService().GetJob(id) if err != nil { return c.JSON(http.StatusNotFound, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusOK, job) } } // ListJobsEndpoint lists jobs with optional filtering // @Summary List agent jobs // @Description Get a list of agent jobs, optionally filtered by task_id and status // @Tags agent-jobs // @Produce json // @Param task_id query string false "Filter by task ID" // @Param status query string false "Filter by status (pending, running, completed, failed, cancelled)" // @Param limit query int false "Limit number of results" // @Success 200 {array} schema.Job "List of jobs" // @Router /api/agent/jobs [get] func ListJobsEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { var taskID *string var status *schema.JobStatus limit := 0 if taskIDParam := c.QueryParam("task_id"); taskIDParam != "" { taskID = &taskIDParam } if statusParam := c.QueryParam("status"); statusParam != "" { s := schema.JobStatus(statusParam) status = &s } if limitParam := c.QueryParam("limit"); limitParam != "" { if l, err := strconv.Atoi(limitParam); err == nil { limit = l } } jobs := app.AgentJobService().ListJobs(taskID, status, limit) return c.JSON(http.StatusOK, jobs) } } // CancelJobEndpoint cancels a running job // @Summary Cancel an agent job // @Description Cancel a running or pending agent job // @Tags agent-jobs // @Produce json // @Param id path string true "Job ID" // @Success 200 {object} map[string]string "Job cancelled" // @Failure 400 {object} map[string]string "Job cannot be cancelled" // @Failure 404 {object} map[string]string "Job not found" // @Router /api/agent/jobs/{id}/cancel [post] func CancelJobEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { id := c.Param("id") if err := 
app.AgentJobService().CancelJob(id); err != nil { if err.Error() == "job not found: "+id { return c.JSON(http.StatusNotFound, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusBadRequest, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusOK, map[string]string{"message": "Job cancelled"}) } } // DeleteJobEndpoint deletes a job // @Summary Delete an agent job // @Description Delete an agent job by ID // @Tags agent-jobs // @Produce json // @Param id path string true "Job ID" // @Success 200 {object} map[string]string "Job deleted" // @Failure 404 {object} map[string]string "Job not found" // @Router /api/agent/jobs/{id} [delete] func DeleteJobEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { id := c.Param("id") if err := app.AgentJobService().DeleteJob(id); err != nil { if err.Error() == "job not found: "+id { return c.JSON(http.StatusNotFound, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()}) } return c.JSON(http.StatusOK, map[string]string{"message": "Job deleted"}) } } // ExecuteTaskByNameEndpoint executes a task by name // @Summary Execute a task by name // @Description Execute an agent task by its name (convenience endpoint). Parameters can be provided in the request body as a JSON object with string values. 
// @Tags agent-jobs // @Accept json // @Produce json // @Param name path string true "Task name" // @Param request body map[string]string false "Template parameters (JSON object with string values)" // @Success 201 {object} schema.JobExecutionResponse "Job created" // @Failure 400 {object} map[string]string "Invalid request" // @Failure 404 {object} map[string]string "Task not found" // @Router /api/agent/tasks/{name}/execute [post] func ExecuteTaskByNameEndpoint(app *application.Application) echo.HandlerFunc { return func(c echo.Context) error { name := c.Param("name") var params map[string]string // Try to bind parameters from request body // If body is empty or invalid, use empty params if c.Request().ContentLength > 0 { if err := c.Bind(&params); err != nil { // If binding fails, try to read as raw JSON body := make(map[string]interface{}) if err := c.Bind(&body); err == nil { // Convert interface{} values to strings params = make(map[string]string) for k, v := range body { if str, ok := v.(string); ok { params[k] = str } else { // Convert non-string values to string params[k] = fmt.Sprintf("%v", v) } } } else { // If all binding fails, use empty params params = make(map[string]string) } } } else { // No body provided, use empty params params = make(map[string]string) } // Find task by name tasks := app.AgentJobService().ListTasks() var task *schema.Task for _, t := range tasks { if t.Name == name { task = &t break } } if task == nil { return c.JSON(http.StatusNotFound, map[string]string{"error": "Task not found: " + name}) } jobID, err := app.AgentJobService().ExecuteJob(task.ID, params, "api", nil) if err != nil { return c.JSON(http.StatusBadRequest, map[string]string{"error": err.Error()}) } baseURL := c.Scheme() + "://" + c.Request().Host return c.JSON(http.StatusCreated, schema.JobExecutionResponse{ JobID: jobID, Status: "pending", URL: baseURL + "/api/agent/jobs/" + jobID, }) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/localai/backend_monitor.go
core/http/endpoints/localai/backend_monitor.go
package localai import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/core/services" ) // BackendMonitorEndpoint returns the status of the specified backend // @Summary Backend monitor endpoint // @Param request body schema.BackendMonitorRequest true "Backend statistics request" // @Success 200 {object} proto.StatusResponse "Response" // @Router /backend/monitor [get] func BackendMonitorEndpoint(bm *services.BackendMonitorService) echo.HandlerFunc { return func(c echo.Context) error { input := new(schema.BackendMonitorRequest) // Get input data from the request body if err := c.Bind(input); err != nil { return err } resp, err := bm.CheckAndSample(input.Model) if err != nil { return err } return c.JSON(200, resp) } } // BackendShutdownEndpoint shuts down the specified backend // @Summary Backend monitor endpoint // @Param request body schema.BackendMonitorRequest true "Backend statistics request" // @Router /backend/shutdown [post] func BackendShutdownEndpoint(bm *services.BackendMonitorService) echo.HandlerFunc { return func(c echo.Context) error { input := new(schema.BackendMonitorRequest) // Get input data from the request body if err := c.Bind(input); err != nil { return err } return bm.ShutdownModel(input.Model) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/inference.go
core/http/endpoints/openai/inference.go
package openai

import (
	"encoding/json"

	"github.com/mudler/LocalAI/core/backend"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/schema"
	model "github.com/mudler/LocalAI/pkg/model"
)

// ComputeChoices runs model inference for an OpenAI-style request and collects
// the resulting choices.
//
// predInput is the (already templated) prompt text. cb is invoked once per
// completion with the finetuned response and the shared result slice, and is
// responsible for appending the choice(s). tokenCallback, when non-nil,
// receives streamed tokens together with running usage counters.
// Returns the accumulated choices and token usage summed over all completions.
func ComputeChoices(
	req *schema.OpenAIRequest,
	predInput string,
	config *config.ModelConfig,
	bcl *config.ModelConfigLoader,
	o *config.ApplicationConfig,
	loader *model.ModelLoader,
	cb func(string, *[]schema.Choice),
	tokenCallback func(string, backend.TokenUsage) bool) ([]schema.Choice, backend.TokenUsage, error) {
	n := req.N // number of completions to return
	result := []schema.Choice{}

	if n == 0 {
		n = 1
	}

	// Gather multimodal attachments from every message in the conversation.
	images := []string{}
	for _, m := range req.Messages {
		images = append(images, m.StringImages...)
	}
	videos := []string{}
	for _, m := range req.Messages {
		videos = append(videos, m.StringVideos...)
	}
	audios := []string{}
	for _, m := range req.Messages {
		audios = append(audios, m.StringAudios...)
	}

	// Serialize tools and tool_choice to JSON strings
	// NOTE(review): a marshal failure is silently swallowed and degrades to
	// "no tools" — confirm this best-effort behavior is intended.
	toolsJSON := ""
	if len(req.Tools) > 0 {
		toolsBytes, err := json.Marshal(req.Tools)
		if err == nil {
			toolsJSON = string(toolsBytes)
		}
	}

	toolChoiceJSON := ""
	if req.ToolsChoice != nil {
		toolChoiceBytes, err := json.Marshal(req.ToolsChoice)
		if err == nil {
			toolChoiceJSON = string(toolChoiceBytes)
		}
	}

	// Extract logprobs from request
	// According to OpenAI API: logprobs is boolean, top_logprobs (0-20) controls how many top tokens per position
	var logprobs *int
	var topLogprobs *int
	if req.Logprobs.IsEnabled() {
		// If logprobs is enabled, use top_logprobs if provided, otherwise default to 1
		if req.TopLogprobs != nil {
			topLogprobs = req.TopLogprobs
			// For backend compatibility, set logprobs to the top_logprobs value
			logprobs = req.TopLogprobs
		} else {
			// Default to 1 if logprobs is true but top_logprobs not specified
			val := 1
			logprobs = &val
			topLogprobs = &val
		}
	}

	// Extract logit_bias from request
	// According to OpenAI API: logit_bias is a map of token IDs (as strings) to bias values (-100 to 100)
	var logitBias map[string]float64
	if len(req.LogitBias) > 0 {
		logitBias = req.LogitBias
	}

	// get the model function to call for the result
	predFunc, err := backend.ModelInference(req.Context, predInput, req.Messages, images, videos, audios, loader, config, bcl, o, tokenCallback, toolsJSON, toolChoiceJSON, logprobs, topLogprobs, logitBias)
	if err != nil {
		return result, backend.TokenUsage{}, err
	}

	tokenUsage := backend.TokenUsage{}

	// Run one prediction per requested completion, accumulating usage counts
	// and timings across iterations.
	for i := 0; i < n; i++ {
		prediction, err := predFunc()
		if err != nil {
			return result, backend.TokenUsage{}, err
		}

		tokenUsage.Prompt += prediction.Usage.Prompt
		tokenUsage.Completion += prediction.Usage.Completion
		tokenUsage.TimingPromptProcessing += prediction.Usage.TimingPromptProcessing
		tokenUsage.TimingTokenGeneration += prediction.Usage.TimingTokenGeneration

		finetunedResponse := backend.Finetune(*config, predInput, prediction.Response)
		cb(finetunedResponse, &result)

		// Add logprobs to the last choice if present
		if prediction.Logprobs != nil && len(result) > 0 {
			result[len(result)-1].Logprobs = prediction.Logprobs
		}

		//result = append(result, Choice{Text: prediction})
	}

	// err is always nil here (checked after ModelInference); returned for
	// symmetry with the early-exit paths.
	return result, tokenUsage, err
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/constants.go
core/http/endpoints/openai/constants.go
package openai // Finish reason constants for OpenAI API responses const ( FinishReasonStop = "stop" FinishReasonToolCalls = "tool_calls" FinishReasonFunctionCall = "function_call" )
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/edit.go
core/http/endpoints/openai/edit.go
package openai

import (
	"encoding/json"
	"time"

	"github.com/labstack/echo/v4"
	"github.com/mudler/LocalAI/core/backend"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/http/middleware"

	"github.com/google/uuid"

	"github.com/mudler/LocalAI/core/schema"
	"github.com/mudler/LocalAI/core/templates"
	"github.com/mudler/LocalAI/pkg/model"
	"github.com/mudler/xlog"
)

// EditEndpoint is the OpenAI edit API endpoint
// @Summary OpenAI edit endpoint
// @Param request body schema.OpenAIRequest true "query params"
// @Success 200 {object} schema.OpenAIResponse "Response"
// @Router /v1/edits [post]
func EditEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator *templates.Evaluator, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		// Request and model config are pre-parsed and stashed in context
		// locals by upstream middleware.
		input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenAIRequest)
		if !ok || input.Model == "" {
			return echo.ErrBadRequest
		}

		// Opt-in extra usage flag
		extraUsage := c.Request().Header.Get("Extra-Usage") != ""

		config, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
		if !ok || config == nil {
			return echo.ErrBadRequest
		}

		xlog.Debug("Edit Endpoint Input", "input", input)
		xlog.Debug("Edit Endpoint Config", "config", *config)

		var result []schema.Choice
		totalTokenUsage := backend.TokenUsage{}

		// One inference pass per configured input string; usage counters are
		// summed across passes.
		for _, i := range config.InputStrings {
			// If an edit prompt template exists, render it; on template error
			// the raw input is used unchanged (best-effort).
			templatedInput, err := evaluator.EvaluateTemplateForPrompt(templates.EditPromptTemplate, *config, templates.PromptTemplateData{
				Input:           i,
				Instruction:     input.Instruction,
				SystemPrompt:    config.SystemPrompt,
				ReasoningEffort: input.ReasoningEffort,
				Metadata:        input.Metadata,
			})
			if err == nil {
				i = templatedInput
				xlog.Debug("Template found, input modified", "input", i)
			}

			r, tokenUsage, err := ComputeChoices(input, i, config, cl, appConfig, ml, func(s string, c *[]schema.Choice) {
				*c = append(*c, schema.Choice{Text: s})
			}, nil)
			if err != nil {
				return err
			}

			totalTokenUsage.Prompt += tokenUsage.Prompt
			totalTokenUsage.Completion += tokenUsage.Completion
			totalTokenUsage.TimingTokenGeneration += tokenUsage.TimingTokenGeneration
			totalTokenUsage.TimingPromptProcessing += tokenUsage.TimingPromptProcessing

			result = append(result, r...)
		}

		usage := schema.OpenAIUsage{
			PromptTokens:     totalTokenUsage.Prompt,
			CompletionTokens: totalTokenUsage.Completion,
			TotalTokens:      totalTokenUsage.Prompt + totalTokenUsage.Completion,
		}
		// Timing fields are only exposed when the client opted in.
		if extraUsage {
			usage.TimingTokenGeneration = totalTokenUsage.TimingTokenGeneration
			usage.TimingPromptProcessing = totalTokenUsage.TimingPromptProcessing
		}

		id := uuid.New().String()
		created := int(time.Now().Unix())
		resp := &schema.OpenAIResponse{
			ID:      id,
			Created: created,
			Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
			Choices: result,
			Object:  "edit",
			Usage:   usage,
		}

		jsonResult, _ := json.Marshal(resp)
		xlog.Debug("Response", "response", string(jsonResult))

		// Return the prediction in the response body
		return c.JSON(200, resp)
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/image.go
core/http/endpoints/openai/image.go
package openai

import (
	"bufio"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/labstack/echo/v4"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/core/schema"

	"github.com/mudler/LocalAI/core/backend"
	model "github.com/mudler/LocalAI/pkg/model"

	"github.com/mudler/xlog"
)

// downloadFile fetches url into a temporary file and returns its path.
// The caller is responsible for removing the returned file.
func downloadFile(url string) (string, error) {
	// Get the data
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// Reject non-2xx responses: previously an HTML error page would be saved
	// verbatim and only fail much later, during image decoding.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return "", fmt.Errorf("downloading %q: unexpected status %s", url, resp.Status)
	}

	// Create the file
	out, err := os.CreateTemp("", "image")
	if err != nil {
		return "", err
	}
	defer out.Close()

	// Write the body to file
	_, err = io.Copy(out, resp.Body)
	return out.Name(), err
}

//

/*
*
	curl http://localhost:8080/v1/images/generations \
	-H "Content-Type: application/json" \
	-d '{
	"prompt": "A cute baby sea otter",
	"n": 1,
	"size": "512x512"
	}'
*
*/
// ImageEndpoint is the OpenAI Image generation API endpoint https://platform.openai.com/docs/api-reference/images/create
// @Summary Creates an image given a prompt.
// @Param request body schema.OpenAIRequest true "query params"
// @Success 200 {object} schema.OpenAIResponse "Response"
// @Router /v1/images/generations [post]
func ImageEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		// Request and model config are pre-parsed by upstream middleware.
		input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenAIRequest)
		if !ok || input.Model == "" {
			xlog.Error("Image Endpoint - Invalid Input")
			return echo.ErrBadRequest
		}

		config, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
		if !ok || config == nil {
			xlog.Error("Image Endpoint - Invalid Config")
			return echo.ErrBadRequest
		}

		// Process input images (for img2img/inpainting). Temp files are kept
		// until the request finishes, which is when generation has read them.
		src := ""
		if input.File != "" {
			src = processImageFile(input.File, appConfig.GeneratedContentDir)
			if src != "" {
				defer os.RemoveAll(src)
			}
		}

		// Process multiple input images
		var inputImages []string
		if len(input.Files) > 0 {
			for _, file := range input.Files {
				processedFile := processImageFile(file, appConfig.GeneratedContentDir)
				if processedFile != "" {
					inputImages = append(inputImages, processedFile)
					defer os.RemoveAll(processedFile)
				}
			}
		}

		// Process reference images
		var refImages []string
		if len(input.RefImages) > 0 {
			for _, file := range input.RefImages {
				processedFile := processImageFile(file, appConfig.GeneratedContentDir)
				if processedFile != "" {
					refImages = append(refImages, processedFile)
					defer os.RemoveAll(processedFile)
				}
			}
		}

		xlog.Debug("Parameter Config", "config", config)

		// "stablediffusion" is a legacy alias; empty means default backend.
		switch config.Backend {
		case "stablediffusion", "":
			config.Backend = model.StableDiffusionGGMLBackend
		}

		if !strings.Contains(input.Size, "x") {
			input.Size = "512x512"
			xlog.Warn("Invalid size, using default 512x512")
		}

		sizeParts := strings.Split(input.Size, "x")
		if len(sizeParts) != 2 {
			return fmt.Errorf("invalid value for 'size'")
		}
		width, err := strconv.Atoi(sizeParts[0])
		if err != nil {
			return fmt.Errorf("invalid value for 'size'")
		}
		height, err := strconv.Atoi(sizeParts[1])
		if err != nil {
			return fmt.Errorf("invalid value for 'size'")
		}

		b64JSON := config.ResponseFormat == "b64_json"

		// src and clip_skip
		var result []schema.Item
		for _, i := range config.PromptStrings {
			n := input.N
			if input.N == 0 {
				n = 1
			}
			for j := 0; j < n; j++ {
				// Prompts use "positive|negative" syntax.
				prompts := strings.Split(i, "|")
				positive_prompt := prompts[0]
				negative_prompt := ""
				if len(prompts) > 1 {
					negative_prompt = prompts[1]
				}

				step := config.Step
				if step == 0 {
					step = 15
				}
				if input.Step != 0 {
					step = input.Step
				}

				// b64 results are transient; URL results must live under the
				// served generated-content directory.
				tempDir := ""
				if !b64JSON {
					tempDir = filepath.Join(appConfig.GeneratedContentDir, "images")
				}
				// Create a temporary file, then rename to give it a .png
				// extension the backend and static file server expect.
				outputFile, err := os.CreateTemp(tempDir, "b64")
				if err != nil {
					return err
				}
				outputFile.Close()

				output := outputFile.Name() + ".png"
				// Rename the temporary file
				err = os.Rename(outputFile.Name(), output)
				if err != nil {
					return err
				}

				baseURL := middleware.BaseURL(c)

				// Use the first input image as src if available, otherwise use the original src
				inputSrc := src
				if len(inputImages) > 0 {
					inputSrc = inputImages[0]
				}

				// NOTE(review): assumes config.Seed is non-nil (presumably
				// defaulted during config load) — confirm, else this panics.
				fn, err := backend.ImageGeneration(height, width, step, *config.Seed, positive_prompt, negative_prompt, inputSrc, output, ml, *config, appConfig, refImages)
				if err != nil {
					return err
				}
				if err := fn(); err != nil {
					return err
				}

				item := &schema.Item{}

				if b64JSON {
					defer os.RemoveAll(output)
					data, err := os.ReadFile(output)
					if err != nil {
						return err
					}
					item.B64JSON = base64.StdEncoding.EncodeToString(data)
				} else {
					base := filepath.Base(output)
					item.URL, err = url.JoinPath(baseURL, "generated-images", base)
					if err != nil {
						return err
					}
				}

				result = append(result, *item)
			}
		}

		id := uuid.New().String()
		created := int(time.Now().Unix())

		resp := &schema.OpenAIResponse{
			ID:      id,
			Created: created,
			Data:    result,
			Usage: schema.OpenAIUsage{
				PromptTokens:     0,
				CompletionTokens: 0,
				TotalTokens:      0,
				InputTokens:      0,
				OutputTokens:     0,
				InputTokensDetails: &schema.InputTokensDetails{
					TextTokens:  0,
					ImageTokens: 0,
				},
			},
		}

		jsonResult, _ := json.Marshal(resp)
		xlog.Debug("Response", "response", string(jsonResult))

		// Return the prediction in the response body
		return c.JSON(200, resp)
	}
}

// processImageFile handles a single image file (URL or base64) and returns the path to the temporary file.
// It returns "" on any failure (best-effort: errors are logged, not returned).
// The caller owns the returned file and must remove it when done.
func processImageFile(file string, generatedContentDir string) string {
	fileData := []byte{}
	var err error
	// check if file is an URL, if so download it and save it to a temporary file
	if strings.HasPrefix(file, "http://") || strings.HasPrefix(file, "https://") {
		out, err := downloadFile(file)
		if err != nil {
			xlog.Error("Failed downloading file", "error", err, "file", file)
			return ""
		}
		defer os.RemoveAll(out)

		fileData, err = os.ReadFile(out)
		if err != nil {
			xlog.Error("Failed reading downloaded file", "error", err, "file", out)
			return ""
		}
	} else {
		// base 64 decode the file and write it somewhere that we will cleanup
		fileData, err = base64.StdEncoding.DecodeString(file)
		if err != nil {
			xlog.Error("Failed decoding base64 file", "error", err)
			return ""
		}
	}

	// Create a temporary file
	outputFile, err := os.CreateTemp(generatedContentDir, "b64")
	if err != nil {
		xlog.Error("Failed creating temporary file", "error", err)
		return ""
	}

	// Write the decoded bytes. The bufio writer MUST be flushed before the
	// file is closed: the previous code never flushed, so the buffered tail
	// of the payload (all of it, for small images) was silently dropped.
	writer := bufio.NewWriter(outputFile)
	if _, err := writer.Write(fileData); err != nil {
		outputFile.Close()
		xlog.Error("Failed writing to temporary file", "error", err)
		return ""
	}
	if err := writer.Flush(); err != nil {
		outputFile.Close()
		xlog.Error("Failed flushing temporary file", "error", err)
		return ""
	}
	outputFile.Close()

	return outputFile.Name()
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/realtime_model.go
core/http/endpoints/openai/realtime_model.go
package openai

import (
	"context"
	"fmt"

	"github.com/mudler/LocalAI/core/backend"
	"github.com/mudler/LocalAI/core/config"
	grpcClient "github.com/mudler/LocalAI/pkg/grpc"
	"github.com/mudler/LocalAI/pkg/grpc/proto"
	model "github.com/mudler/LocalAI/pkg/model"
	"github.com/mudler/xlog"

	"google.golang.org/grpc"
)

// Compile-time checks that every pipeline wrapper satisfies the Model interface.
var (
	_ Model = new(wrappedModel)
	_ Model = new(anyToAnyModel)
	_ Model = new(transcriptOnlyModel)
)

// wrappedModel represent a model which does not support Any-to-Any operations
// This means that we will fake an Any-to-Any model by overriding some of the gRPC client methods
// which are for Any-To-Any models, but instead we will call a pipeline (for e.g STT->LLM->TTS)
type wrappedModel struct {
	TTSConfig           *config.ModelConfig
	TranscriptionConfig *config.ModelConfig
	LLMConfig           *config.ModelConfig
	TTSClient           grpcClient.Backend
	TranscriptionClient grpcClient.Backend
	LLMClient           grpcClient.Backend

	VADConfig *config.ModelConfig
	VADClient grpcClient.Backend
}

// anyToAnyModel represent a model which supports Any-to-Any operations
// We have to wrap this out as well because we want to load two models one for VAD and one for the actual model.
// In the future there could be models that accept continous audio input only so this design will be useful for that
type anyToAnyModel struct {
	LLMConfig *config.ModelConfig
	LLMClient grpcClient.Backend

	VADConfig *config.ModelConfig
	VADClient grpcClient.Backend
}

// transcriptOnlyModel runs only the VAD + transcription stages of the
// pipeline; text generation and TTS are unsupported.
type transcriptOnlyModel struct {
	TranscriptionConfig *config.ModelConfig
	TranscriptionClient grpcClient.Backend
	VADConfig           *config.ModelConfig
	VADClient           grpcClient.Backend
}

func (m *transcriptOnlyModel) VAD(ctx context.Context, in *proto.VADRequest, opts ...grpc.CallOption) (*proto.VADResponse, error) {
	return m.VADClient.VAD(ctx, in)
}

func (m *transcriptOnlyModel) Transcribe(ctx context.Context, in *proto.TranscriptRequest, opts ...grpc.CallOption) (*proto.TranscriptResult, error) {
	return m.TranscriptionClient.AudioTranscription(ctx, in, opts...)
}

func (m *transcriptOnlyModel) Predict(ctx context.Context, in *proto.PredictOptions, opts ...grpc.CallOption) (*proto.Reply, error) {
	return nil, fmt.Errorf("predict operation not supported in transcript-only mode")
}

func (m *transcriptOnlyModel) PredictStream(ctx context.Context, in *proto.PredictOptions, f func(reply *proto.Reply), opts ...grpc.CallOption) error {
	return fmt.Errorf("predict stream operation not supported in transcript-only mode")
}

func (m *wrappedModel) VAD(ctx context.Context, in *proto.VADRequest, opts ...grpc.CallOption) (*proto.VADResponse, error) {
	return m.VADClient.VAD(ctx, in)
}

func (m *anyToAnyModel) VAD(ctx context.Context, in *proto.VADRequest, opts ...grpc.CallOption) (*proto.VADResponse, error) {
	return m.VADClient.VAD(ctx, in)
}

func (m *wrappedModel) Transcribe(ctx context.Context, in *proto.TranscriptRequest, opts ...grpc.CallOption) (*proto.TranscriptResult, error) {
	return m.TranscriptionClient.AudioTranscription(ctx, in, opts...)
}

func (m *anyToAnyModel) Transcribe(ctx context.Context, in *proto.TranscriptRequest, opts ...grpc.CallOption) (*proto.TranscriptResult, error) {
	// TODO: Can any-to-any models transcribe?
	return m.LLMClient.AudioTranscription(ctx, in, opts...)
}

func (m *wrappedModel) Predict(ctx context.Context, in *proto.PredictOptions, opts ...grpc.CallOption) (*proto.Reply, error) {
	// TODO: Convert with pipeline (audio to text, text to llm, result to tts, and return it)
	// sound.BufferAsWAV(audioData, "audio.wav")
	return m.LLMClient.Predict(ctx, in)
}

func (m *wrappedModel) PredictStream(ctx context.Context, in *proto.PredictOptions, f func(reply *proto.Reply), opts ...grpc.CallOption) error {
	// TODO: Convert with pipeline (audio to text, text to llm, result to tts, and return it)
	return m.LLMClient.PredictStream(ctx, in, f)
}

func (m *anyToAnyModel) Predict(ctx context.Context, in *proto.PredictOptions, opts ...grpc.CallOption) (*proto.Reply, error) {
	return m.LLMClient.Predict(ctx, in)
}

func (m *anyToAnyModel) PredictStream(ctx context.Context, in *proto.PredictOptions, f func(reply *proto.Reply), opts ...grpc.CallOption) error {
	return m.LLMClient.PredictStream(ctx, in, f)
}

// loadPipelineModel loads the named model configuration, validates it, and
// loads the backend client for it. kind is only used to make error messages
// self-describing (the previous code wrapped a nil error on validation
// failure — "%!w(<nil>)" — and reused copy-pasted "tts" messages everywhere).
func loadPipelineModel(name, kind string, cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) (grpcClient.Backend, *config.ModelConfig, error) {
	cfg, err := cl.LoadModelConfigFileByName(name, ml.ModelPath)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to load backend config for %s model: %w", kind, err)
	}

	// Wrap the actual validation error instead of discarding it.
	if valid, err := cfg.Validate(); !valid {
		return nil, nil, fmt.Errorf("failed to validate config for %s model: %w", kind, err)
	}

	client, err := ml.Load(backend.ModelOptions(*cfg, appConfig)...)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to load %s model: %w", kind, err)
	}
	return client, cfg, nil
}

// newTranscriptionOnlyModel loads the VAD and transcription models of the
// pipeline and returns a transcript-only wrapper together with the
// transcription config.
func newTranscriptionOnlyModel(pipeline *config.Pipeline, cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) (Model, *config.ModelConfig, error) {
	VADClient, cfgVAD, err := loadPipelineModel(pipeline.VAD, "VAD", cl, ml, appConfig)
	if err != nil {
		return nil, nil, err
	}

	transcriptionClient, cfgSST, err := loadPipelineModel(pipeline.Transcription, "transcription", cl, ml, appConfig)
	if err != nil {
		return nil, nil, err
	}

	return &transcriptOnlyModel{
		VADConfig:           cfgVAD,
		VADClient:           VADClient,
		TranscriptionConfig: cfgSST,
		TranscriptionClient: transcriptionClient,
	}, cfgSST, nil
}

// newModel returns and loads either a wrapped model or a model that support audio-to-audio
func newModel(pipeline *config.Pipeline, cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) (Model, error) {
	VADClient, cfgVAD, err := loadPipelineModel(pipeline.VAD, "VAD", cl, ml, appConfig)
	if err != nil {
		return nil, err
	}

	// TODO: Do we always need a transcription model? It can be disabled. Note that any-to-any instruction following models don't transcribe as such, so if transcription is required it is a separate process
	transcriptionClient, cfgSST, err := loadPipelineModel(pipeline.Transcription, "transcription", cl, ml, appConfig)
	if err != nil {
		return nil, err
	}

	// TODO: Decide when we have a real any-to-any model
	if false {
		anyToAnyClient, cfgAnyToAny, err := loadPipelineModel(pipeline.LLM, "any-to-any", cl, ml, appConfig)
		if err != nil {
			return nil, err
		}

		return &anyToAnyModel{
			LLMConfig: cfgAnyToAny,
			LLMClient: anyToAnyClient,
			VADConfig: cfgVAD,
			VADClient: VADClient,
		}, nil
	}

	xlog.Debug("Loading a wrapped model")

	// Otherwise we want to return a wrapped model, which is a "virtual" model that re-uses other models to perform operations
	ttsClient, cfgTTS, err := loadPipelineModel(pipeline.TTS, "TTS", cl, ml, appConfig)
	if err != nil {
		return nil, err
	}

	llmClient, cfgLLM, err := loadPipelineModel(pipeline.LLM, "LLM", cl, ml, appConfig)
	if err != nil {
		return nil, err
	}

	return &wrappedModel{
		TTSConfig:           cfgTTS,
		TranscriptionConfig: cfgSST,
		LLMConfig:           cfgLLM,
		TTSClient:           ttsClient,
		TranscriptionClient: transcriptionClient,
		LLMClient:           llmClient,

		VADConfig: cfgVAD,
		VADClient: VADClient,
	}, nil
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/realtime.go
core/http/endpoints/openai/realtime.go
package openai import ( "context" "encoding/base64" "encoding/json" "fmt" "os" "strings" "sync" "time" "net/http" "github.com/go-audio/audio" "github.com/gorilla/websocket" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/application" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/endpoints/openai/types" "github.com/mudler/LocalAI/core/templates" laudio "github.com/mudler/LocalAI/pkg/audio" "github.com/mudler/LocalAI/pkg/functions" "github.com/mudler/LocalAI/pkg/grpc/proto" model "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/LocalAI/pkg/sound" "google.golang.org/grpc" "github.com/mudler/xlog" ) const ( localSampleRate = 16000 remoteSampleRate = 24000 vadModel = "silero-vad-ggml" ) // A model can be "emulated" that is: transcribe audio to text -> feed text to the LLM -> generate audio as result // If the model support instead audio-to-audio, we will use the specific gRPC calls instead // Session represents a single WebSocket connection and its state type Session struct { ID string TranscriptionOnly bool Model string Voice string TurnDetection *types.ServerTurnDetection `json:"turn_detection"` // "server_vad" or "none" InputAudioTranscription *types.InputAudioTranscription Functions functions.Functions Conversations map[string]*Conversation InputAudioBuffer []byte AudioBufferLock sync.Mutex Instructions string DefaultConversationID string ModelInterface Model } func (s *Session) FromClient(session *types.ClientSession) { } func (s *Session) ToServer() types.ServerSession { return types.ServerSession{ ID: s.ID, Object: func() string { if s.TranscriptionOnly { return "realtime.transcription_session" } else { return "realtime.session" } }(), Model: s.Model, Modalities: []types.Modality{types.ModalityText, types.ModalityAudio}, Instructions: s.Instructions, Voice: s.Voice, InputAudioFormat: types.AudioFormatPcm16, OutputAudioFormat: types.AudioFormatPcm16, TurnDetection: s.TurnDetection, InputAudioTranscription: 
s.InputAudioTranscription, // TODO: Should be constructed from Functions? Tools: []types.Tool{}, // TODO: ToolChoice // TODO: Temperature // TODO: MaxOutputTokens // TODO: InputAudioNoiseReduction } } // TODO: Update to tools? // FunctionCall represents a function call initiated by the model type FunctionCall struct { Name string `json:"name"` Arguments map[string]interface{} `json:"arguments"` } // Conversation represents a conversation with a list of items type Conversation struct { ID string Items []*types.MessageItem Lock sync.Mutex } func (c *Conversation) ToServer() types.Conversation { return types.Conversation{ ID: c.ID, Object: "realtime.conversation", } } // Item represents a message, function_call, or function_call_output type Item struct { ID string `json:"id"` Object string `json:"object"` Type string `json:"type"` // "message", "function_call", "function_call_output" Status string `json:"status"` Role string `json:"role"` Content []ConversationContent `json:"content,omitempty"` FunctionCall *FunctionCall `json:"function_call,omitempty"` } // ConversationContent represents the content of an item type ConversationContent struct { Type string `json:"type"` // "input_text", "input_audio", "text", "audio", etc. 
Audio string `json:"audio,omitempty"` Text string `json:"text,omitempty"` // Additional fields as needed } // Define the structures for incoming messages type IncomingMessage struct { Type types.ClientEventType `json:"type"` Session json.RawMessage `json:"session,omitempty"` Item json.RawMessage `json:"item,omitempty"` Audio string `json:"audio,omitempty"` Response json.RawMessage `json:"response,omitempty"` Error *ErrorMessage `json:"error,omitempty"` // Other fields as needed } // ErrorMessage represents an error message sent to the client type ErrorMessage struct { Type string `json:"type"` Code string `json:"code"` Message string `json:"message"` Param string `json:"param,omitempty"` EventID string `json:"event_id,omitempty"` } // Define a structure for outgoing messages type OutgoingMessage struct { Type string `json:"type"` Session *Session `json:"session,omitempty"` Conversation *Conversation `json:"conversation,omitempty"` Item *Item `json:"item,omitempty"` Content string `json:"content,omitempty"` Audio string `json:"audio,omitempty"` Error *ErrorMessage `json:"error,omitempty"` } // Map to store sessions (in-memory) var sessions = make(map[string]*Session) var sessionLock sync.Mutex // TODO: implement interface as we start to define usages type Model interface { VAD(ctx context.Context, in *proto.VADRequest, opts ...grpc.CallOption) (*proto.VADResponse, error) Transcribe(ctx context.Context, in *proto.TranscriptRequest, opts ...grpc.CallOption) (*proto.TranscriptResult, error) Predict(ctx context.Context, in *proto.PredictOptions, opts ...grpc.CallOption) (*proto.Reply, error) PredictStream(ctx context.Context, in *proto.PredictOptions, f func(*proto.Reply), opts ...grpc.CallOption) error } var upgrader = websocket.Upgrader{ CheckOrigin: func(r *http.Request) bool { return true // Allow all origins }, } // TODO: Implement ephemeral keys to allow these endpoints to be used func RealtimeSessions(application *application.Application) echo.HandlerFunc { 
return func(c echo.Context) error { return c.NoContent(501) } } func RealtimeTranscriptionSession(application *application.Application) echo.HandlerFunc { return func(c echo.Context) error { return c.NoContent(501) } } func Realtime(application *application.Application) echo.HandlerFunc { return func(c echo.Context) error { ws, err := upgrader.Upgrade(c.Response(), c.Request(), nil) if err != nil { return err } defer ws.Close() // Extract query parameters from Echo context before passing to websocket handler model := c.QueryParam("model") if model == "" { model = "gpt-4o" } intent := c.QueryParam("intent") registerRealtime(application, model, intent)(ws) return nil } } func registerRealtime(application *application.Application, model, intent string) func(c *websocket.Conn) { return func(c *websocket.Conn) { evaluator := application.TemplatesEvaluator() xlog.Debug("WebSocket connection established", "address", c.RemoteAddr().String()) if intent != "transcription" { sendNotImplemented(c, "Only transcription mode is supported which requires the intent=transcription parameter") } xlog.Debug("Realtime params", "model", model, "intent", intent) sessionID := generateSessionID() session := &Session{ ID: sessionID, TranscriptionOnly: true, Model: model, // default model Voice: "alloy", // default voice TurnDetection: &types.ServerTurnDetection{ Type: types.ServerTurnDetectionTypeServerVad, TurnDetectionParams: types.TurnDetectionParams{ // TODO: Need some way to pass this to the backend Threshold: 0.5, // TODO: This is ignored and the amount of padding is random at present PrefixPaddingMs: 30, SilenceDurationMs: 500, CreateResponse: func() *bool { t := true; return &t }(), }, }, InputAudioTranscription: &types.InputAudioTranscription{ Model: "whisper-1", }, Conversations: make(map[string]*Conversation), } // Create a default conversation conversationID := generateConversationID() conversation := &Conversation{ ID: conversationID, Items: []*types.MessageItem{}, } 
session.Conversations[conversationID] = conversation session.DefaultConversationID = conversationID // TODO: The API has no way to configure the VAD model or other models that make up a pipeline to fake any-to-any // So possibly we could have a way to configure a composite model that can be used in situations where any-to-any is expected pipeline := config.Pipeline{ VAD: vadModel, Transcription: session.InputAudioTranscription.Model, } m, cfg, err := newTranscriptionOnlyModel( &pipeline, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), ) if err != nil { xlog.Error("failed to load model", "error", err) sendError(c, "model_load_error", "Failed to load model", "", "") return } session.ModelInterface = m // Store the session sessionLock.Lock() sessions[sessionID] = session sessionLock.Unlock() sendEvent(c, types.TranscriptionSessionCreatedEvent{ ServerEventBase: types.ServerEventBase{ EventID: "event_TODO", Type: types.ServerEventTypeTranscriptionSessionCreated, }, Session: session.ToServer(), }) var ( // mt int msg []byte wg sync.WaitGroup done = make(chan struct{}) ) vadServerStarted := true wg.Add(1) go func() { defer wg.Done() conversation := session.Conversations[session.DefaultConversationID] handleVAD(cfg, evaluator, session, conversation, c, done) }() for { if _, msg, err = c.ReadMessage(); err != nil { xlog.Error("read error", "error", err) break } // Parse the incoming message var incomingMsg IncomingMessage if err := json.Unmarshal(msg, &incomingMsg); err != nil { xlog.Error("invalid json", "error", err) sendError(c, "invalid_json", "Invalid JSON format", "", "") continue } var sessionUpdate types.ClientSession switch incomingMsg.Type { case types.ClientEventTypeTranscriptionSessionUpdate: xlog.Debug("recv", "message", string(msg)) if err := json.Unmarshal(incomingMsg.Session, &sessionUpdate); err != nil { xlog.Error("failed to unmarshal 'transcription_session.update'", "error", err) sendError(c, 
"invalid_session_update", "Invalid session update format", "", "") continue } if err := updateTransSession( session, &sessionUpdate, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), ); err != nil { xlog.Error("failed to update session", "error", err) sendError(c, "session_update_error", "Failed to update session", "", "") continue } sendEvent(c, types.SessionUpdatedEvent{ ServerEventBase: types.ServerEventBase{ EventID: "event_TODO", Type: types.ServerEventTypeTranscriptionSessionUpdated, }, Session: session.ToServer(), }) case types.ClientEventTypeSessionUpdate: xlog.Debug("recv", "message", string(msg)) // Update session configurations if err := json.Unmarshal(incomingMsg.Session, &sessionUpdate); err != nil { xlog.Error("failed to unmarshal 'session.update'", "error", err) sendError(c, "invalid_session_update", "Invalid session update format", "", "") continue } if err := updateSession( session, &sessionUpdate, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), ); err != nil { xlog.Error("failed to update session", "error", err) sendError(c, "session_update_error", "Failed to update session", "", "") continue } sendEvent(c, types.SessionUpdatedEvent{ ServerEventBase: types.ServerEventBase{ EventID: "event_TODO", Type: types.ServerEventTypeSessionUpdated, }, Session: session.ToServer(), }) if session.TurnDetection.Type == types.ServerTurnDetectionTypeServerVad && !vadServerStarted { xlog.Debug("Starting VAD goroutine...") wg.Add(1) go func() { defer wg.Done() conversation := session.Conversations[session.DefaultConversationID] handleVAD(cfg, evaluator, session, conversation, c, done) }() vadServerStarted = true } else if session.TurnDetection.Type != types.ServerTurnDetectionTypeServerVad && vadServerStarted { xlog.Debug("Stopping VAD goroutine...") wg.Add(-1) go func() { done <- struct{}{} }() vadServerStarted = false } case types.ClientEventTypeInputAudioBufferAppend: // 
Handle 'input_audio_buffer.append' if incomingMsg.Audio == "" { xlog.Error("Audio data is missing in 'input_audio_buffer.append'") sendError(c, "missing_audio_data", "Audio data is missing", "", "") continue } // Decode base64 audio data decodedAudio, err := base64.StdEncoding.DecodeString(incomingMsg.Audio) if err != nil { xlog.Error("failed to decode audio data", "error", err) sendError(c, "invalid_audio_data", "Failed to decode audio data", "", "") continue } // Append to InputAudioBuffer session.AudioBufferLock.Lock() session.InputAudioBuffer = append(session.InputAudioBuffer, decodedAudio...) session.AudioBufferLock.Unlock() case types.ClientEventTypeInputAudioBufferCommit: xlog.Debug("recv", "message", string(msg)) // TODO: Trigger transcription. // TODO: Ignore this if VAD enabled or interrupt VAD? if session.TranscriptionOnly { continue } // Commit the audio buffer to the conversation as a new item item := &types.MessageItem{ ID: generateItemID(), Type: "message", Status: "completed", Role: "user", Content: []types.MessageContentPart{ { Type: "input_audio", Audio: base64.StdEncoding.EncodeToString(session.InputAudioBuffer), }, }, } // Add item to conversation conversation.Lock.Lock() conversation.Items = append(conversation.Items, item) conversation.Lock.Unlock() // Reset InputAudioBuffer session.AudioBufferLock.Lock() session.InputAudioBuffer = nil session.AudioBufferLock.Unlock() // Send item.created event sendEvent(c, types.ConversationItemCreatedEvent{ ServerEventBase: types.ServerEventBase{ EventID: "event_TODO", Type: "conversation.item.created", }, Item: types.ResponseMessageItem{ Object: "realtime.item", MessageItem: *item, }, }) case types.ClientEventTypeConversationItemCreate: xlog.Debug("recv", "message", string(msg)) // Handle creating new conversation items var item types.ConversationItemCreateEvent if err := json.Unmarshal(incomingMsg.Item, &item); err != nil { xlog.Error("failed to unmarshal 'conversation.item.create'", "error", err) 
sendError(c, "invalid_item", "Invalid item format", "", "") continue } sendNotImplemented(c, "conversation.item.create") // Generate item ID and set status // item.ID = generateItemID() // item.Object = "realtime.item" // item.Status = "completed" // // // Add item to conversation // conversation.Lock.Lock() // conversation.Items = append(conversation.Items, &item) // conversation.Lock.Unlock() // // // Send item.created event // sendEvent(c, OutgoingMessage{ // Type: "conversation.item.created", // Item: &item, // }) case types.ClientEventTypeConversationItemDelete: sendError(c, "not_implemented", "Deleting items not implemented", "", "event_TODO") case types.ClientEventTypeResponseCreate: // Handle generating a response var responseCreate types.ResponseCreateEvent if len(incomingMsg.Response) > 0 { if err := json.Unmarshal(incomingMsg.Response, &responseCreate); err != nil { xlog.Error("failed to unmarshal 'response.create' response object", "error", err) sendError(c, "invalid_response_create", "Invalid response create format", "", "") continue } } // Update session functions if provided if len(responseCreate.Response.Tools) > 0 { // TODO: Tools -> Functions } sendNotImplemented(c, "response.create") // TODO: Generate a response based on the conversation history // wg.Add(1) // go func() { // defer wg.Done() // generateResponse(cfg, evaluator, session, conversation, responseCreate, c, mt) // }() case types.ClientEventTypeResponseCancel: xlog.Debug("recv", "message", string(msg)) // Handle cancellation of ongoing responses // Implement cancellation logic as needed sendNotImplemented(c, "response.cancel") default: xlog.Error("unknown message type", "type", incomingMsg.Type) sendError(c, "unknown_message_type", fmt.Sprintf("Unknown message type: %s", incomingMsg.Type), "", "") } } // Close the done channel to signal goroutines to exit close(done) wg.Wait() // Remove the session from the sessions map sessionLock.Lock() delete(sessions, sessionID) sessionLock.Unlock() 
} } // Helper function to send events to the client func sendEvent(c *websocket.Conn, event types.ServerEvent) { eventBytes, err := json.Marshal(event) if err != nil { xlog.Error("failed to marshal event", "error", err) return } if err = c.WriteMessage(websocket.TextMessage, eventBytes); err != nil { xlog.Error("write error", "error", err) } } // Helper function to send errors to the client func sendError(c *websocket.Conn, code, message, param, eventID string) { errorEvent := types.ErrorEvent{ ServerEventBase: types.ServerEventBase{ Type: types.ServerEventTypeError, EventID: eventID, }, Error: types.Error{ Type: "invalid_request_error", Code: code, Message: message, EventID: eventID, }, } sendEvent(c, errorEvent) } func sendNotImplemented(c *websocket.Conn, message string) { sendError(c, "not_implemented", message, "", "event_TODO") } func updateTransSession(session *Session, update *types.ClientSession, cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) error { sessionLock.Lock() defer sessionLock.Unlock() trUpd := update.InputAudioTranscription trCur := session.InputAudioTranscription if trUpd != nil && trUpd.Model != "" && trUpd.Model != trCur.Model { pipeline := config.Pipeline{ VAD: vadModel, Transcription: trUpd.Model, } m, _, err := newTranscriptionOnlyModel(&pipeline, cl, ml, appConfig) if err != nil { return err } session.ModelInterface = m } if trUpd != nil { trCur.Language = trUpd.Language trCur.Prompt = trUpd.Prompt } if update.TurnDetection != nil && update.TurnDetection.Type != "" { session.TurnDetection.Type = types.ServerTurnDetectionType(update.TurnDetection.Type) session.TurnDetection.TurnDetectionParams = update.TurnDetection.TurnDetectionParams } return nil } // Function to update session configurations func updateSession(session *Session, update *types.ClientSession, cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) error { sessionLock.Lock() defer 
sessionLock.Unlock() if update.Model != "" { pipeline := config.Pipeline{ LLM: update.Model, // TODO: Setup pipeline by configuring STT and TTS models } m, err := newModel(&pipeline, cl, ml, appConfig) if err != nil { return err } session.ModelInterface = m session.Model = update.Model } if update.Voice != "" { session.Voice = update.Voice } if update.TurnDetection != nil && update.TurnDetection.Type != "" { session.TurnDetection.Type = types.ServerTurnDetectionType(update.TurnDetection.Type) session.TurnDetection.TurnDetectionParams = update.TurnDetection.TurnDetectionParams } // TODO: We should actually check if the field was present in the JSON; empty string means clear the settings if update.Instructions != "" { session.Instructions = update.Instructions } if update.Tools != nil { return fmt.Errorf("Haven't implemented tools") } session.InputAudioTranscription = update.InputAudioTranscription return nil } // handleVAD is a goroutine that listens for audio data from the client, // runs VAD on the audio data, and commits utterances to the conversation func handleVAD(cfg *config.ModelConfig, evaluator *templates.Evaluator, session *Session, conv *Conversation, c *websocket.Conn, done chan struct{}) { vadContext, cancel := context.WithCancel(context.Background()) go func() { <-done cancel() }() silenceThreshold := float64(session.TurnDetection.SilenceDurationMs) / 1000 speechStarted := false startTime := time.Now() ticker := time.NewTicker(300 * time.Millisecond) defer ticker.Stop() for { select { case <-done: return case <-ticker.C: session.AudioBufferLock.Lock() allAudio := make([]byte, len(session.InputAudioBuffer)) copy(allAudio, session.InputAudioBuffer) session.AudioBufferLock.Unlock() aints := sound.BytesToInt16sLE(allAudio) if len(aints) == 0 || len(aints) < int(silenceThreshold)*remoteSampleRate { continue } // Resample from 24kHz to 16kHz aints = sound.ResampleInt16(aints, remoteSampleRate, localSampleRate) segments, err := runVAD(vadContext, session, 
aints) if err != nil { if err.Error() == "unexpected speech end" { xlog.Debug("VAD cancelled") continue } xlog.Error("failed to process audio", "error", err) sendError(c, "processing_error", "Failed to process audio: "+err.Error(), "", "") continue } audioLength := float64(len(aints)) / localSampleRate // TODO: When resetting the buffer we should retain a small postfix // TODO: The OpenAI documentation seems to suggest that only the client decides when to clear the buffer if len(segments) == 0 && audioLength > silenceThreshold { session.AudioBufferLock.Lock() session.InputAudioBuffer = nil session.AudioBufferLock.Unlock() xlog.Debug("Detected silence for a while, clearing audio buffer") sendEvent(c, types.InputAudioBufferClearedEvent{ ServerEventBase: types.ServerEventBase{ EventID: "event_TODO", Type: types.ServerEventTypeInputAudioBufferCleared, }, }) continue } else if len(segments) == 0 { continue } if !speechStarted { sendEvent(c, types.InputAudioBufferSpeechStartedEvent{ ServerEventBase: types.ServerEventBase{ EventID: "event_TODO", Type: types.ServerEventTypeInputAudioBufferSpeechStarted, }, AudioStartMs: time.Now().Sub(startTime).Milliseconds(), }) speechStarted = true } // Segment still in progress when audio ended segEndTime := segments[len(segments)-1].GetEnd() if segEndTime == 0 { continue } if float32(audioLength)-segEndTime > float32(silenceThreshold) { xlog.Debug("Detected end of speech segment") session.AudioBufferLock.Lock() session.InputAudioBuffer = nil session.AudioBufferLock.Unlock() sendEvent(c, types.InputAudioBufferSpeechStoppedEvent{ ServerEventBase: types.ServerEventBase{ EventID: "event_TODO", Type: types.ServerEventTypeInputAudioBufferSpeechStopped, }, AudioEndMs: time.Now().Sub(startTime).Milliseconds(), }) speechStarted = false sendEvent(c, types.InputAudioBufferCommittedEvent{ ServerEventBase: types.ServerEventBase{ EventID: "event_TODO", Type: types.ServerEventTypeInputAudioBufferCommitted, }, ItemID: generateItemID(), 
PreviousItemID: "TODO", }) abytes := sound.Int16toBytesLE(aints) // TODO: Remove prefix silence that is is over TurnDetectionParams.PrefixPaddingMs go commitUtterance(vadContext, abytes, cfg, evaluator, session, conv, c) } } } } func commitUtterance(ctx context.Context, utt []byte, cfg *config.ModelConfig, evaluator *templates.Evaluator, session *Session, conv *Conversation, c *websocket.Conn) { if len(utt) == 0 { return } // TODO: If we have a real any-to-any model then transcription is optional f, err := os.CreateTemp("", "realtime-audio-chunk-*.wav") if err != nil { xlog.Error("failed to create temp file", "error", err) return } defer f.Close() defer os.Remove(f.Name()) xlog.Debug("Writing to file", "file", f.Name()) hdr := laudio.NewWAVHeader(uint32(len(utt))) if err := hdr.Write(f); err != nil { xlog.Error("Failed to write WAV header", "error", err) return } if _, err := f.Write(utt); err != nil { xlog.Error("Failed to write audio data", "error", err) return } f.Sync() if session.InputAudioTranscription != nil { tr, err := session.ModelInterface.Transcribe(ctx, &proto.TranscriptRequest{ Dst: f.Name(), Language: session.InputAudioTranscription.Language, Translate: false, Threads: uint32(*cfg.Threads), Prompt: session.InputAudioTranscription.Prompt, }) if err != nil { sendError(c, "transcription_failed", err.Error(), "", "event_TODO") } sendEvent(c, types.ResponseAudioTranscriptDoneEvent{ ServerEventBase: types.ServerEventBase{ Type: types.ServerEventTypeResponseAudioTranscriptDone, EventID: "event_TODO", }, ItemID: generateItemID(), ResponseID: "resp_TODO", OutputIndex: 0, ContentIndex: 0, Transcript: tr.GetText(), }) // TODO: Update the prompt with transcription result? } if !session.TranscriptionOnly { sendNotImplemented(c, "Commiting items to the conversation not implemented") } // TODO: Commit the audio and/or transcribed text to the conversation // Commit logic: create item, broadcast item.created, etc. 
// item := &Item{ // ID: generateItemID(), // Object: "realtime.item", // Type: "message", // Status: "completed", // Role: "user", // Content: []ConversationContent{ // { // Type: "input_audio", // Audio: base64.StdEncoding.EncodeToString(utt), // }, // }, // } // conv.Lock.Lock() // conv.Items = append(conv.Items, item) // conv.Lock.Unlock() // // // sendEvent(c, OutgoingMessage{ // Type: "conversation.item.created", // Item: item, // }) // // // // trigger the response generation // generateResponse(cfg, evaluator, session, conv, ResponseCreate{}, c, websocket.TextMessage) } func runVAD(ctx context.Context, session *Session, adata []int16) ([]*proto.VADSegment, error) { soundIntBuffer := &audio.IntBuffer{ Format: &audio.Format{SampleRate: localSampleRate, NumChannels: 1}, SourceBitDepth: 16, Data: sound.ConvertInt16ToInt(adata), } float32Data := soundIntBuffer.AsFloat32Buffer().Data resp, err := session.ModelInterface.VAD(ctx, &proto.VADRequest{ Audio: float32Data, }) if err != nil { return nil, err } // If resp.Segments is empty => no speech return resp.Segments, nil } // TODO: Below needed for normal mode instead of transcription only // Function to generate a response based on the conversation // func generateResponse(config *config.ModelConfig, evaluator *templates.Evaluator, session *Session, conversation *Conversation, responseCreate ResponseCreate, c *websocket.Conn, mt int) { // // log.Debug().Msg("Generating realtime response...") // // // Compile the conversation history // conversation.Lock.Lock() // var conversationHistory []schema.Message // var latestUserAudio string // for _, item := range conversation.Items { // for _, content := range item.Content { // switch content.Type { // case "input_text", "text": // conversationHistory = append(conversationHistory, schema.Message{ // Role: string(item.Role), // StringContent: content.Text, // Content: content.Text, // }) // case "input_audio": // // We do not to turn to text here the audio result. 
// // When generating it later on from the LLM, // // we will also generate text and return it and store it in the conversation // // Here we just want to get the user audio if there is any as a new input for the conversation. // if item.Role == "user" { // latestUserAudio = content.Audio // } // } // } // } // // conversation.Lock.Unlock() // // var generatedText string // var generatedAudio []byte // var functionCall *FunctionCall // var err error // // if latestUserAudio != "" { // // Process the latest user audio input // decodedAudio, err := base64.StdEncoding.DecodeString(latestUserAudio) // if err != nil { // log.Error().Msgf("failed to decode latest user audio: %s", err.Error()) // sendError(c, "invalid_audio_data", "Failed to decode audio data", "", "") // return // } // // // Process the audio input and generate a response // generatedText, generatedAudio, functionCall, err = processAudioResponse(session, decodedAudio) // if err != nil { // log.Error().Msgf("failed to process audio response: %s", err.Error()) // sendError(c, "processing_error", "Failed to generate audio response", "", "") // return // } // } else { // // if session.Instructions != "" { // conversationHistory = append([]schema.Message{{ // Role: "system", // StringContent: session.Instructions, // Content: session.Instructions, // }}, conversationHistory...) 
// } // // funcs := session.Functions // shouldUseFn := len(funcs) > 0 && config.ShouldUseFunctions() // // // Allow the user to set custom actions via config file // // to be "embedded" in each model // noActionName := "answer" // noActionDescription := "use this action to answer without performing any action" // // if config.FunctionsConfig.NoActionFunctionName != "" { // noActionName = config.FunctionsConfig.NoActionFunctionName // } // if config.FunctionsConfig.NoActionDescriptionName != "" { // noActionDescription = config.FunctionsConfig.NoActionDescriptionName // } // // if (!config.FunctionsConfig.GrammarConfig.NoGrammar) && shouldUseFn { // noActionGrammar := functions.Function{ // Name: noActionName, // Description: noActionDescription, // Parameters: map[string]interface{}{ // "properties": map[string]interface{}{ // "message": map[string]interface{}{ // "type": "string", // "description": "The message to reply the user with", // }}, // }, // } // // // Append the no action function // if !config.FunctionsConfig.DisableNoAction { // funcs = append(funcs, noActionGrammar) // } // // // Update input grammar // jsStruct := funcs.ToJSONStructure(config.FunctionsConfig.FunctionNameKey, config.FunctionsConfig.FunctionNameKey) // g, err := jsStruct.Grammar(config.FunctionsConfig.GrammarOptions()...) 
// if err == nil { // config.Grammar = g // } // } // // // Generate a response based on text conversation history // prompt := evaluator.TemplateMessages(conversationHistory, config, funcs, shouldUseFn) // // generatedText, functionCall, err = processTextResponse(config, session, prompt) // if err != nil { // log.Error().Msgf("failed to process text response: %s", err.Error()) // sendError(c, "processing_error", "Failed to generate text response", "", "") // return // } // log.Debug().Any("text", generatedText).Msg("Generated text response") // } // // if functionCall != nil { // // The model wants to call a function // // Create a function_call item and send it to the client // item := &Item{ // ID: generateItemID(), // Object: "realtime.item", // Type: "function_call", // Status: "completed", // Role: "assistant", // FunctionCall: functionCall, // } // // // Add item to conversation // conversation.Lock.Lock() // conversation.Items = append(conversation.Items, item) // conversation.Lock.Unlock() // // // Send item.created event // sendEvent(c, OutgoingMessage{ // Type: "conversation.item.created", // Item: item, // }) // // // Optionally, you can generate a message to the user indicating the function call // // For now, we'll assume the client handles the function call and may trigger another response // // } else { // // Send response.stream messages // if generatedAudio != nil { // // If generatedAudio is available, send it as audio // encodedAudio := base64.StdEncoding.EncodeToString(generatedAudio) // outgoingMsg := OutgoingMessage{ // Type: "response.stream", // Audio: encodedAudio, // } // sendEvent(c, outgoingMsg) // } else { // // Send text response (could be streamed in chunks) // chunks := splitResponseIntoChunks(generatedText) // for _, chunk := range chunks {
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
true
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/transcription.go
core/http/endpoints/openai/transcription.go
package openai import ( "io" "net/http" "os" "path" "path/filepath" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/backend" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" model "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/xlog" ) // TranscriptEndpoint is the OpenAI Whisper API endpoint https://platform.openai.com/docs/api-reference/audio/create // @Summary Transcribes audio into the input language. // @accept multipart/form-data // @Param model formData string true "model" // @Param file formData file true "file" // @Success 200 {object} map[string]string "Response" // @Router /v1/audio/transcriptions [post] func TranscriptEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenAIRequest) if !ok || input.Model == "" { return echo.ErrBadRequest } config, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig) if !ok || config == nil { return echo.ErrBadRequest } diarize := c.FormValue("diarize") != "false" prompt := c.FormValue("prompt") // retrieve the file data from the request file, err := c.FormFile("file") if err != nil { return err } f, err := file.Open() if err != nil { return err } defer f.Close() dir, err := os.MkdirTemp("", "whisper") if err != nil { return err } defer os.RemoveAll(dir) dst := filepath.Join(dir, path.Base(file.Filename)) dstFile, err := os.Create(dst) if err != nil { return err } if _, err := io.Copy(dstFile, f); err != nil { xlog.Debug("Audio file copying error", "filename", file.Filename, "dst", dst, "error", err) return err } xlog.Debug("Audio file copied", "dst", dst) tr, err := backend.ModelTranscription(dst, input.Language, input.Translate, diarize, prompt, ml, *config, appConfig) if err != nil { return err } 
xlog.Debug("Transcribed", "transcription", tr) // TODO: handle different outputs here return c.JSON(http.StatusOK, tr) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/video.go
core/http/endpoints/openai/video.go
package openai import ( "encoding/json" "fmt" "strconv" "strings" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/endpoints/localai" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" model "github.com/mudler/LocalAI/pkg/model" ) func VideoEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenAIRequest) if !ok || input == nil { return echo.ErrBadRequest } var raw map[string]interface{} body := make([]byte, 0) if c.Request().Body != nil { c.Request().Body.Read(body) } if len(body) > 0 { _ = json.Unmarshal(body, &raw) } // Build VideoRequest using shared mapper vr := MapOpenAIToVideo(input, raw) // Place VideoRequest into context so localai.VideoEndpoint can consume it c.Set(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST, vr) // Delegate to existing localai handler return localai.VideoEndpoint(cl, ml, appConfig)(c) } } // VideoEndpoint godoc // @Summary Generate a video from an OpenAI-compatible request // @Description Accepts an OpenAI-style request and delegates to the LocalAI video generator // @Tags openai // @Accept json // @Produce json // @Param request body schema.OpenAIRequest true "OpenAI-style request" // @Success 200 {object} map[string]interface{} // @Failure 400 {object} map[string]interface{} // @Router /v1/videos [post] func MapOpenAIToVideo(input *schema.OpenAIRequest, raw map[string]interface{}) *schema.VideoRequest { vr := &schema.VideoRequest{} if input == nil { return vr } if input.Model != "" { vr.Model = input.Model } // Prompt mapping switch p := input.Prompt.(type) { case string: vr.Prompt = p case []interface{}: if len(p) > 0 { if s, ok := p[0].(string); ok { vr.Prompt = s } } } // Size size := input.Size if size == "" && raw != nil { if v, ok := raw["size"].(string); ok 
{ size = v } } if size != "" { parts := strings.SplitN(size, "x", 2) if len(parts) == 2 { if wi, err := strconv.Atoi(parts[0]); err == nil { vr.Width = int32(wi) } if hi, err := strconv.Atoi(parts[1]); err == nil { vr.Height = int32(hi) } } } // seconds -> num frames secondsStr := "" if raw != nil { if v, ok := raw["seconds"].(string); ok { secondsStr = v } else if v, ok := raw["seconds"].(float64); ok { secondsStr = fmt.Sprintf("%v", int(v)) } } fps := int32(30) if raw != nil { if rawFPS, ok := raw["fps"]; ok { switch rf := rawFPS.(type) { case float64: fps = int32(rf) case string: if fi, err := strconv.Atoi(rf); err == nil { fps = int32(fi) } } } } if secondsStr != "" { if secF, err := strconv.Atoi(secondsStr); err == nil { vr.FPS = fps vr.NumFrames = int32(secF) * fps } } // input_reference if raw != nil { if v, ok := raw["input_reference"].(string); ok { vr.StartImage = v } } // response format if input.ResponseFormat != nil { if rf, ok := input.ResponseFormat.(string); ok { vr.ResponseFormat = rf } } if input.Step != 0 { vr.Step = int32(input.Step) } return vr }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/list.go
core/http/endpoints/openai/list.go
package openai import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/core/services" model "github.com/mudler/LocalAI/pkg/model" ) // ListModelsEndpoint is the OpenAI Models API endpoint https://platform.openai.com/docs/api-reference/models // @Summary List and describe the various models available in the API. // @Success 200 {object} schema.ModelsDataResponse "Response" // @Router /v1/models [get] func ListModelsEndpoint(bcl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { // If blank, no filter is applied. filter := c.QueryParam("filter") // By default, exclude any loose files that are already referenced by a configuration file. var policy services.LooseFilePolicy excludeConfigured := c.QueryParam("excludeConfigured") if excludeConfigured == "" || excludeConfigured == "true" { policy = services.SKIP_IF_CONFIGURED } else { policy = services.ALWAYS_INCLUDE // This replicates current behavior. TODO: give more options to the user? } filterFn, err := config.BuildNameFilterFn(filter) if err != nil { return err } modelNames, err := services.ListModels(bcl, ml, filterFn, policy) if err != nil { return err } // Map from a slice of names to a slice of OpenAIModel response objects dataModels := []schema.OpenAIModel{} for _, m := range modelNames { dataModels = append(dataModels, schema.OpenAIModel{ID: m, Object: "model"}) } return c.JSON(200, schema.ModelsDataResponse{ Object: "list", Data: dataModels, }) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/completion.go
core/http/endpoints/openai/completion.go
package openai import ( "encoding/json" "errors" "fmt" "time" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/backend" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/google/uuid" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/core/templates" "github.com/mudler/LocalAI/pkg/functions" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/xlog" ) // CompletionEndpoint is the OpenAI Completion API endpoint https://platform.openai.com/docs/api-reference/completions // @Summary Generate completions for a given prompt and model. // @Param request body schema.OpenAIRequest true "query params" // @Success 200 {object} schema.OpenAIResponse "Response" // @Router /v1/completions [post] func CompletionEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator *templates.Evaluator, appConfig *config.ApplicationConfig) echo.HandlerFunc { process := func(id string, s string, req *schema.OpenAIRequest, config *config.ModelConfig, loader *model.ModelLoader, responses chan schema.OpenAIResponse, extraUsage bool) error { tokenCallback := func(s string, tokenUsage backend.TokenUsage) bool { created := int(time.Now().Unix()) usage := schema.OpenAIUsage{ PromptTokens: tokenUsage.Prompt, CompletionTokens: tokenUsage.Completion, TotalTokens: tokenUsage.Prompt + tokenUsage.Completion, } if extraUsage { usage.TimingTokenGeneration = tokenUsage.TimingTokenGeneration usage.TimingPromptProcessing = tokenUsage.TimingPromptProcessing } resp := schema.OpenAIResponse{ ID: id, Created: created, Model: req.Model, // we have to return what the user sent here, due to OpenAI spec. 
Choices: []schema.Choice{ { Index: 0, Text: s, FinishReason: nil, }, }, Object: "text_completion", Usage: usage, } xlog.Debug("Sending goroutine", "text", s) responses <- resp return true } _, _, err := ComputeChoices(req, s, config, cl, appConfig, loader, func(s string, c *[]schema.Choice) {}, tokenCallback) close(responses) return err } return func(c echo.Context) error { created := int(time.Now().Unix()) // Handle Correlation id := c.Request().Header.Get("X-Correlation-ID") if id == "" { id = uuid.New().String() } extraUsage := c.Request().Header.Get("Extra-Usage") != "" input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenAIRequest) if !ok || input.Model == "" { return echo.ErrBadRequest } config, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig) if !ok || config == nil { return echo.ErrBadRequest } if config.ResponseFormatMap != nil { d := schema.ChatCompletionResponseFormat{} dat, _ := json.Marshal(config.ResponseFormatMap) _ = json.Unmarshal(dat, &d) if d.Type == "json_object" { input.Grammar = functions.JSONBNF } } config.Grammar = input.Grammar xlog.Debug("Parameter Config", "config", config) if input.Stream { xlog.Debug("Stream request received") c.Response().Header().Set("Content-Type", "text/event-stream") c.Response().Header().Set("Cache-Control", "no-cache") c.Response().Header().Set("Connection", "keep-alive") if len(config.PromptStrings) > 1 { return errors.New("cannot handle more than 1 `PromptStrings` when Streaming") } predInput := config.PromptStrings[0] templatedInput, err := evaluator.EvaluateTemplateForPrompt(templates.CompletionPromptTemplate, *config, templates.PromptTemplateData{ Input: predInput, SystemPrompt: config.SystemPrompt, ReasoningEffort: input.ReasoningEffort, Metadata: input.Metadata, }) if err == nil { predInput = templatedInput xlog.Debug("Template found, input modified", "input", predInput) } responses := make(chan schema.OpenAIResponse) ended := make(chan error) go 
func() { ended <- process(id, predInput, input, config, ml, responses, extraUsage) }() LOOP: for { select { case ev := <-responses: if len(ev.Choices) == 0 { xlog.Debug("No choices in the response, skipping") continue } respData, err := json.Marshal(ev) if err != nil { xlog.Debug("Failed to marshal response", "error", err) continue } xlog.Debug("Sending chunk", "chunk", string(respData)) _, err = fmt.Fprintf(c.Response().Writer, "data: %s\n\n", string(respData)) if err != nil { return err } c.Response().Flush() case err := <-ended: if err == nil { break LOOP } xlog.Error("Stream ended with error", "error", err) stopReason := FinishReasonStop errorResp := schema.OpenAIResponse{ ID: id, Created: created, Model: input.Model, Choices: []schema.Choice{ { Index: 0, FinishReason: &stopReason, Text: "Internal error: " + err.Error(), }, }, Object: "text_completion", } errorData, marshalErr := json.Marshal(errorResp) if marshalErr != nil { xlog.Error("Failed to marshal error response", "error", marshalErr) // Send a simple error message as fallback fmt.Fprintf(c.Response().Writer, "data: {\"error\":\"Internal error\"}\n\n") } else { fmt.Fprintf(c.Response().Writer, "data: %s\n\n", string(errorData)) } c.Response().Flush() return nil } } stopReason := FinishReasonStop resp := &schema.OpenAIResponse{ ID: id, Created: created, Model: input.Model, // we have to return what the user sent here, due to OpenAI spec. 
Choices: []schema.Choice{ { Index: 0, FinishReason: &stopReason, }, }, Object: "text_completion", } respData, _ := json.Marshal(resp) fmt.Fprintf(c.Response().Writer, "data: %s\n\n", respData) fmt.Fprintf(c.Response().Writer, "data: [DONE]\n\n") c.Response().Flush() return nil } var result []schema.Choice totalTokenUsage := backend.TokenUsage{} for k, i := range config.PromptStrings { templatedInput, err := evaluator.EvaluateTemplateForPrompt(templates.CompletionPromptTemplate, *config, templates.PromptTemplateData{ SystemPrompt: config.SystemPrompt, Input: i, ReasoningEffort: input.ReasoningEffort, Metadata: input.Metadata, }) if err == nil { i = templatedInput xlog.Debug("Template found, input modified", "input", i) } r, tokenUsage, err := ComputeChoices( input, i, config, cl, appConfig, ml, func(s string, c *[]schema.Choice) { stopReason := FinishReasonStop *c = append(*c, schema.Choice{Text: s, FinishReason: &stopReason, Index: k}) }, nil) if err != nil { return err } totalTokenUsage.TimingTokenGeneration += tokenUsage.TimingTokenGeneration totalTokenUsage.TimingPromptProcessing += tokenUsage.TimingPromptProcessing result = append(result, r...) } usage := schema.OpenAIUsage{ PromptTokens: totalTokenUsage.Prompt, CompletionTokens: totalTokenUsage.Completion, TotalTokens: totalTokenUsage.Prompt + totalTokenUsage.Completion, } if extraUsage { usage.TimingTokenGeneration = totalTokenUsage.TimingTokenGeneration usage.TimingPromptProcessing = totalTokenUsage.TimingPromptProcessing } resp := &schema.OpenAIResponse{ ID: id, Created: created, Model: input.Model, // we have to return what the user sent here, due to OpenAI spec. Choices: result, Object: "text_completion", Usage: usage, } jsonResult, _ := json.Marshal(resp) xlog.Debug("Response", "response", string(jsonResult)) // Return the prediction in the response body return c.JSON(200, resp) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/inpainting.go
core/http/endpoints/openai/inpainting.go
package openai

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"github.com/google/uuid"
	"github.com/labstack/echo/v4"
	"github.com/mudler/xlog"

	"github.com/mudler/LocalAI/core/backend"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/core/schema"
	model "github.com/mudler/LocalAI/pkg/model"
)

// InpaintingEndpoint handles POST /v1/images/inpainting
//
// Flow: read image+mask uploads, base64-encode them into a JSON file the
// backend consumes, reserve an output .png path under GeneratedContentDir,
// invoke the backend generation function, and return a URL under
// `/generated-images`. All intermediate files are removed via the deferred
// cleanup unless `success` is set at the end.
//
// Swagger / OpenAPI docstring (swaggo):
// @Summary Image inpainting
// @Description Perform image inpainting. Accepts multipart/form-data with `image` and `mask` files.
// @Tags images
// @Accept multipart/form-data
// @Produce application/json
// @Param model formData string true "Model identifier"
// @Param prompt formData string true "Text prompt guiding the generation"
// @Param steps formData int false "Number of inference steps (default 25)"
// @Param image formData file true "Original image file"
// @Param mask formData file true "Mask image file (white = area to inpaint)"
// @Success 200 {object} schema.OpenAIResponse
// @Failure 400 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Router /v1/images/inpainting [post]
func InpaintingEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		// Parse basic form values
		modelName := c.FormValue("model")
		prompt := c.FormValue("prompt")
		stepsStr := c.FormValue("steps")
		if modelName == "" || prompt == "" {
			xlog.Error("Inpainting Endpoint - missing model or prompt")
			return echo.ErrBadRequest
		}

		// steps default (an unparsable value silently falls back to 25)
		steps := 25
		if stepsStr != "" {
			if v, err := strconv.Atoi(stepsStr); err == nil {
				steps = v
			}
		}

		// Get uploaded files
		imageFile, err := c.FormFile("image")
		if err != nil {
			xlog.Error("Inpainting Endpoint - missing image file", "error", err)
			return echo.NewHTTPError(http.StatusBadRequest, "missing image file")
		}
		maskFile, err := c.FormFile("mask")
		if err != nil {
			xlog.Error("Inpainting Endpoint - missing mask file", "error", err)
			return echo.NewHTTPError(http.StatusBadRequest, "missing mask file")
		}

		// Read files into memory (small files expected)
		imgSrc, err := imageFile.Open()
		if err != nil {
			return err
		}
		defer imgSrc.Close()
		imgBytes, err := io.ReadAll(imgSrc)
		if err != nil {
			return err
		}
		maskSrc, err := maskFile.Open()
		if err != nil {
			return err
		}
		defer maskSrc.Close()
		maskBytes, err := io.ReadAll(maskSrc)
		if err != nil {
			return err
		}

		// Create JSON with base64 fields expected by backend
		b64Image := base64.StdEncoding.EncodeToString(imgBytes)
		b64Mask := base64.StdEncoding.EncodeToString(maskBytes)

		// get model config from context (middleware set it)
		cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
		if !ok || cfg == nil {
			xlog.Error("Inpainting Endpoint - model config not found in context")
			return echo.ErrBadRequest
		}

		// Use the GeneratedContentDir so the generated PNG is placed where the
		// HTTP static handler serves `/generated-images`.
		tmpDir := appConfig.GeneratedContentDir
		// Ensure the directory exists
		if err := os.MkdirAll(tmpDir, 0750); err != nil {
			xlog.Error("Inpainting Endpoint - failed to create generated content dir", "error", err, "dir", tmpDir)
			return echo.NewHTTPError(http.StatusInternalServerError, "failed to prepare storage")
		}

		id := uuid.New().String()
		jsonPath := filepath.Join(tmpDir, fmt.Sprintf("inpaint_%s.json", id))
		jsonFile := map[string]string{
			"image":      b64Image,
			"mask_image": b64Mask,
		}

		// Temp file for the JSON payload; renamed to jsonPath once written.
		jf, err := os.CreateTemp(tmpDir, "inpaint_")
		if err != nil {
			return err
		}

		// setup cleanup on error; if everything succeeds we set success = true
		success := false
		var dst string
		var origRef string
		var maskRef string
		defer func() {
			if !success {
				// Best-effort cleanup; log any failures
				// NOTE(review): jf may already be closed here (double Close
				// after the happy-path close/rename) — harmless but noisy.
				if jf != nil {
					if cerr := jf.Close(); cerr != nil {
						xlog.Warn("Inpainting Endpoint - failed to close temp json file in cleanup", "error", cerr)
					}
					if name := jf.Name(); name != "" {
						if rerr := os.Remove(name); rerr != nil && !os.IsNotExist(rerr) {
							xlog.Warn("Inpainting Endpoint - failed to remove temp json file in cleanup", "error", rerr, "file", name)
						}
					}
				}
				if jsonPath != "" {
					if rerr := os.Remove(jsonPath); rerr != nil && !os.IsNotExist(rerr) {
						xlog.Warn("Inpainting Endpoint - failed to remove json file in cleanup", "error", rerr, "file", jsonPath)
					}
				}
				if dst != "" {
					if rerr := os.Remove(dst); rerr != nil && !os.IsNotExist(rerr) {
						xlog.Warn("Inpainting Endpoint - failed to remove dst file in cleanup", "error", rerr, "file", dst)
					}
				}
				if origRef != "" {
					if rerr := os.Remove(origRef); rerr != nil && !os.IsNotExist(rerr) {
						xlog.Warn("Inpainting Endpoint - failed to remove orig ref file in cleanup", "error", rerr, "file", origRef)
					}
				}
				if maskRef != "" {
					if rerr := os.Remove(maskRef); rerr != nil && !os.IsNotExist(rerr) {
						xlog.Warn("Inpainting Endpoint - failed to remove mask ref file in cleanup", "error", rerr, "file", maskRef)
					}
				}
			}
			// NOTE(review): on success, jsonPath/origRef/maskRef (which hold
			// request image data) are left behind in GeneratedContentDir —
			// confirm whether a separate reaper cleans them up.
		}()

		// write original image and mask to disk as ref images so backends that
		// accept reference image files can use them (maintainer request).
		origTmp, err := os.CreateTemp(tmpDir, "refimg_")
		if err != nil {
			return err
		}
		if _, err := origTmp.Write(imgBytes); err != nil {
			_ = origTmp.Close()
			_ = os.Remove(origTmp.Name())
			return err
		}
		if cerr := origTmp.Close(); cerr != nil {
			xlog.Warn("Inpainting Endpoint - failed to close orig temp file", "error", cerr)
		}
		origRef = origTmp.Name()
		maskTmp, err := os.CreateTemp(tmpDir, "refmask_")
		if err != nil {
			// cleanup origTmp on error
			_ = os.Remove(origRef)
			return err
		}
		if _, err := maskTmp.Write(maskBytes); err != nil {
			_ = maskTmp.Close()
			_ = os.Remove(maskTmp.Name())
			_ = os.Remove(origRef)
			return err
		}
		if cerr := maskTmp.Close(); cerr != nil {
			xlog.Warn("Inpainting Endpoint - failed to close mask temp file", "error", cerr)
		}
		maskRef = maskTmp.Name()

		// write JSON
		enc := json.NewEncoder(jf)
		if err := enc.Encode(jsonFile); err != nil {
			if cerr := jf.Close(); cerr != nil {
				xlog.Warn("Inpainting Endpoint - failed to close temp json file after encode error", "error", cerr)
			}
			return err
		}
		if cerr := jf.Close(); cerr != nil {
			xlog.Warn("Inpainting Endpoint - failed to close temp json file", "error", cerr)
		}
		// rename to desired name
		if err := os.Rename(jf.Name(), jsonPath); err != nil {
			return err
		}

		// prepare dst: create a temp file, then rename it with a .png suffix
		// so the backend writes to a path the static handler will serve.
		outTmp, err := os.CreateTemp(tmpDir, "out_")
		if err != nil {
			return err
		}
		if cerr := outTmp.Close(); cerr != nil {
			xlog.Warn("Inpainting Endpoint - failed to close out temp file", "error", cerr)
		}
		dst = outTmp.Name() + ".png"
		if err := os.Rename(outTmp.Name(), dst); err != nil {
			return err
		}

		// Determine width/height default
		// NOTE(review): output size is fixed at 512x512; the request cannot
		// override it — confirm whether that is intentional.
		width := 512
		height := 512

		// Call backend image generation via indirection so tests can stub it
		// Note: ImageGenerationFunc will call into the loaded model's GenerateImage which expects src JSON
		// Also pass ref images (orig + mask) so backends that support ref images can use them.
		refImages := []string{origRef, maskRef}
		fn, err := backend.ImageGenerationFunc(height, width, steps, 0, prompt, "", jsonPath, dst, ml, *cfg, appConfig, refImages)
		if err != nil {
			return err
		}

		// Execute generation function (blocking)
		if err := fn(); err != nil {
			return err
		}

		// On success, build response URL using BaseURL middleware helper and
		// the same `generated-images` prefix used by the server static mount.
		baseURL := middleware.BaseURL(c)

		// Build response using url.JoinPath for correct URL escaping
		imgPath, err := url.JoinPath(baseURL, "generated-images", filepath.Base(dst))
		if err != nil {
			return err
		}

		created := int(time.Now().Unix())
		resp := &schema.OpenAIResponse{
			ID:      id,
			Created: created,
			Data: []schema.Item{{
				URL: imgPath,
			}},
			Usage: schema.OpenAIUsage{
				PromptTokens:     0,
				CompletionTokens: 0,
				TotalTokens:      0,
				InputTokens:      0,
				OutputTokens:     0,
				InputTokensDetails: &schema.InputTokensDetails{
					TextTokens:  0,
					ImageTokens: 0,
				},
			},
		}

		// mark success so defer cleanup will not remove output files
		success = true
		return c.JSON(http.StatusOK, resp)
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/chat.go
core/http/endpoints/openai/chat.go
package openai import ( "encoding/json" "fmt" "time" "github.com/google/uuid" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/backend" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/functions" "github.com/mudler/LocalAI/core/templates" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/xlog" ) // ChatEndpoint is the OpenAI Completion API endpoint https://platform.openai.com/docs/api-reference/chat/create // @Summary Generate a chat completions for a given prompt and model. // @Param request body schema.OpenAIRequest true "query params" // @Success 200 {object} schema.OpenAIResponse "Response" // @Router /v1/chat/completions [post] func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator *templates.Evaluator, startupOptions *config.ApplicationConfig) echo.HandlerFunc { var id, textContentToReturn string var created int process := func(s string, req *schema.OpenAIRequest, config *config.ModelConfig, loader *model.ModelLoader, responses chan schema.OpenAIResponse, extraUsage bool) error { initialMessage := schema.OpenAIResponse{ ID: id, Created: created, Model: req.Model, // we have to return what the user sent here, due to OpenAI spec. 
Choices: []schema.Choice{{Delta: &schema.Message{Role: "assistant"}, Index: 0, FinishReason: nil}}, Object: "chat.completion.chunk", } responses <- initialMessage _, _, err := ComputeChoices(req, s, config, cl, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, tokenUsage backend.TokenUsage) bool { usage := schema.OpenAIUsage{ PromptTokens: tokenUsage.Prompt, CompletionTokens: tokenUsage.Completion, TotalTokens: tokenUsage.Prompt + tokenUsage.Completion, } if extraUsage { usage.TimingTokenGeneration = tokenUsage.TimingTokenGeneration usage.TimingPromptProcessing = tokenUsage.TimingPromptProcessing } resp := schema.OpenAIResponse{ ID: id, Created: created, Model: req.Model, // we have to return what the user sent here, due to OpenAI spec. Choices: []schema.Choice{{Delta: &schema.Message{Content: &s}, Index: 0, FinishReason: nil}}, Object: "chat.completion.chunk", Usage: usage, } responses <- resp return true }) close(responses) return err } processTools := func(noAction string, prompt string, req *schema.OpenAIRequest, config *config.ModelConfig, loader *model.ModelLoader, responses chan schema.OpenAIResponse, extraUsage bool) error { result := "" lastEmittedCount := 0 _, tokenUsage, err := ComputeChoices(req, prompt, config, cl, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, usage backend.TokenUsage) bool { result += s // Try incremental XML parsing for streaming support using iterative parser // This allows emitting partial tool calls as they're being generated cleanedResult := functions.CleanupLLMResult(result, config.FunctionsConfig) // Determine XML format from config var xmlFormat *functions.XMLToolCallFormat if config.FunctionsConfig.XMLFormat != nil { xmlFormat = config.FunctionsConfig.XMLFormat } else if config.FunctionsConfig.XMLFormatPreset != "" { xmlFormat = functions.GetXMLFormatPreset(config.FunctionsConfig.XMLFormatPreset) } // Use iterative parser for streaming (partial parsing enabled) // 
Try XML parsing first partialResults, parseErr := functions.ParseXMLIterative(cleanedResult, xmlFormat, true) if parseErr == nil && len(partialResults) > 0 { // Emit new XML tool calls that weren't emitted before if len(partialResults) > lastEmittedCount { for i := lastEmittedCount; i < len(partialResults); i++ { toolCall := partialResults[i] initialMessage := schema.OpenAIResponse{ ID: id, Created: created, Model: req.Model, Choices: []schema.Choice{{ Delta: &schema.Message{ Role: "assistant", ToolCalls: []schema.ToolCall{ { Index: i, ID: id, Type: "function", FunctionCall: schema.FunctionCall{ Name: toolCall.Name, }, }, }, }, Index: 0, FinishReason: nil, }}, Object: "chat.completion.chunk", } select { case responses <- initialMessage: default: } } lastEmittedCount = len(partialResults) } } else { // Try JSON tool call parsing for streaming // Check if the result looks like JSON tool calls jsonResults, jsonErr := functions.ParseJSONIterative(cleanedResult, true) if jsonErr == nil && len(jsonResults) > 0 { // Check if these are tool calls (have "name" and optionally "arguments") for _, jsonObj := range jsonResults { if name, ok := jsonObj["name"].(string); ok && name != "" { // This looks like a tool call args := "{}" if argsVal, ok := jsonObj["arguments"]; ok { if argsStr, ok := argsVal.(string); ok { args = argsStr } else { argsBytes, _ := json.Marshal(argsVal) args = string(argsBytes) } } // Emit tool call initialMessage := schema.OpenAIResponse{ ID: id, Created: created, Model: req.Model, Choices: []schema.Choice{{ Delta: &schema.Message{ Role: "assistant", ToolCalls: []schema.ToolCall{ { Index: lastEmittedCount, ID: id, Type: "function", FunctionCall: schema.FunctionCall{ Name: name, Arguments: args, }, }, }, }, Index: 0, FinishReason: nil, }}, Object: "chat.completion.chunk", } select { case responses <- initialMessage: default: } lastEmittedCount++ } } } } return true }) if err != nil { return err } textContentToReturn = functions.ParseTextContent(result, 
config.FunctionsConfig) result = functions.CleanupLLMResult(result, config.FunctionsConfig) functionResults := functions.ParseFunctionCall(result, config.FunctionsConfig) xlog.Debug("Text content to return", "text", textContentToReturn) noActionToRun := len(functionResults) > 0 && functionResults[0].Name == noAction || len(functionResults) == 0 switch { case noActionToRun: initialMessage := schema.OpenAIResponse{ ID: id, Created: created, Model: req.Model, // we have to return what the user sent here, due to OpenAI spec. Choices: []schema.Choice{{Delta: &schema.Message{Role: "assistant"}, Index: 0, FinishReason: nil}}, Object: "chat.completion.chunk", } responses <- initialMessage result, err := handleQuestion(config, cl, req, ml, startupOptions, functionResults, result, prompt) if err != nil { xlog.Error("error handling question", "error", err) return err } usage := schema.OpenAIUsage{ PromptTokens: tokenUsage.Prompt, CompletionTokens: tokenUsage.Completion, TotalTokens: tokenUsage.Prompt + tokenUsage.Completion, } if extraUsage { usage.TimingTokenGeneration = tokenUsage.TimingTokenGeneration usage.TimingPromptProcessing = tokenUsage.TimingPromptProcessing } resp := schema.OpenAIResponse{ ID: id, Created: created, Model: req.Model, // we have to return what the user sent here, due to OpenAI spec. Choices: []schema.Choice{{Delta: &schema.Message{Content: &result}, Index: 0, FinishReason: nil}}, Object: "chat.completion.chunk", Usage: usage, } responses <- resp default: for i, ss := range functionResults { name, args := ss.Name, ss.Arguments initialMessage := schema.OpenAIResponse{ ID: id, Created: created, Model: req.Model, // we have to return what the user sent here, due to OpenAI spec. 
Choices: []schema.Choice{{ Delta: &schema.Message{ Role: "assistant", ToolCalls: []schema.ToolCall{ { Index: i, ID: id, Type: "function", FunctionCall: schema.FunctionCall{ Name: name, }, }, }, }, Index: 0, FinishReason: nil, }}, Object: "chat.completion.chunk", } responses <- initialMessage responses <- schema.OpenAIResponse{ ID: id, Created: created, Model: req.Model, // we have to return what the user sent here, due to OpenAI spec. Choices: []schema.Choice{{ Delta: &schema.Message{ Role: "assistant", Content: &textContentToReturn, ToolCalls: []schema.ToolCall{ { Index: i, ID: id, Type: "function", FunctionCall: schema.FunctionCall{ Arguments: args, }, }, }, }, Index: 0, FinishReason: nil, }}, Object: "chat.completion.chunk", } } } close(responses) return err } return func(c echo.Context) error { textContentToReturn = "" id = uuid.New().String() created = int(time.Now().Unix()) input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenAIRequest) if !ok || input.Model == "" { return echo.ErrBadRequest } extraUsage := c.Request().Header.Get("Extra-Usage") != "" config, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig) if !ok || config == nil { return echo.ErrBadRequest } xlog.Debug("Chat endpoint configuration read", "config", config) funcs := input.Functions shouldUseFn := len(input.Functions) > 0 && config.ShouldUseFunctions() strictMode := false for _, f := range input.Functions { if f.Strict { strictMode = true break } } // Allow the user to set custom actions via config file // to be "embedded" in each model noActionName := "answer" noActionDescription := "use this action to answer without performing any action" if config.FunctionsConfig.NoActionFunctionName != "" { noActionName = config.FunctionsConfig.NoActionFunctionName } if config.FunctionsConfig.NoActionDescriptionName != "" { noActionDescription = config.FunctionsConfig.NoActionDescriptionName } // If we are using a response format, we need to generate a 
grammar for it if config.ResponseFormatMap != nil { d := schema.ChatCompletionResponseFormat{} dat, err := json.Marshal(config.ResponseFormatMap) if err != nil { return err } err = json.Unmarshal(dat, &d) if err != nil { return err } switch d.Type { case "json_object": input.Grammar = functions.JSONBNF case "json_schema": d := schema.JsonSchemaRequest{} dat, err := json.Marshal(config.ResponseFormatMap) if err != nil { return err } err = json.Unmarshal(dat, &d) if err != nil { return err } fs := &functions.JSONFunctionStructure{ AnyOf: []functions.Item{d.JsonSchema.Schema}, } g, err := fs.Grammar(config.FunctionsConfig.GrammarOptions()...) if err == nil { input.Grammar = g } else { xlog.Error("Failed generating grammar", "error", err) } } } config.Grammar = input.Grammar if shouldUseFn { xlog.Debug("Response needs to process functions") } switch { // Generates grammar with internal's LocalAI engine case (!config.FunctionsConfig.GrammarConfig.NoGrammar || strictMode) && shouldUseFn: noActionGrammar := functions.Function{ Name: noActionName, Description: noActionDescription, Parameters: map[string]interface{}{ "properties": map[string]interface{}{ "message": map[string]interface{}{ "type": "string", "description": "The message to reply the user with", }}, }, } // Append the no action function if !config.FunctionsConfig.DisableNoAction && !strictMode { funcs = append(funcs, noActionGrammar) } // Force picking one of the functions by the request if config.FunctionToCall() != "" { funcs = funcs.Select(config.FunctionToCall()) } // Update input grammar or json_schema based on use_llama_grammar option jsStruct := funcs.ToJSONStructure(config.FunctionsConfig.FunctionNameKey, config.FunctionsConfig.FunctionNameKey) g, err := jsStruct.Grammar(config.FunctionsConfig.GrammarOptions()...) 
if err == nil { config.Grammar = g } else { xlog.Error("Failed generating grammar", "error", err) } case input.JSONFunctionGrammarObject != nil: g, err := input.JSONFunctionGrammarObject.Grammar(config.FunctionsConfig.GrammarOptions()...) if err == nil { config.Grammar = g } else { xlog.Error("Failed generating grammar", "error", err) } default: // Force picking one of the functions by the request if config.FunctionToCall() != "" { funcs = funcs.Select(config.FunctionToCall()) } } // process functions if we have any defined or if we have a function call string // functions are not supported in stream mode (yet?) toStream := input.Stream xlog.Debug("Parameters", "config", config) var predInput string // If we are using the tokenizer template, we don't need to process the messages // unless we are processing functions if !config.TemplateConfig.UseTokenizerTemplate { predInput = evaluator.TemplateMessages(*input, input.Messages, config, funcs, shouldUseFn) xlog.Debug("Prompt (after templating)", "prompt", predInput) if config.Grammar != "" { xlog.Debug("Grammar", "grammar", config.Grammar) } } switch { case toStream: xlog.Debug("Stream request received") c.Response().Header().Set("Content-Type", "text/event-stream") c.Response().Header().Set("Cache-Control", "no-cache") c.Response().Header().Set("Connection", "keep-alive") c.Response().Header().Set("X-Correlation-ID", id) responses := make(chan schema.OpenAIResponse) ended := make(chan error, 1) go func() { if !shouldUseFn { ended <- process(predInput, input, config, ml, responses, extraUsage) } else { ended <- processTools(noActionName, predInput, input, config, ml, responses, extraUsage) } }() usage := &schema.OpenAIUsage{} toolsCalled := false LOOP: for { select { case <-input.Context.Done(): // Context was cancelled (client disconnected or request cancelled) xlog.Debug("Request context cancelled, stopping stream") input.Cancel() break LOOP case ev := <-responses: if len(ev.Choices) == 0 { xlog.Debug("No choices in 
the response, skipping") continue } usage = &ev.Usage // Copy a pointer to the latest usage chunk so that the stop message can reference it if len(ev.Choices[0].Delta.ToolCalls) > 0 { toolsCalled = true } respData, err := json.Marshal(ev) if err != nil { xlog.Debug("Failed to marshal response", "error", err) input.Cancel() continue } xlog.Debug("Sending chunk", "chunk", string(respData)) _, err = fmt.Fprintf(c.Response().Writer, "data: %s\n\n", string(respData)) if err != nil { xlog.Debug("Sending chunk failed", "error", err) input.Cancel() return err } c.Response().Flush() case err := <-ended: if err == nil { break LOOP } xlog.Error("Stream ended with error", "error", err) stopReason := FinishReasonStop resp := &schema.OpenAIResponse{ ID: id, Created: created, Model: input.Model, // we have to return what the user sent here, due to OpenAI spec. Choices: []schema.Choice{ { FinishReason: &stopReason, Index: 0, Delta: &schema.Message{Content: "Internal error: " + err.Error()}, }}, Object: "chat.completion.chunk", Usage: *usage, } respData, marshalErr := json.Marshal(resp) if marshalErr != nil { xlog.Error("Failed to marshal error response", "error", marshalErr) // Send a simple error message as fallback fmt.Fprintf(c.Response().Writer, "data: {\"error\":\"Internal error\"}\n\n") } else { fmt.Fprintf(c.Response().Writer, "data: %s\n\n", respData) } fmt.Fprintf(c.Response().Writer, "data: [DONE]\n\n") c.Response().Flush() return nil } } finishReason := FinishReasonStop if toolsCalled && len(input.Tools) > 0 { finishReason = FinishReasonToolCalls } else if toolsCalled { finishReason = FinishReasonFunctionCall } resp := &schema.OpenAIResponse{ ID: id, Created: created, Model: input.Model, // we have to return what the user sent here, due to OpenAI spec. 
Choices: []schema.Choice{ { FinishReason: &finishReason, Index: 0, Delta: &schema.Message{}, }}, Object: "chat.completion.chunk", Usage: *usage, } respData, _ := json.Marshal(resp) fmt.Fprintf(c.Response().Writer, "data: %s\n\n", respData) fmt.Fprintf(c.Response().Writer, "data: [DONE]\n\n") c.Response().Flush() xlog.Debug("Stream ended") return nil // no streaming mode default: tokenCallback := func(s string, c *[]schema.Choice) { if !shouldUseFn { // no function is called, just reply and use stop as finish reason stopReason := FinishReasonStop *c = append(*c, schema.Choice{FinishReason: &stopReason, Index: 0, Message: &schema.Message{Role: "assistant", Content: &s}}) return } textContentToReturn = functions.ParseTextContent(s, config.FunctionsConfig) s = functions.CleanupLLMResult(s, config.FunctionsConfig) results := functions.ParseFunctionCall(s, config.FunctionsConfig) xlog.Debug("Text content to return", "text", textContentToReturn) noActionsToRun := len(results) > 0 && results[0].Name == noActionName || len(results) == 0 switch { case noActionsToRun: result, err := handleQuestion(config, cl, input, ml, startupOptions, results, s, predInput) if err != nil { xlog.Error("error handling question", "error", err) return } stopReason := FinishReasonStop *c = append(*c, schema.Choice{ FinishReason: &stopReason, Message: &schema.Message{Role: "assistant", Content: &result}}) default: toolCallsReason := FinishReasonToolCalls toolChoice := schema.Choice{ FinishReason: &toolCallsReason, Message: &schema.Message{ Role: "assistant", }, } for _, ss := range results { name, args := ss.Name, ss.Arguments if len(input.Tools) > 0 { // If we are using tools, we condense the function calls into // a single response choice with all the tools toolChoice.Message.Content = textContentToReturn toolChoice.Message.ToolCalls = append(toolChoice.Message.ToolCalls, schema.ToolCall{ ID: id, Type: "function", FunctionCall: schema.FunctionCall{ Name: name, Arguments: args, }, }, ) } else { 
// otherwise we return more choices directly (deprecated) functionCallReason := FinishReasonFunctionCall *c = append(*c, schema.Choice{ FinishReason: &functionCallReason, Message: &schema.Message{ Role: "assistant", Content: &textContentToReturn, FunctionCall: map[string]interface{}{ "name": name, "arguments": args, }, }, }) } } if len(input.Tools) > 0 { // we need to append our result if we are using tools *c = append(*c, toolChoice) } } } // Echo properly supports context cancellation via c.Request().Context() // No workaround needed! result, tokenUsage, err := ComputeChoices( input, predInput, config, cl, startupOptions, ml, tokenCallback, nil, ) if err != nil { return err } usage := schema.OpenAIUsage{ PromptTokens: tokenUsage.Prompt, CompletionTokens: tokenUsage.Completion, TotalTokens: tokenUsage.Prompt + tokenUsage.Completion, } if extraUsage { usage.TimingTokenGeneration = tokenUsage.TimingTokenGeneration usage.TimingPromptProcessing = tokenUsage.TimingPromptProcessing } resp := &schema.OpenAIResponse{ ID: id, Created: created, Model: input.Model, // we have to return what the user sent here, due to OpenAI spec. 
			Choices: result,
			Object:  "chat.completion",
			Usage:   usage,
		}

		// Marshal error deliberately ignored: respData feeds only the debug log,
		// while c.JSON re-serializes resp for the actual response.
		respData, _ := json.Marshal(resp)
		xlog.Debug("Response", "response", string(respData))

		// Return the prediction in the response body
		return c.JSON(200, resp)
		}
	}
}

// handleQuestion derives a plain-text reply for the user when the model did
// not select a real function to execute (no results, or only the "no action"
// pseudo-call).
//
// Resolution order:
//  1. If there are no function results but the model already produced text
//     (result), return that text as-is.
//  2. If the first function call's JSON arguments contain a non-empty
//     "message" string, finetune it against the prompt and return it.
//  3. Otherwise run a second, grammar-free inference pass over the original
//     prompt (plus any media attached to the request messages) and return the
//     finetuned response. Note: this costs another full model computation.
//
// Returns the reply text, or an error if the fallback inference pass fails.
func handleQuestion(config *config.ModelConfig, cl *config.ModelConfigLoader, input *schema.OpenAIRequest, ml *model.ModelLoader, o *config.ApplicationConfig, funcResults []functions.FuncCallResults, result, prompt string) (string, error) {

	if len(funcResults) == 0 && result != "" {
		xlog.Debug("nothing function results but we had a message from the LLM")
		return result, nil
	}

	xlog.Debug("nothing to do, computing a reply")

	// Arguments (raw JSON string) of the first function call, if any.
	arg := ""
	if len(funcResults) > 0 {
		arg = funcResults[0].Arguments
	}

	// If there is a message that the LLM already sends as part of the JSON reply, use it
	arguments := map[string]interface{}{}
	if err := json.Unmarshal([]byte(arg), &arguments); err != nil {
		// Not fatal: invalid/empty JSON just means there is no embedded
		// "message" to reuse, and we fall through to the re-inference path.
		xlog.Debug("handleQuestion: function result did not contain a valid JSON object")
	}
	m, exists := arguments["message"]
	if exists {
		switch message := m.(type) {
		case string:
			if message != "" {
				xlog.Debug("Reply received from LLM", "message", message)
				message = backend.Finetune(*config, prompt, message)
				xlog.Debug("Reply received from LLM(finetuned)", "message", message)

				return message, nil
			}
		}
	}

	xlog.Debug("No action received from LLM, without a message, computing a reply")
	// Otherwise ask the LLM to understand the JSON output and the context, and return a message
	// Note: This costs (in term of CPU/GPU) another computation
	config.Grammar = "" // drop any grammar constraint for the free-form reply

	// Gather all media from the request messages so the second pass sees the
	// same multimodal context as the first.
	images := []string{}
	for _, m := range input.Messages {
		images = append(images, m.StringImages...)
	}
	videos := []string{}
	for _, m := range input.Messages {
		videos = append(videos, m.StringVideos...)
	}
	audios := []string{}
	for _, m := range input.Messages {
		audios = append(audios, m.StringAudios...)
	}

	// Serialize tools and tool_choice to JSON strings
	toolsJSON := ""
	if len(input.Tools) > 0 {
		toolsBytes, err := json.Marshal(input.Tools)
		if err == nil {
			toolsJSON = string(toolsBytes)
		}
	}

	toolChoiceJSON := ""
	if input.ToolsChoice != nil {
		toolChoiceBytes, err := json.Marshal(input.ToolsChoice)
		if err == nil {
			toolChoiceJSON = string(toolChoiceBytes)
		}
	}

	// Extract logprobs from request
	// According to OpenAI API: logprobs is boolean, top_logprobs (0-20) controls how many top tokens per position
	var logprobs *int
	var topLogprobs *int
	if input.Logprobs.IsEnabled() {
		// If logprobs is enabled, use top_logprobs if provided, otherwise default to 1
		if input.TopLogprobs != nil {
			topLogprobs = input.TopLogprobs
			// For backend compatibility, set logprobs to the top_logprobs value
			logprobs = input.TopLogprobs
		} else {
			// Default to 1 if logprobs is true but top_logprobs not specified
			val := 1
			logprobs = &val
			topLogprobs = &val
		}
	}

	// Extract logit_bias from request
	// According to OpenAI API: logit_bias is a map of token IDs (as strings) to bias values (-100 to 100)
	var logitBias map[string]float64
	if len(input.LogitBias) > 0 {
		logitBias = input.LogitBias
	}

	predFunc, err := backend.ModelInference(input.Context, prompt, input.Messages, images, videos, audios, ml, config, cl, o, nil, toolsJSON, toolChoiceJSON, logprobs, topLogprobs, logitBias)
	if err != nil {
		xlog.Error("model inference failed", "error", err)
		return "", err
	}

	prediction, err := predFunc()
	if err != nil {
		xlog.Error("prediction failed", "error", err)
		return "", err
	}
	return backend.Finetune(*config, prompt, prediction.Response), nil
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/inpainting_test.go
core/http/endpoints/openai/inpainting_test.go
package openai

import (
	"bytes"
	"mime/multipart"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"testing"

	"github.com/labstack/echo/v4"
	"github.com/mudler/LocalAI/core/backend"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/http/middleware"
	model "github.com/mudler/LocalAI/pkg/model"
	"github.com/stretchr/testify/require"
)

// makeMultipartRequest builds a multipart/form-data POST request against the
// inpainting endpoint. fields holds plain form values; files maps a form
// field name to raw file content (uploaded as "<name>.png"). It returns the
// request (Content-Type already set) and the multipart content type string.
func makeMultipartRequest(t *testing.T, fields map[string]string, files map[string][]byte) (*http.Request, string) {
	t.Helper()

	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	for name, value := range fields {
		_ = writer.WriteField(name, value)
	}
	for name, content := range files {
		part, err := writer.CreateFormFile(name, name+".png")
		require.NoError(t, err)
		_, err = part.Write(content)
		require.NoError(t, err)
	}
	require.NoError(t, writer.Close())

	req := httptest.NewRequest(http.MethodPost, "/v1/images/inpainting", body)
	req.Header.Set("Content-Type", writer.FormDataContentType())
	return req, writer.FormDataContentType()
}

// TestInpainting_MissingFiles verifies the handler errors out on a request
// that carries no multipart body (and therefore no image/mask files).
func TestInpainting_MissingFiles(t *testing.T) {
	e := echo.New()
	// handler requires cl, ml, appConfig but this test verifies missing files early
	h := InpaintingEndpoint(nil, nil, config.NewApplicationConfig())

	req := httptest.NewRequest(http.MethodPost, "/v1/images/inpainting", nil)
	rec := httptest.NewRecorder()
	c := e.NewContext(req, rec)

	err := h(c)
	require.Error(t, err)
}

// TestInpainting_HappyPath stubs the image-generation backend and checks that
// a well-formed image+mask request yields a 200 response referencing a file
// that was actually written to the generated-content directory.
func TestInpainting_HappyPath(t *testing.T) {
	// t.TempDir is removed automatically when the test finishes, so no
	// manual MkdirTemp/RemoveAll bookkeeping is needed.
	tmpDir := t.TempDir()
	appConf := config.NewApplicationConfig(config.WithGeneratedContentDir(tmpDir))

	// Stub backend.ImageGenerationFunc so no real model is loaded; the stub
	// simply writes fake PNG bytes to the destination path.
	orig := backend.ImageGenerationFunc
	backend.ImageGenerationFunc = func(height, width, step, seed int, positivePrompt, negativePrompt, src, dst string, loader *model.ModelLoader, modelConfig config.ModelConfig, appConfig *config.ApplicationConfig, refImages []string) (func() error, error) {
		return func() error {
			return os.WriteFile(dst, []byte("PNGDATA"), 0644)
		}, nil
	}
	t.Cleanup(func() { backend.ImageGenerationFunc = orig })

	// prepare multipart request with image and mask
	fields := map[string]string{"model": "dreamshaper-8-inpainting", "prompt": "A test"}
	files := map[string][]byte{"image": []byte("IMAGEDATA"), "mask": []byte("MASKDATA")}
	req, _ := makeMultipartRequest(t, fields, files)

	rec := httptest.NewRecorder()
	e := echo.New()
	c := e.NewContext(req, rec)
	// set a minimal model config in context as handler expects
	c.Set(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG, &config.ModelConfig{Backend: "diffusers"})

	h := InpaintingEndpoint(nil, nil, appConf)

	// call handler
	err := h(c)
	require.NoError(t, err)
	require.Equal(t, http.StatusOK, rec.Code)

	// verify response body contains generated-images path
	body := rec.Body.String()
	require.Contains(t, body, "generated-images")

	// confirm the file was created in tmpDir: naively parse the filename out
	// of the response by locating "generated-images/" and reading until a
	// closing quote, comma, brace or newline.
	idx := bytes.Index(rec.Body.Bytes(), []byte("generated-images/"))
	require.True(t, idx >= 0)
	rest := rec.Body.Bytes()[idx:]
	end := bytes.IndexAny(rest, "\",}\n")
	if end == -1 {
		end = len(rest)
	}
	fname := string(rest[len("generated-images/"):end])

	// ensure file exists
	_, err = os.Stat(filepath.Join(tmpDir, fname))
	require.NoError(t, err)
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/embeddings.go
core/http/endpoints/openai/embeddings.go
package openai

import (
	"encoding/json"
	"time"

	"github.com/labstack/echo/v4"
	"github.com/mudler/LocalAI/core/backend"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/pkg/model"

	"github.com/google/uuid"
	"github.com/mudler/LocalAI/core/schema"
	"github.com/mudler/xlog"
)

// EmbeddingsEndpoint is the OpenAI Embeddings API endpoint https://platform.openai.com/docs/api-reference/embeddings
// @Summary Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
// @Param request body schema.OpenAIRequest true "query params"
// @Success 200 {object} schema.OpenAIResponse "Response"
// @Router /v1/embeddings [post]
func EmbeddingsEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		// The parsed request and the resolved model config are both placed in
		// the context by upstream middleware; reject the call if either is
		// missing or the model name is empty.
		request, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenAIRequest)
		if !ok || request.Model == "" {
			return echo.ErrBadRequest
		}

		modelConfig, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
		if !ok || modelConfig == nil {
			return echo.ErrBadRequest
		}

		xlog.Debug("Parameter Config", "config", modelConfig)

		data := []schema.Item{}

		// Token-based inputs first, then string inputs; each group restarts
		// its index at zero.
		for idx, tokens := range modelConfig.InputToken {
			// get the model function to call for the result
			embedder, err := backend.ModelEmbedding("", tokens, ml, *modelConfig, appConfig)
			if err != nil {
				return err
			}

			vector, err := embedder()
			if err != nil {
				return err
			}

			data = append(data, schema.Item{Embedding: vector, Index: idx, Object: "embedding"})
		}

		for idx, text := range modelConfig.InputStrings {
			// get the model function to call for the result
			embedder, err := backend.ModelEmbedding(text, []int{}, ml, *modelConfig, appConfig)
			if err != nil {
				return err
			}

			vector, err := embedder()
			if err != nil {
				return err
			}

			data = append(data, schema.Item{Embedding: vector, Index: idx, Object: "embedding"})
		}

		resp := &schema.OpenAIResponse{
			ID:      uuid.New().String(),
			Created: int(time.Now().Unix()),
			Model:   request.Model, // we have to return what the user sent here, due to OpenAI spec.
			Data:    data,
			Object:  "list",
		}

		// Marshal only for debug logging; c.JSON serializes the response itself.
		jsonResult, _ := json.Marshal(resp)
		xlog.Debug("Response", "response", string(jsonResult))

		// Return the prediction in the response body
		return c.JSON(200, resp)
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/openai/types/realtime.go
core/http/endpoints/openai/types/realtime.go
package types // Most of this file was coppied from https://github.com/WqyJh/go-openai-realtime // Copyright (c) 2024 Qiying Wang MIT License import ( "encoding/json" "fmt" "math" ) const ( // Inf is the maximum value for an IntOrInf. Inf IntOrInf = math.MaxInt ) // IntOrInf is a type that can be either an int or "inf". type IntOrInf int // IsInf returns true if the value is "inf". func (m IntOrInf) IsInf() bool { return m == Inf } // MarshalJSON marshals the IntOrInf to JSON. func (m IntOrInf) MarshalJSON() ([]byte, error) { if m == Inf { return []byte("\"inf\""), nil } return json.Marshal(int(m)) } // UnmarshalJSON unmarshals the IntOrInf from JSON. func (m *IntOrInf) UnmarshalJSON(data []byte) error { if string(data) == "\"inf\"" { *m = Inf return nil } if len(data) == 0 { return nil } return json.Unmarshal(data, (*int)(m)) } type AudioFormat string const ( AudioFormatPcm16 AudioFormat = "pcm16" AudioFormatG711Ulaw AudioFormat = "g711_ulaw" AudioFormatG711Alaw AudioFormat = "g711_alaw" ) type Modality string const ( ModalityText Modality = "text" ModalityAudio Modality = "audio" ) type ClientTurnDetectionType string const ( ClientTurnDetectionTypeServerVad ClientTurnDetectionType = "server_vad" ) type ServerTurnDetectionType string const ( ServerTurnDetectionTypeNone ServerTurnDetectionType = "none" ServerTurnDetectionTypeServerVad ServerTurnDetectionType = "server_vad" ) type TurnDetectionType string const ( // TurnDetectionTypeNone means turn detection is disabled. // This can only be used in ServerSession, not in ClientSession. // If you want to disable turn detection, you should send SessionUpdateEvent with TurnDetection set to nil. TurnDetectionTypeNone TurnDetectionType = "none" // TurnDetectionTypeServerVad use server-side VAD to detect turn. // This is default value for newly created session. TurnDetectionTypeServerVad TurnDetectionType = "server_vad" ) type TurnDetectionParams struct { // Activation threshold for VAD. 
Threshold float64 `json:"threshold,omitempty"` // Audio included before speech starts (in milliseconds). PrefixPaddingMs int `json:"prefix_padding_ms,omitempty"` // Duration of silence to detect speech stop (in milliseconds). SilenceDurationMs int `json:"silence_duration_ms,omitempty"` // Whether or not to automatically generate a response when VAD is enabled. true by default. CreateResponse *bool `json:"create_response,omitempty"` } type ClientTurnDetection struct { // Type of turn detection, only "server_vad" is currently supported. Type ClientTurnDetectionType `json:"type"` TurnDetectionParams } type ServerTurnDetection struct { // The type of turn detection ("server_vad" or "none"). Type ServerTurnDetectionType `json:"type"` TurnDetectionParams } type ToolType string const ( ToolTypeFunction ToolType = "function" ) type ToolChoiceInterface interface { ToolChoice() } type ToolChoiceString string func (ToolChoiceString) ToolChoice() {} const ( ToolChoiceAuto ToolChoiceString = "auto" ToolChoiceNone ToolChoiceString = "none" ToolChoiceRequired ToolChoiceString = "required" ) type ToolChoice struct { Type ToolType `json:"type"` Function ToolFunction `json:"function,omitempty"` } func (t ToolChoice) ToolChoice() {} type ToolFunction struct { Name string `json:"name"` } type MessageRole string const ( MessageRoleSystem MessageRole = "system" MessageRoleAssistant MessageRole = "assistant" MessageRoleUser MessageRole = "user" ) type InputAudioTranscription struct { // The model used for transcription. 
Model string `json:"model"` Language string `json:"language,omitempty"` Prompt string `json:"prompt,omitempty"` } type Tool struct { Type ToolType `json:"type"` Name string `json:"name"` Description string `json:"description"` Parameters any `json:"parameters"` } type MessageItemType string const ( MessageItemTypeMessage MessageItemType = "message" MessageItemTypeFunctionCall MessageItemType = "function_call" MessageItemTypeFunctionCallOutput MessageItemType = "function_call_output" ) type MessageContentType string const ( MessageContentTypeText MessageContentType = "text" MessageContentTypeAudio MessageContentType = "audio" MessageContentTypeTranscript MessageContentType = "transcript" MessageContentTypeInputText MessageContentType = "input_text" MessageContentTypeInputAudio MessageContentType = "input_audio" ) type MessageContentPart struct { // The content type. Type MessageContentType `json:"type"` // The text content. Validated if type is text. Text string `json:"text,omitempty"` // Base64-encoded audio data. Validated if type is audio. Audio string `json:"audio,omitempty"` // The transcript of the audio. Validated if type is transcript. Transcript string `json:"transcript,omitempty"` } type MessageItem struct { // The unique ID of the item. ID string `json:"id,omitempty"` // The type of the item ("message", "function_call", "function_call_output"). Type MessageItemType `json:"type"` // The final status of the item. Status ItemStatus `json:"status,omitempty"` // The role associated with the item. Role MessageRole `json:"role,omitempty"` // The content of the item. Content []MessageContentPart `json:"content,omitempty"` // The ID of the function call, if the item is a function call. CallID string `json:"call_id,omitempty"` // The name of the function, if the item is a function call. Name string `json:"name,omitempty"` // The arguments of the function, if the item is a function call. 
	Arguments string `json:"arguments,omitempty"`
	// The output of the function, if the item is a function call output.
	Output string `json:"output,omitempty"`
}

// ResponseMessageItem is a MessageItem as echoed back by the server, carrying
// the server-side object tag.
type ResponseMessageItem struct {
	MessageItem
	// The object type, must be "realtime.item".
	Object string `json:"object,omitempty"`
}

// Error describes an error event emitted by the server.
type Error struct {
	// A human-readable error message.
	Message string `json:"message,omitempty"`
	// The type of error (e.g., "invalid_request_error", "server_error").
	Type string `json:"type,omitempty"`
	// Error code, if any.
	Code string `json:"code,omitempty"`
	// Parameter related to the error, if any.
	Param string `json:"param,omitempty"`
	// The event_id of the client event that caused the error, if applicable.
	EventID string `json:"event_id,omitempty"`
}

// ServerToolChoice is a type that can be used to choose a tool response from the server.
// It holds either a bare string choice ("auto"/"none"/"required") or a
// function-selection object, depending on what the server sent.
type ServerToolChoice struct {
	String   ToolChoiceString
	Function ToolChoice
}

// UnmarshalJSON is a custom unmarshaler for ServerToolChoice.
// It first tries to decode a function-selection object; on failure it falls
// back to treating the payload as a (possibly quoted) string and never
// returns an error.
// NOTE(review): assumes data is non-empty — json.Unmarshal never invokes this
// with an empty slice, but a direct caller passing empty data would panic on
// data[0].
func (m *ServerToolChoice) UnmarshalJSON(data []byte) error {
	err := json.Unmarshal(data, &m.Function)
	if err != nil {
		// Strip surrounding quotes and keep the raw string value.
		if data[0] == '"' {
			data = data[1:]
		}
		if data[len(data)-1] == '"' {
			data = data[:len(data)-1]
		}
		m.String = ToolChoiceString(data)
		m.Function = ToolChoice{}
		return nil
	}
	return nil
}

// IsFunction returns true if the tool choice is a function call.
func (m *ServerToolChoice) IsFunction() bool {
	return m.Function.Type == ToolTypeFunction
}

// Get returns the ToolChoiceInterface based on the type of tool choice.
func (m ServerToolChoice) Get() ToolChoiceInterface {
	if m.IsFunction() {
		return m.Function
	}
	return m.String
}

// ServerSession is the session state as reported by the server.
type ServerSession struct {
	// The unique ID of the session.
	ID string `json:"id"`
	// The object type, must be "realtime.session".
	Object string `json:"object"`
	// The default model used for this session.
	Model string `json:"model"`
	// The set of modalities the model can respond with.
Modalities []Modality `json:"modalities,omitempty"` // The default system instructions. Instructions string `json:"instructions,omitempty"` // The voice the model uses to respond - one of alloy, echo, or shimmer. Voice string `json:"voice,omitempty"` // The format of input audio. InputAudioFormat AudioFormat `json:"input_audio_format,omitempty"` // The format of output audio. OutputAudioFormat AudioFormat `json:"output_audio_format,omitempty"` // Configuration for input audio transcription. InputAudioTranscription *InputAudioTranscription `json:"input_audio_transcription,omitempty"` // Configuration for turn detection. TurnDetection *ServerTurnDetection `json:"turn_detection,omitempty"` // Tools (functions) available to the model. Tools []Tool `json:"tools,omitempty"` // How the model chooses tools. ToolChoice ServerToolChoice `json:"tool_choice,omitempty"` // Sampling temperature. Temperature *float32 `json:"temperature,omitempty"` // Maximum number of output tokens. MaxOutputTokens IntOrInf `json:"max_response_output_tokens,omitempty"` } type ItemStatus string const ( ItemStatusInProgress ItemStatus = "in_progress" ItemStatusCompleted ItemStatus = "completed" ItemStatusIncomplete ItemStatus = "incomplete" ) type Conversation struct { // The unique ID of the conversation. ID string `json:"id"` // The object type, must be "realtime.conversation". 
Object string `json:"object"` } type ResponseStatus string const ( ResponseStatusInProgress ResponseStatus = "in_progress" ResponseStatusCompleted ResponseStatus = "completed" ResponseStatusCancelled ResponseStatus = "cancelled" ResponseStatusIncomplete ResponseStatus = "incomplete" ResponseStatusFailed ResponseStatus = "failed" ) type CachedTokensDetails struct { TextTokens int `json:"text_tokens"` AudioTokens int `json:"audio_tokens"` } type InputTokenDetails struct { CachedTokens int `json:"cached_tokens"` TextTokens int `json:"text_tokens"` AudioTokens int `json:"audio_tokens"` CachedTokensDetails CachedTokensDetails `json:"cached_tokens_details,omitempty"` } type OutputTokenDetails struct { TextTokens int `json:"text_tokens"` AudioTokens int `json:"audio_tokens"` } type Usage struct { TotalTokens int `json:"total_tokens"` InputTokens int `json:"input_tokens"` OutputTokens int `json:"output_tokens"` // Input token details. InputTokenDetails InputTokenDetails `json:"input_token_details,omitempty"` // Output token details. OutputTokenDetails OutputTokenDetails `json:"output_token_details,omitempty"` } type Response struct { // The unique ID of the response. ID string `json:"id"` // The object type, must be "realtime.response". Object string `json:"object"` // The status of the response. Status ResponseStatus `json:"status"` // Additional details about the status. StatusDetails any `json:"status_details,omitempty"` // The list of output items generated by the response. Output []ResponseMessageItem `json:"output"` // Usage statistics for the response. Usage *Usage `json:"usage,omitempty"` } type RateLimit struct { // The name of the rate limit ("requests", "tokens", "input_tokens", "output_tokens"). Name string `json:"name"` // The maximum allowed value for the rate limit. Limit int `json:"limit"` // The remaining value before the limit is reached. Remaining int `json:"remaining"` // Seconds until the rate limit resets. 
ResetSeconds float64 `json:"reset_seconds"` } // ClientEventType is the type of client event. See https://platform.openai.com/docs/guides/realtime/client-events type ClientEventType string const ( ClientEventTypeSessionUpdate ClientEventType = "session.update" ClientEventTypeTranscriptionSessionUpdate ClientEventType = "transcription_session.update" ClientEventTypeInputAudioBufferAppend ClientEventType = "input_audio_buffer.append" ClientEventTypeInputAudioBufferCommit ClientEventType = "input_audio_buffer.commit" ClientEventTypeInputAudioBufferClear ClientEventType = "input_audio_buffer.clear" ClientEventTypeConversationItemCreate ClientEventType = "conversation.item.create" ClientEventTypeConversationItemTruncate ClientEventType = "conversation.item.truncate" ClientEventTypeConversationItemDelete ClientEventType = "conversation.item.delete" ClientEventTypeResponseCreate ClientEventType = "response.create" ClientEventTypeResponseCancel ClientEventType = "response.cancel" ) // ClientEvent is the interface for client event. type ClientEvent interface { ClientEventType() ClientEventType } // EventBase is the base struct for all client events. type EventBase struct { // Optional client-generated ID used to identify this event. EventID string `json:"event_id,omitempty"` } type ClientSession struct { Model string `json:"model,omitempty"` // The set of modalities the model can respond with. To disable audio, set this to ["text"]. Modalities []Modality `json:"modalities,omitempty"` // The default system instructions prepended to model calls. Instructions string `json:"instructions,omitempty"` // The voice the model uses to respond - one of alloy, echo, or shimmer. Cannot be changed once the model has responded with audio at least once. Voice string `json:"voice,omitempty"` // The format of input audio. Options are "pcm16", "g711_ulaw", or "g711_alaw". InputAudioFormat AudioFormat `json:"input_audio_format,omitempty"` // The format of output audio. 
Options are "pcm16", "g711_ulaw", or "g711_alaw". OutputAudioFormat AudioFormat `json:"output_audio_format,omitempty"` // Configuration for input audio transcription. Can be set to `nil` to turn off. InputAudioTranscription *InputAudioTranscription `json:"input_audio_transcription,omitempty"` // Configuration for turn detection. Can be set to `nil` to turn off. TurnDetection *ClientTurnDetection `json:"turn_detection"` // Tools (functions) available to the model. Tools []Tool `json:"tools,omitempty"` // How the model chooses tools. Options are "auto", "none", "required", or specify a function. ToolChoice ToolChoiceInterface `json:"tool_choice,omitempty"` // Sampling temperature for the model. Temperature *float32 `json:"temperature,omitempty"` // Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf". MaxOutputTokens IntOrInf `json:"max_response_output_tokens,omitempty"` } type CreateSessionRequest struct { ClientSession // The Realtime model used for this session. Model string `json:"model,omitempty"` } type ClientSecret struct { // Ephemeral key usable in client environments to authenticate connections to the Realtime API. Use this in client-side environments rather than a standard API token, which should only be used server-side. Value string `json:"value"` // Timestamp for when the token expires. Currently, all tokens expire after one minute. ExpiresAt int64 `json:"expires_at"` } type CreateSessionResponse struct { ServerSession // Ephemeral key returned by the API. ClientSecret ClientSecret `json:"client_secret"` } // SessionUpdateEvent is the event for session update. // Send this event to update the session’s default configuration. 
// See https://platform.openai.com/docs/api-reference/realtime-client-events/session/update type SessionUpdateEvent struct { EventBase // Session configuration to update. Session ClientSession `json:"session"` } func (m SessionUpdateEvent) ClientEventType() ClientEventType { return ClientEventTypeSessionUpdate } func (m SessionUpdateEvent) MarshalJSON() ([]byte, error) { type sessionUpdateEvent SessionUpdateEvent v := struct { *sessionUpdateEvent Type ClientEventType `json:"type"` }{ sessionUpdateEvent: (*sessionUpdateEvent)(&m), Type: m.ClientEventType(), } return json.Marshal(v) } // InputAudioBufferAppendEvent is the event for input audio buffer append. // Send this event to append audio bytes to the input audio buffer. // See https://platform.openai.com/docs/api-reference/realtime-client-events/input_audio_buffer/append type InputAudioBufferAppendEvent struct { EventBase Audio string `json:"audio"` // Base64-encoded audio bytes. } func (m InputAudioBufferAppendEvent) ClientEventType() ClientEventType { return ClientEventTypeInputAudioBufferAppend } func (m InputAudioBufferAppendEvent) MarshalJSON() ([]byte, error) { type inputAudioBufferAppendEvent InputAudioBufferAppendEvent v := struct { *inputAudioBufferAppendEvent Type ClientEventType `json:"type"` }{ inputAudioBufferAppendEvent: (*inputAudioBufferAppendEvent)(&m), Type: m.ClientEventType(), } return json.Marshal(v) } // InputAudioBufferCommitEvent is the event for input audio buffer commit. // Send this event to commit audio bytes to a user message. 
// See https://platform.openai.com/docs/api-reference/realtime-client-events/input_audio_buffer/commit type InputAudioBufferCommitEvent struct { EventBase } func (m InputAudioBufferCommitEvent) ClientEventType() ClientEventType { return ClientEventTypeInputAudioBufferCommit } func (m InputAudioBufferCommitEvent) MarshalJSON() ([]byte, error) { type inputAudioBufferCommitEvent InputAudioBufferCommitEvent v := struct { *inputAudioBufferCommitEvent Type ClientEventType `json:"type"` }{ inputAudioBufferCommitEvent: (*inputAudioBufferCommitEvent)(&m), Type: m.ClientEventType(), } return json.Marshal(v) } // InputAudioBufferClearEvent is the event for input audio buffer clear. // Send this event to clear the audio bytes in the buffer. // See https://platform.openai.com/docs/api-reference/realtime-client-events/input_audio_buffer/clear type InputAudioBufferClearEvent struct { EventBase } func (m InputAudioBufferClearEvent) ClientEventType() ClientEventType { return ClientEventTypeInputAudioBufferClear } func (m InputAudioBufferClearEvent) MarshalJSON() ([]byte, error) { type inputAudioBufferClearEvent InputAudioBufferClearEvent v := struct { *inputAudioBufferClearEvent Type ClientEventType `json:"type"` }{ inputAudioBufferClearEvent: (*inputAudioBufferClearEvent)(&m), Type: m.ClientEventType(), } return json.Marshal(v) } // ConversationItemCreateEvent is the event for conversation item create. // Send this event when adding an item to the conversation. // See https://platform.openai.com/docs/api-reference/realtime-client-events/conversation/item/create type ConversationItemCreateEvent struct { EventBase // The ID of the preceding item after which the new item will be inserted. PreviousItemID string `json:"previous_item_id,omitempty"` // The item to add to the conversation. 
Item MessageItem `json:"item"` } func (m ConversationItemCreateEvent) ClientEventType() ClientEventType { return ClientEventTypeConversationItemCreate } func (m ConversationItemCreateEvent) MarshalJSON() ([]byte, error) { type conversationItemCreateEvent ConversationItemCreateEvent v := struct { *conversationItemCreateEvent Type ClientEventType `json:"type"` }{ conversationItemCreateEvent: (*conversationItemCreateEvent)(&m), Type: m.ClientEventType(), } return json.Marshal(v) } // ConversationItemTruncateEvent is the event for conversation item truncate. // Send this event when you want to truncate a previous assistant message’s audio. // See https://platform.openai.com/docs/api-reference/realtime-client-events/conversation/item/truncate type ConversationItemTruncateEvent struct { EventBase // The ID of the assistant message item to truncate. ItemID string `json:"item_id"` // The index of the content part to truncate. ContentIndex int `json:"content_index"` // Inclusive duration up to which audio is truncated, in milliseconds. AudioEndMs int `json:"audio_end_ms"` } func (m ConversationItemTruncateEvent) ClientEventType() ClientEventType { return ClientEventTypeConversationItemTruncate } func (m ConversationItemTruncateEvent) MarshalJSON() ([]byte, error) { type conversationItemTruncateEvent ConversationItemTruncateEvent v := struct { *conversationItemTruncateEvent Type ClientEventType `json:"type"` }{ conversationItemTruncateEvent: (*conversationItemTruncateEvent)(&m), Type: m.ClientEventType(), } return json.Marshal(v) } // ConversationItemDeleteEvent is the event for conversation item delete. // Send this event when you want to remove any item from the conversation history. // See https://platform.openai.com/docs/api-reference/realtime-client-events/conversation/item/delete type ConversationItemDeleteEvent struct { EventBase // The ID of the item to delete. 
ItemID string `json:"item_id"` } func (m ConversationItemDeleteEvent) ClientEventType() ClientEventType { return ClientEventTypeConversationItemDelete } func (m ConversationItemDeleteEvent) MarshalJSON() ([]byte, error) { type conversationItemDeleteEvent ConversationItemDeleteEvent v := struct { *conversationItemDeleteEvent Type ClientEventType `json:"type"` }{ conversationItemDeleteEvent: (*conversationItemDeleteEvent)(&m), Type: m.ClientEventType(), } return json.Marshal(v) } type ResponseCreateParams struct { // The modalities for the response. Modalities []Modality `json:"modalities,omitempty"` // Instructions for the model. Instructions string `json:"instructions,omitempty"` // The voice the model uses to respond - one of alloy, echo, or shimmer. Voice string `json:"voice,omitempty"` // The format of output audio. OutputAudioFormat AudioFormat `json:"output_audio_format,omitempty"` // Tools (functions) available to the model. Tools []Tool `json:"tools,omitempty"` // How the model chooses tools. ToolChoice ToolChoiceInterface `json:"tool_choice,omitempty"` // Sampling temperature. Temperature *float32 `json:"temperature,omitempty"` // Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf". MaxOutputTokens IntOrInf `json:"max_output_tokens,omitempty"` } // ResponseCreateEvent is the event for response create. // Send this event to trigger a response generation. // See https://platform.openai.com/docs/api-reference/realtime-client-events/response/create type ResponseCreateEvent struct { EventBase // Configuration for the response. 
Response ResponseCreateParams `json:"response"` } func (m ResponseCreateEvent) ClientEventType() ClientEventType { return ClientEventTypeResponseCreate } func (m ResponseCreateEvent) MarshalJSON() ([]byte, error) { type responseCreateEvent ResponseCreateEvent v := struct { *responseCreateEvent Type ClientEventType `json:"type"` }{ responseCreateEvent: (*responseCreateEvent)(&m), Type: m.ClientEventType(), } return json.Marshal(v) } // ResponseCancelEvent is the event for response cancel. // Send this event to cancel an in-progress response. // See https://platform.openai.com/docs/api-reference/realtime-client-events/response/cancel type ResponseCancelEvent struct { EventBase // A specific response ID to cancel - if not provided, will cancel an in-progress response in the default conversation. ResponseID string `json:"response_id,omitempty"` } func (m ResponseCancelEvent) ClientEventType() ClientEventType { return ClientEventTypeResponseCancel } func (m ResponseCancelEvent) MarshalJSON() ([]byte, error) { type responseCancelEvent ResponseCancelEvent v := struct { *responseCancelEvent Type ClientEventType `json:"type"` }{ responseCancelEvent: (*responseCancelEvent)(&m), Type: m.ClientEventType(), } return json.Marshal(v) } // MarshalClientEvent marshals the client event to JSON. 
func MarshalClientEvent(event ClientEvent) ([]byte, error) { return json.Marshal(event) } type ServerEventType string const ( ServerEventTypeError ServerEventType = "error" ServerEventTypeSessionCreated ServerEventType = "session.created" ServerEventTypeSessionUpdated ServerEventType = "session.updated" ServerEventTypeTranscriptionSessionCreated ServerEventType = "transcription_session.created" ServerEventTypeTranscriptionSessionUpdated ServerEventType = "transcription_session.updated" ServerEventTypeConversationCreated ServerEventType = "conversation.created" ServerEventTypeInputAudioBufferCommitted ServerEventType = "input_audio_buffer.committed" ServerEventTypeInputAudioBufferCleared ServerEventType = "input_audio_buffer.cleared" ServerEventTypeInputAudioBufferSpeechStarted ServerEventType = "input_audio_buffer.speech_started" ServerEventTypeInputAudioBufferSpeechStopped ServerEventType = "input_audio_buffer.speech_stopped" ServerEventTypeConversationItemCreated ServerEventType = "conversation.item.created" ServerEventTypeConversationItemInputAudioTranscriptionCompleted ServerEventType = "conversation.item.input_audio_transcription.completed" ServerEventTypeConversationItemInputAudioTranscriptionFailed ServerEventType = "conversation.item.input_audio_transcription.failed" ServerEventTypeConversationItemTruncated ServerEventType = "conversation.item.truncated" ServerEventTypeConversationItemDeleted ServerEventType = "conversation.item.deleted" ServerEventTypeResponseCreated ServerEventType = "response.created" ServerEventTypeResponseDone ServerEventType = "response.done" ServerEventTypeResponseOutputItemAdded ServerEventType = "response.output_item.added" ServerEventTypeResponseOutputItemDone ServerEventType = "response.output_item.done" ServerEventTypeResponseContentPartAdded ServerEventType = "response.content_part.added" ServerEventTypeResponseContentPartDone ServerEventType = "response.content_part.done" ServerEventTypeResponseTextDelta ServerEventType = 
"response.text.delta" ServerEventTypeResponseTextDone ServerEventType = "response.text.done" ServerEventTypeResponseAudioTranscriptDelta ServerEventType = "response.audio_transcript.delta" ServerEventTypeResponseAudioTranscriptDone ServerEventType = "response.audio_transcript.done" ServerEventTypeResponseAudioDelta ServerEventType = "response.audio.delta" ServerEventTypeResponseAudioDone ServerEventType = "response.audio.done" ServerEventTypeResponseFunctionCallArgumentsDelta ServerEventType = "response.function_call_arguments.delta" ServerEventTypeResponseFunctionCallArgumentsDone ServerEventType = "response.function_call_arguments.done" ServerEventTypeRateLimitsUpdated ServerEventType = "rate_limits.updated" ) // ServerEvent is the interface for server events. type ServerEvent interface { ServerEventType() ServerEventType } // ServerEventBase is the base struct for all server events. type ServerEventBase struct { // The unique ID of the server event. EventID string `json:"event_id,omitempty"` // The type of the server event. Type ServerEventType `json:"type"` } func (m ServerEventBase) ServerEventType() ServerEventType { return m.Type } // ErrorEvent is the event for error. // Returned when an error occurs. // See https://platform.openai.com/docs/api-reference/realtime-server-events/error type ErrorEvent struct { ServerEventBase // Details of the error. Error Error `json:"error"` } // SessionCreatedEvent is the event for session created. // Returned when a session is created. Emitted automatically when a new connection is established. // See https://platform.openai.com/docs/api-reference/realtime-server-events/session/created type SessionCreatedEvent struct { ServerEventBase // The session resource. Session ServerSession `json:"session"` } // TranscriptionSessionCreatedEvent is the event for session created. // Returned when a transcription session is created. 
// See https://platform.openai.com/docs/api-reference/realtime-server-events/session/created type TranscriptionSessionCreatedEvent struct { ServerEventBase // The transcription session resource. Session ServerSession `json:"session"` } // SessionUpdatedEvent is the event for session updated. // Returned when a session is updated. // See https://platform.openai.com/docs/api-reference/realtime-server-events/session/updated type SessionUpdatedEvent struct { ServerEventBase // The updated session resource. Session ServerSession `json:"session"` } // ConversationCreatedEvent is the event for conversation created. // Returned when a conversation is created. Emitted right after session creation. // See https://platform.openai.com/docs/api-reference/realtime-server-events/conversation/created type ConversationCreatedEvent struct { ServerEventBase // The conversation resource. Conversation Conversation `json:"conversation"` } // InputAudioBufferCommittedEvent is the event for input audio buffer committed. // Returned when an input audio buffer is committed, either by the client or automatically in server VAD mode. // See https://platform.openai.com/docs/api-reference/realtime-server-events/input_audio_buffer/committed type InputAudioBufferCommittedEvent struct { ServerEventBase // The ID of the preceding item after which the new item will be inserted. PreviousItemID string `json:"previous_item_id,omitempty"` // The ID of the user message item that will be created. ItemID string `json:"item_id"` } // InputAudioBufferClearedEvent is the event for input audio buffer cleared. // Returned when the input audio buffer is cleared by the client. // See https://platform.openai.com/docs/api-reference/realtime-server-events/input_audio_buffer/cleared type InputAudioBufferClearedEvent struct { ServerEventBase } // InputAudioBufferSpeechStartedEvent is the event for input audio buffer speech started. // Returned in server turn detection mode when speech is detected. 
// See https://platform.openai.com/docs/api-reference/realtime-server-events/input_audio_buffer/speech_started type InputAudioBufferSpeechStartedEvent struct { ServerEventBase // Milliseconds since the session started when speech was detected. AudioStartMs int64 `json:"audio_start_ms"` // The ID of the user message item that will be created when speech stops. ItemID string `json:"item_id"` } // InputAudioBufferSpeechStoppedEvent is the event for input audio buffer speech stopped. // Returned in server turn detection mode when speech stops. // See https://platform.openai.com/docs/api-reference/realtime-server-events/input_audio_buffer/speech_stopped type InputAudioBufferSpeechStoppedEvent struct { ServerEventBase // Milliseconds since the session started when speech stopped. AudioEndMs int64 `json:"audio_end_ms"` // The ID of the user message item that will be created. ItemID string `json:"item_id"` } type ConversationItemCreatedEvent struct { ServerEventBase PreviousItemID string `json:"previous_item_id,omitempty"` Item ResponseMessageItem `json:"item"` } type ConversationItemInputAudioTranscriptionCompletedEvent struct { ServerEventBase ItemID string `json:"item_id"` ContentIndex int `json:"content_index"` Transcript string `json:"transcript"` } type ConversationItemInputAudioTranscriptionFailedEvent struct { ServerEventBase ItemID string `json:"item_id"` ContentIndex int `json:"content_index"` Error Error `json:"error"` } type ConversationItemTruncatedEvent struct { ServerEventBase ItemID string `json:"item_id"` // The ID of the assistant message item that was truncated. ContentIndex int `json:"content_index"` // The index of the content part that was truncated. AudioEndMs int `json:"audio_end_ms"` // The duration up to which the audio was truncated, in milliseconds. } type ConversationItemDeletedEvent struct { ServerEventBase ItemID string `json:"item_id"` // The ID of the item that was deleted. } // ResponseCreatedEvent is the event for response created. 
// Returned when a new Response is created. The first event of response creation, where the response is in an initial state of "in_progress". // See https://platform.openai.com/docs/api-reference/realtime-server-events/response/created type ResponseCreatedEvent struct { ServerEventBase
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
true
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/explorer/dashboard.go
core/http/endpoints/explorer/dashboard.go
package explorer import ( "encoding/base64" "net/http" "sort" "strings" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/explorer" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/internal" ) func Dashboard() echo.HandlerFunc { return func(c echo.Context) error { summary := map[string]interface{}{ "Title": "LocalAI API - " + internal.PrintableVersion(), "Version": internal.PrintableVersion(), "BaseURL": middleware.BaseURL(c), } contentType := c.Request().Header.Get("Content-Type") accept := c.Request().Header.Get("Accept") if strings.Contains(contentType, "application/json") || (accept != "" && !strings.Contains(accept, "html")) { // The client expects a JSON response return c.JSON(http.StatusOK, summary) } else { // Render index return c.Render(http.StatusOK, "views/explorer", summary) } } } type AddNetworkRequest struct { Token string `json:"token"` Name string `json:"name"` Description string `json:"description"` } type Network struct { explorer.TokenData Token string `json:"token"` } func ShowNetworks(db *explorer.Database) echo.HandlerFunc { return func(c echo.Context) error { results := []Network{} for _, token := range db.TokenList() { networkData, exists := db.Get(token) // get the token data hasWorkers := false for _, cluster := range networkData.Clusters { if len(cluster.Workers) > 0 { hasWorkers = true break } } if exists && hasWorkers { results = append(results, Network{TokenData: networkData, Token: token}) } } // order by number of clusters sort.Slice(results, func(i, j int) bool { return len(results[i].Clusters) > len(results[j].Clusters) }) return c.JSON(http.StatusOK, results) } } func AddNetwork(db *explorer.Database) echo.HandlerFunc { return func(c echo.Context) error { request := new(AddNetworkRequest) if err := c.Bind(request); err != nil { return c.JSON(http.StatusBadRequest, map[string]interface{}{"error": "Cannot parse JSON"}) } if request.Token == "" { return c.JSON(http.StatusBadRequest, 
map[string]interface{}{"error": "Token is required"}) } if request.Name == "" { return c.JSON(http.StatusBadRequest, map[string]interface{}{"error": "Name is required"}) } if request.Description == "" { return c.JSON(http.StatusBadRequest, map[string]interface{}{"error": "Description is required"}) } // TODO: check if token is valid, otherwise reject // try to decode the token from base64 _, err := base64.StdEncoding.DecodeString(request.Token) if err != nil { return c.JSON(http.StatusBadRequest, map[string]interface{}{"error": "Invalid token"}) } if _, exists := db.Get(request.Token); exists { return c.JSON(http.StatusBadRequest, map[string]interface{}{"error": "Token already exists"}) } err = db.Set(request.Token, explorer.TokenData{Name: request.Name, Description: request.Description}) if err != nil { return c.JSON(http.StatusInternalServerError, map[string]interface{}{"error": "Cannot add token"}) } return c.JSON(http.StatusOK, map[string]interface{}{"message": "Token added"}) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/elevenlabs/soundgeneration.go
core/http/endpoints/elevenlabs/soundgeneration.go
package elevenlabs import ( "path/filepath" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/backend" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/xlog" ) // SoundGenerationEndpoint is the ElevenLabs SoundGeneration endpoint https://elevenlabs.io/docs/api-reference/sound-generation // @Summary Generates audio from the input text. // @Param request body schema.ElevenLabsSoundGenerationRequest true "query params" // @Success 200 {string} binary "Response" // @Router /v1/sound-generation [post] func SoundGenerationEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.ElevenLabsSoundGenerationRequest) if !ok || input.ModelID == "" { return echo.ErrBadRequest } cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig) if !ok || cfg == nil { return echo.ErrBadRequest } xlog.Debug("Sound Generation Request about to be sent to backend", "modelFile", "modelFile", "backend", cfg.Backend) // TODO: Support uploading files? filePath, _, err := backend.SoundGeneration(input.Text, input.Duration, input.Temperature, input.DoSample, nil, nil, ml, appConfig, *cfg) if err != nil { return err } return c.Attachment(filePath, filepath.Base(filePath)) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/elevenlabs/tts.go
core/http/endpoints/elevenlabs/tts.go
package elevenlabs import ( "path/filepath" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/backend" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/xlog" ) // TTSEndpoint is the OpenAI Speech API endpoint https://platform.openai.com/docs/api-reference/audio/createSpeech // @Summary Generates audio from the input text. // @Param voice-id path string true "Account ID" // @Param request body schema.TTSRequest true "query params" // @Success 200 {string} binary "Response" // @Router /v1/text-to-speech/{voice-id} [post] func TTSEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { voiceID := c.Param("voice-id") input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.ElevenLabsTTSRequest) if !ok || input.ModelID == "" { return echo.ErrBadRequest } cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig) if !ok || cfg == nil { return echo.ErrBadRequest } xlog.Debug("elevenlabs TTS request received", "modelName", input.ModelID) filePath, _, err := backend.ModelTTS(input.Text, voiceID, input.LanguageCode, ml, appConfig, *cfg) if err != nil { return err } return c.Attachment(filePath, filepath.Base(filePath)) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/jina/rerank.go
core/http/endpoints/jina/rerank.go
package jina import ( "net/http" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/backend" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/grpc/proto" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/xlog" ) // JINARerankEndpoint acts like the Jina reranker endpoint (https://jina.ai/reranker/) // @Summary Reranks a list of phrases by relevance to a given text query. // @Param request body schema.JINARerankRequest true "query params" // @Success 200 {object} schema.JINARerankResponse "Response" // @Router /v1/rerank [post] func JINARerankEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc { return func(c echo.Context) error { input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.JINARerankRequest) if !ok || input.Model == "" { return echo.ErrBadRequest } cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig) if !ok || cfg == nil { return echo.ErrBadRequest } xlog.Debug("JINA Rerank Request received", "model", input.Model) var requestTopN int32 docs := int32(len(input.Documents)) if input.TopN == nil { // omit top_n to get all requestTopN = docs } else { requestTopN = int32(*input.TopN) if requestTopN < 1 { return c.JSON(http.StatusUnprocessableEntity, "top_n - should be greater than or equal to 1") } if requestTopN > docs { // make it more obvious for backends requestTopN = docs } } request := &proto.RerankRequest{ Query: input.Query, TopN: requestTopN, Documents: input.Documents, } results, err := backend.Rerank(request, ml, appConfig, *cfg) if err != nil { return err } response := &schema.JINARerankResponse{ Model: input.Model, } for _, r := range results.Results { response.Results = append(response.Results, schema.JINADocumentResult{ Index: int(r.Index), Document: schema.JINAText{Text: r.Text}, RelevanceScore: 
float64(r.RelevanceScore), }) } response.Usage.TotalTokens = int(results.Usage.TotalTokens) response.Usage.PromptTokens = int(results.Usage.PromptTokens) return c.JSON(http.StatusOK, response) } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/endpoints/mcp/tools.go
core/http/endpoints/mcp/tools.go
package mcp import ( "context" "net/http" "os" "os/exec" "sync" "time" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/pkg/signals" "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/mudler/xlog" ) type sessionCache struct { mu sync.Mutex cache map[string][]*mcp.ClientSession } var ( cache = sessionCache{ cache: make(map[string][]*mcp.ClientSession), } client = mcp.NewClient(&mcp.Implementation{Name: "LocalAI", Version: "v1.0.0"}, nil) ) func SessionsFromMCPConfig( name string, remote config.MCPGenericConfig[config.MCPRemoteServers], stdio config.MCPGenericConfig[config.MCPSTDIOServers], ) ([]*mcp.ClientSession, error) { cache.mu.Lock() defer cache.mu.Unlock() sessions, exists := cache.cache[name] if exists { return sessions, nil } allSessions := []*mcp.ClientSession{} ctx, cancel := context.WithCancel(context.Background()) // Get the list of all the tools that the Agent will be esposed to for _, server := range remote.Servers { xlog.Debug("[MCP remote server] Configuration", "server", server) // Create HTTP client with custom roundtripper for bearer token injection httpClient := &http.Client{ Timeout: 360 * time.Second, Transport: newBearerTokenRoundTripper(server.Token, http.DefaultTransport), } transport := &mcp.StreamableClientTransport{Endpoint: server.URL, HTTPClient: httpClient} mcpSession, err := client.Connect(ctx, transport, nil) if err != nil { xlog.Error("Failed to connect to MCP server", "error", err, "url", server.URL) continue } xlog.Debug("[MCP remote server] Connected to MCP server", "url", server.URL) cache.cache[name] = append(cache.cache[name], mcpSession) allSessions = append(allSessions, mcpSession) } for _, server := range stdio.Servers { xlog.Debug("[MCP stdio server] Configuration", "server", server) command := exec.Command(server.Command, server.Args...) 
command.Env = os.Environ() for key, value := range server.Env { command.Env = append(command.Env, key+"="+value) } transport := &mcp.CommandTransport{Command: command} mcpSession, err := client.Connect(ctx, transport, nil) if err != nil { xlog.Error("Failed to start MCP server", "error", err, "command", command) continue } xlog.Debug("[MCP stdio server] Connected to MCP server", "command", command) cache.cache[name] = append(cache.cache[name], mcpSession) allSessions = append(allSessions, mcpSession) } signals.RegisterGracefulTerminationHandler(func() { for _, session := range allSessions { session.Close() } cancel() }) return allSessions, nil } // bearerTokenRoundTripper is a custom roundtripper that injects a bearer token // into HTTP requests type bearerTokenRoundTripper struct { token string base http.RoundTripper } // RoundTrip implements the http.RoundTripper interface func (rt *bearerTokenRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { if rt.token != "" { req.Header.Set("Authorization", "Bearer "+rt.token) } return rt.base.RoundTrip(req) } // newBearerTokenRoundTripper creates a new roundtripper that injects the given token func newBearerTokenRoundTripper(token string, base http.RoundTripper) http.RoundTripper { if base == nil { base = http.DefaultTransport } return &bearerTokenRoundTripper{ token: token, base: base, } }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/health.go
core/http/routes/health.go
package routes import ( "github.com/labstack/echo/v4" ) func HealthRoutes(app *echo.Echo) { // Service health checks ok := func(c echo.Context) error { return c.NoContent(200) } app.GET("/healthz", ok) app.GET("/readyz", ok) }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/jina.go
core/http/routes/jina.go
package routes import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/endpoints/jina" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/model" ) func RegisterJINARoutes(app *echo.Echo, re *middleware.RequestExtractor, cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) { // POST endpoint to mimic the reranking rerankHandler := jina.JINARerankEndpoint(cl, ml, appConfig) app.POST("/v1/rerank", rerankHandler, re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_RERANK)), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.JINARerankRequest) })) }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/ui_gallery.go
core/http/routes/ui_gallery.go
package routes import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/services" "github.com/mudler/LocalAI/internal" ) func registerGalleryRoutes(app *echo.Echo, cl *config.ModelConfigLoader, appConfig *config.ApplicationConfig, galleryService *services.GalleryService, opcache *services.OpCache) { app.GET("/browse", func(c echo.Context) error { summary := map[string]interface{}{ "Title": "LocalAI - Models", "BaseURL": middleware.BaseURL(c), "Version": internal.PrintableVersion(), "Repositories": appConfig.Galleries, } // Render index - models are now loaded via Alpine.js from /api/models return c.Render(200, "views/models", summary) }) }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/ui_backend_gallery.go
core/http/routes/ui_backend_gallery.go
package routes import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/services" "github.com/mudler/LocalAI/internal" ) func registerBackendGalleryRoutes(app *echo.Echo, appConfig *config.ApplicationConfig, galleryService *services.GalleryService, opcache *services.OpCache) { // Show the Backends page (all backends are loaded client-side via Alpine.js) app.GET("/browse/backends", func(c echo.Context) error { summary := map[string]interface{}{ "Title": "LocalAI - Backends", "BaseURL": middleware.BaseURL(c), "Version": internal.PrintableVersion(), "Repositories": appConfig.BackendGalleries, } // Render index - backends are now loaded via Alpine.js from /api/backends return c.Render(200, "views/backends", summary) }) }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/ui_api.go
core/http/routes/ui_api.go
package routes import ( "context" "fmt" "math" "net/http" "net/url" "sort" "strconv" "strings" "github.com/google/uuid" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/application" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/gallery" "github.com/mudler/LocalAI/core/http/endpoints/localai" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/p2p" "github.com/mudler/LocalAI/core/services" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/LocalAI/pkg/xsysinfo" "github.com/mudler/xlog" ) const ( nameSortFieldName = "name" repositorySortFieldName = "repository" licenseSortFieldName = "license" statusSortFieldName = "status" ascSortOrder = "asc" ) // RegisterUIAPIRoutes registers JSON API routes for the web UI func RegisterUIAPIRoutes(app *echo.Echo, cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig, galleryService *services.GalleryService, opcache *services.OpCache, applicationInstance *application.Application) { // Operations API - Get all current operations (models + backends) app.GET("/api/operations", func(c echo.Context) error { processingData, taskTypes := opcache.GetStatus() operations := []map[string]interface{}{} for galleryID, jobID := range processingData { taskType := "installation" if tt, ok := taskTypes[galleryID]; ok { taskType = tt } status := galleryService.GetStatus(jobID) progress := 0 isDeletion := false isQueued := false isCancelled := false isCancellable := false message := "" if status != nil { // Skip completed operations (unless cancelled and not yet cleaned up) if status.Processed && !status.Cancelled { continue } // Skip cancelled operations that are processed (they're done, no need to show) if status.Processed && status.Cancelled { continue } progress = int(status.Progress) isDeletion = status.Deletion isCancelled = status.Cancelled isCancellable = status.Cancellable message = status.Message if isDeletion { taskType = "deletion" 
} if isCancelled { taskType = "cancelled" } } else { // Job is queued but hasn't started isQueued = true isCancellable = true message = "Operation queued" } // Determine if it's a model or backend // First check if it was explicitly marked as a backend operation isBackend := opcache.IsBackendOp(galleryID) // If not explicitly marked, check if it matches a known backend from the gallery if !isBackend { backends, _ := gallery.AvailableBackends(appConfig.BackendGalleries, appConfig.SystemState) for _, b := range backends { backendID := fmt.Sprintf("%s@%s", b.Gallery.Name, b.Name) if backendID == galleryID || b.Name == galleryID { isBackend = true break } } } // Extract display name (remove repo prefix if exists) displayName := galleryID if strings.Contains(galleryID, "@") { parts := strings.Split(galleryID, "@") if len(parts) > 1 { displayName = parts[1] } } operations = append(operations, map[string]interface{}{ "id": galleryID, "name": displayName, "fullName": galleryID, "jobID": jobID, "progress": progress, "taskType": taskType, "isDeletion": isDeletion, "isBackend": isBackend, "isQueued": isQueued, "isCancelled": isCancelled, "cancellable": isCancellable, "message": message, }) } // Sort operations by progress (ascending), then by ID for stable display order sort.Slice(operations, func(i, j int) bool { progressI := operations[i]["progress"].(int) progressJ := operations[j]["progress"].(int) // Primary sort by progress if progressI != progressJ { return progressI < progressJ } // Secondary sort by ID for stability when progress is the same return operations[i]["id"].(string) < operations[j]["id"].(string) }) return c.JSON(200, map[string]interface{}{ "operations": operations, }) }) // Cancel operation endpoint app.POST("/api/operations/:jobID/cancel", func(c echo.Context) error { jobID := c.Param("jobID") xlog.Debug("API request to cancel operation", "jobID", jobID) err := galleryService.CancelOperation(jobID) if err != nil { xlog.Error("Failed to cancel 
operation", "error", err, "jobID", jobID) return c.JSON(http.StatusBadRequest, map[string]interface{}{ "error": err.Error(), }) } // Clean up opcache for cancelled operation opcache.DeleteUUID(jobID) return c.JSON(200, map[string]interface{}{ "success": true, "message": "Operation cancelled", }) }) // Model Gallery APIs app.GET("/api/models", func(c echo.Context) error { term := c.QueryParam("term") page := c.QueryParam("page") if page == "" { page = "1" } items := c.QueryParam("items") if items == "" { items = "21" } models, err := gallery.AvailableGalleryModels(appConfig.Galleries, appConfig.SystemState) if err != nil { xlog.Error("could not list models from galleries", "error", err) return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } // Get all available tags allTags := map[string]struct{}{} tags := []string{} for _, m := range models { for _, t := range m.Tags { allTags[t] = struct{}{} } } for t := range allTags { tags = append(tags, t) } sort.Strings(tags) if term != "" { models = gallery.GalleryElements[*gallery.GalleryModel](models).Search(term) } // Get model statuses processingModelsData, taskTypes := opcache.GetStatus() // Apply sorting if requested sortBy := c.QueryParam("sort") sortOrder := c.QueryParam("order") if sortOrder == "" { sortOrder = ascSortOrder } switch sortBy { case nameSortFieldName: models = gallery.GalleryElements[*gallery.GalleryModel](models).SortByName(sortOrder) case repositorySortFieldName: models = gallery.GalleryElements[*gallery.GalleryModel](models).SortByRepository(sortOrder) case licenseSortFieldName: models = gallery.GalleryElements[*gallery.GalleryModel](models).SortByLicense(sortOrder) case statusSortFieldName: models = gallery.GalleryElements[*gallery.GalleryModel](models).SortByInstalled(sortOrder) } pageNum, err := strconv.Atoi(page) if err != nil || pageNum < 1 { pageNum = 1 } itemsNum, err := strconv.Atoi(items) if err != nil || itemsNum < 1 { itemsNum = 21 } totalPages := 
int(math.Ceil(float64(len(models)) / float64(itemsNum))) totalModels := len(models) if pageNum > 0 { models = models.Paginate(pageNum, itemsNum) } // Convert models to JSON-friendly format and deduplicate by ID modelsJSON := make([]map[string]interface{}, 0, len(models)) seenIDs := make(map[string]bool) for _, m := range models { modelID := m.ID() // Skip duplicate IDs to prevent Alpine.js x-for errors if seenIDs[modelID] { xlog.Debug("Skipping duplicate model ID", "modelID", modelID) continue } seenIDs[modelID] = true currentlyProcessing := opcache.Exists(modelID) jobID := "" isDeletionOp := false if currentlyProcessing { jobID = opcache.Get(modelID) status := galleryService.GetStatus(jobID) if status != nil && status.Deletion { isDeletionOp = true } } _, trustRemoteCodeExists := m.Overrides["trust_remote_code"] modelsJSON = append(modelsJSON, map[string]interface{}{ "id": modelID, "name": m.Name, "description": m.Description, "icon": m.Icon, "license": m.License, "urls": m.URLs, "tags": m.Tags, "gallery": m.Gallery.Name, "installed": m.Installed, "processing": currentlyProcessing, "jobID": jobID, "isDeletion": isDeletionOp, "trustRemoteCode": trustRemoteCodeExists, "additionalFiles": m.AdditionalFiles, }) } prevPage := pageNum - 1 nextPage := pageNum + 1 if prevPage < 1 { prevPage = 1 } if nextPage > totalPages { nextPage = totalPages } // Calculate installed models count (models with configs + models without configs) modelConfigs := cl.GetAllModelsConfigs() modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY) installedModelsCount := len(modelConfigs) + len(modelsWithoutConfig) return c.JSON(200, map[string]interface{}{ "models": modelsJSON, "repositories": appConfig.Galleries, "allTags": tags, "processingModels": processingModelsData, "taskTypes": taskTypes, "availableModels": totalModels, "installedModels": installedModelsCount, "currentPage": pageNum, "totalPages": totalPages, "prevPage": prevPage, "nextPage": nextPage, 
}) }) app.POST("/api/models/install/:id", func(c echo.Context) error { galleryID := c.Param("id") // URL decode the gallery ID (e.g., "localai%40model" -> "localai@model") galleryID, err := url.QueryUnescape(galleryID) if err != nil { return c.JSON(http.StatusBadRequest, map[string]interface{}{ "error": "invalid model ID", }) } xlog.Debug("API job submitted to install", "galleryID", galleryID) id, err := uuid.NewUUID() if err != nil { return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } uid := id.String() opcache.Set(galleryID, uid) ctx, cancelFunc := context.WithCancel(context.Background()) op := services.GalleryOp[gallery.GalleryModel, gallery.ModelConfig]{ ID: uid, GalleryElementName: galleryID, Galleries: appConfig.Galleries, BackendGalleries: appConfig.BackendGalleries, Context: ctx, CancelFunc: cancelFunc, } // Store cancellation function immediately so queued operations can be cancelled galleryService.StoreCancellation(uid, cancelFunc) go func() { galleryService.ModelGalleryChannel <- op }() return c.JSON(200, map[string]interface{}{ "jobID": uid, "message": "Installation started", }) }) app.POST("/api/models/delete/:id", func(c echo.Context) error { galleryID := c.Param("id") // URL decode the gallery ID galleryID, err := url.QueryUnescape(galleryID) if err != nil { return c.JSON(http.StatusBadRequest, map[string]interface{}{ "error": "invalid model ID", }) } xlog.Debug("API job submitted to delete", "galleryID", galleryID) var galleryName = galleryID if strings.Contains(galleryID, "@") { galleryName = strings.Split(galleryID, "@")[1] } id, err := uuid.NewUUID() if err != nil { return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } uid := id.String() opcache.Set(galleryID, uid) ctx, cancelFunc := context.WithCancel(context.Background()) op := services.GalleryOp[gallery.GalleryModel, gallery.ModelConfig]{ ID: uid, Delete: true, GalleryElementName: galleryName, Galleries: 
appConfig.Galleries, BackendGalleries: appConfig.BackendGalleries, Context: ctx, CancelFunc: cancelFunc, } // Store cancellation function immediately so queued operations can be cancelled galleryService.StoreCancellation(uid, cancelFunc) go func() { galleryService.ModelGalleryChannel <- op cl.RemoveModelConfig(galleryName) }() return c.JSON(200, map[string]interface{}{ "jobID": uid, "message": "Deletion started", }) }) app.POST("/api/models/config/:id", func(c echo.Context) error { galleryID := c.Param("id") // URL decode the gallery ID galleryID, err := url.QueryUnescape(galleryID) if err != nil { return c.JSON(http.StatusBadRequest, map[string]interface{}{ "error": "invalid model ID", }) } xlog.Debug("API job submitted to get config", "galleryID", galleryID) models, err := gallery.AvailableGalleryModels(appConfig.Galleries, appConfig.SystemState) if err != nil { return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } model := gallery.FindGalleryElement(models, galleryID) if model == nil { return c.JSON(http.StatusNotFound, map[string]interface{}{ "error": "model not found", }) } config, err := gallery.GetGalleryConfigFromURL[gallery.ModelConfig](model.URL, appConfig.SystemState.Model.ModelsPath) if err != nil { return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } _, err = gallery.InstallModel(context.Background(), appConfig.SystemState, model.Name, &config, model.Overrides, nil, false) if err != nil { return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } return c.JSON(200, map[string]interface{}{ "message": "Configuration file saved", }) }) app.GET("/api/models/job/:uid", func(c echo.Context) error { jobUID := c.Param("uid") status := galleryService.GetStatus(jobUID) if status == nil { // Job is queued but hasn't started processing yet return c.JSON(200, map[string]interface{}{ "progress": 0, "message": "Operation queued", 
"galleryElementName": "", "processed": false, "deletion": false, "queued": true, }) } response := map[string]interface{}{ "progress": status.Progress, "message": status.Message, "galleryElementName": status.GalleryElementName, "processed": status.Processed, "deletion": status.Deletion, "queued": false, } if status.Error != nil { response["error"] = status.Error.Error() } if status.Progress == 100 && status.Processed && status.Message == "completed" { opcache.DeleteUUID(jobUID) response["completed"] = true } return c.JSON(200, response) }) // Backend Gallery APIs app.GET("/api/backends", func(c echo.Context) error { term := c.QueryParam("term") page := c.QueryParam("page") if page == "" { page = "1" } items := c.QueryParam("items") if items == "" { items = "21" } backends, err := gallery.AvailableBackends(appConfig.BackendGalleries, appConfig.SystemState) if err != nil { xlog.Error("could not list backends from galleries", "error", err) return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } // Get all available tags allTags := map[string]struct{}{} tags := []string{} for _, b := range backends { for _, t := range b.Tags { allTags[t] = struct{}{} } } for t := range allTags { tags = append(tags, t) } sort.Strings(tags) if term != "" { backends = gallery.GalleryElements[*gallery.GalleryBackend](backends).Search(term) } // Get backend statuses processingBackendsData, taskTypes := opcache.GetStatus() // Apply sorting if requested sortBy := c.QueryParam("sort") sortOrder := c.QueryParam("order") if sortOrder == "" { sortOrder = ascSortOrder } switch sortBy { case nameSortFieldName: backends = gallery.GalleryElements[*gallery.GalleryBackend](backends).SortByName(sortOrder) case repositorySortFieldName: backends = gallery.GalleryElements[*gallery.GalleryBackend](backends).SortByRepository(sortOrder) case licenseSortFieldName: backends = gallery.GalleryElements[*gallery.GalleryBackend](backends).SortByLicense(sortOrder) case 
statusSortFieldName: backends = gallery.GalleryElements[*gallery.GalleryBackend](backends).SortByInstalled(sortOrder) } pageNum, err := strconv.Atoi(page) if err != nil || pageNum < 1 { pageNum = 1 } itemsNum, err := strconv.Atoi(items) if err != nil || itemsNum < 1 { itemsNum = 21 } totalPages := int(math.Ceil(float64(len(backends)) / float64(itemsNum))) totalBackends := len(backends) if pageNum > 0 { backends = backends.Paginate(pageNum, itemsNum) } // Convert backends to JSON-friendly format and deduplicate by ID backendsJSON := make([]map[string]interface{}, 0, len(backends)) seenBackendIDs := make(map[string]bool) for _, b := range backends { backendID := b.ID() // Skip duplicate IDs to prevent Alpine.js x-for errors if seenBackendIDs[backendID] { xlog.Debug("Skipping duplicate backend ID", "backendID", backendID) continue } seenBackendIDs[backendID] = true currentlyProcessing := opcache.Exists(backendID) jobID := "" isDeletionOp := false if currentlyProcessing { jobID = opcache.Get(backendID) status := galleryService.GetStatus(jobID) if status != nil && status.Deletion { isDeletionOp = true } } backendsJSON = append(backendsJSON, map[string]interface{}{ "id": backendID, "name": b.Name, "description": b.Description, "icon": b.Icon, "license": b.License, "urls": b.URLs, "tags": b.Tags, "gallery": b.Gallery.Name, "installed": b.Installed, "processing": currentlyProcessing, "jobID": jobID, "isDeletion": isDeletionOp, }) } prevPage := pageNum - 1 nextPage := pageNum + 1 if prevPage < 1 { prevPage = 1 } if nextPage > totalPages { nextPage = totalPages } // Calculate installed backends count installedBackends, err := gallery.ListSystemBackends(appConfig.SystemState) installedBackendsCount := 0 if err == nil { installedBackendsCount = len(installedBackends) } return c.JSON(200, map[string]interface{}{ "backends": backendsJSON, "repositories": appConfig.BackendGalleries, "allTags": tags, "processingBackends": processingBackendsData, "taskTypes": taskTypes, 
"availableBackends": totalBackends, "installedBackends": installedBackendsCount, "currentPage": pageNum, "totalPages": totalPages, "prevPage": prevPage, "nextPage": nextPage, }) }) app.POST("/api/backends/install/:id", func(c echo.Context) error { backendID := c.Param("id") // URL decode the backend ID backendID, err := url.QueryUnescape(backendID) if err != nil { return c.JSON(http.StatusBadRequest, map[string]interface{}{ "error": "invalid backend ID", }) } xlog.Debug("API job submitted to install backend", "backendID", backendID) id, err := uuid.NewUUID() if err != nil { return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } uid := id.String() opcache.SetBackend(backendID, uid) ctx, cancelFunc := context.WithCancel(context.Background()) op := services.GalleryOp[gallery.GalleryBackend, any]{ ID: uid, GalleryElementName: backendID, Galleries: appConfig.BackendGalleries, Context: ctx, CancelFunc: cancelFunc, } // Store cancellation function immediately so queued operations can be cancelled galleryService.StoreCancellation(uid, cancelFunc) go func() { galleryService.BackendGalleryChannel <- op }() return c.JSON(200, map[string]interface{}{ "jobID": uid, "message": "Backend installation started", }) }) // Install backend from external source (OCI image, URL, or path) app.POST("/api/backends/install-external", func(c echo.Context) error { // Request body structure type ExternalBackendRequest struct { URI string `json:"uri"` Name string `json:"name"` Alias string `json:"alias"` } var req ExternalBackendRequest if err := c.Bind(&req); err != nil { return c.JSON(http.StatusBadRequest, map[string]interface{}{ "error": "invalid request body", }) } // Validate required fields if req.URI == "" { return c.JSON(http.StatusBadRequest, map[string]interface{}{ "error": "uri is required", }) } xlog.Debug("API job submitted to install external backend", "uri", req.URI, "name", req.Name, "alias", req.Alias) id, err := uuid.NewUUID() if err 
!= nil { return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } uid := id.String() // Use URI as the key for opcache, or name if provided cacheKey := req.URI if req.Name != "" { cacheKey = req.Name } opcache.SetBackend(cacheKey, uid) ctx, cancelFunc := context.WithCancel(context.Background()) op := services.GalleryOp[gallery.GalleryBackend, any]{ ID: uid, GalleryElementName: req.Name, // May be empty, will be derived during installation Galleries: appConfig.BackendGalleries, Context: ctx, CancelFunc: cancelFunc, ExternalURI: req.URI, ExternalName: req.Name, ExternalAlias: req.Alias, } // Store cancellation function immediately so queued operations can be cancelled galleryService.StoreCancellation(uid, cancelFunc) go func() { galleryService.BackendGalleryChannel <- op }() return c.JSON(200, map[string]interface{}{ "jobID": uid, "message": "External backend installation started", }) }) app.POST("/api/backends/delete/:id", func(c echo.Context) error { backendID := c.Param("id") // URL decode the backend ID backendID, err := url.QueryUnescape(backendID) if err != nil { return c.JSON(http.StatusBadRequest, map[string]interface{}{ "error": "invalid backend ID", }) } xlog.Debug("API job submitted to delete backend", "backendID", backendID) var backendName = backendID if strings.Contains(backendID, "@") { backendName = strings.Split(backendID, "@")[1] } id, err := uuid.NewUUID() if err != nil { return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } uid := id.String() opcache.SetBackend(backendID, uid) ctx, cancelFunc := context.WithCancel(context.Background()) op := services.GalleryOp[gallery.GalleryBackend, any]{ ID: uid, Delete: true, GalleryElementName: backendName, Galleries: appConfig.BackendGalleries, Context: ctx, CancelFunc: cancelFunc, } // Store cancellation function immediately so queued operations can be cancelled galleryService.StoreCancellation(uid, cancelFunc) go func() { 
galleryService.BackendGalleryChannel <- op }() return c.JSON(200, map[string]interface{}{ "jobID": uid, "message": "Backend deletion started", }) }) app.GET("/api/backends/job/:uid", func(c echo.Context) error { jobUID := c.Param("uid") status := galleryService.GetStatus(jobUID) if status == nil { // Job is queued but hasn't started processing yet return c.JSON(200, map[string]interface{}{ "progress": 0, "message": "Operation queued", "galleryElementName": "", "processed": false, "deletion": false, "queued": true, }) } response := map[string]interface{}{ "progress": status.Progress, "message": status.Message, "galleryElementName": status.GalleryElementName, "processed": status.Processed, "deletion": status.Deletion, "queued": false, } if status.Error != nil { response["error"] = status.Error.Error() } if status.Progress == 100 && status.Processed && status.Message == "completed" { opcache.DeleteUUID(jobUID) response["completed"] = true } return c.JSON(200, response) }) // System Backend Deletion API (for installed backends on index page) app.POST("/api/backends/system/delete/:name", func(c echo.Context) error { backendName := c.Param("name") // URL decode the backend name backendName, err := url.QueryUnescape(backendName) if err != nil { return c.JSON(http.StatusBadRequest, map[string]interface{}{ "error": "invalid backend name", }) } xlog.Debug("API request to delete system backend", "backendName", backendName) // Use the gallery package to delete the backend if err := gallery.DeleteBackendFromSystem(appConfig.SystemState, backendName); err != nil { xlog.Error("Failed to delete backend", "error", err, "backendName", backendName) return c.JSON(http.StatusInternalServerError, map[string]interface{}{ "error": err.Error(), }) } return c.JSON(200, map[string]interface{}{ "success": true, "message": "Backend deleted successfully", }) }) // P2P APIs app.GET("/api/p2p/workers", func(c echo.Context) error { nodes := 
p2p.GetAvailableNodes(p2p.NetworkID(appConfig.P2PNetworkID, p2p.WorkerID)) nodesJSON := make([]map[string]interface{}, 0, len(nodes)) for _, n := range nodes { nodesJSON = append(nodesJSON, map[string]interface{}{ "name": n.Name, "id": n.ID, "tunnelAddress": n.TunnelAddress, "serviceID": n.ServiceID, "lastSeen": n.LastSeen, "isOnline": n.IsOnline(), }) } return c.JSON(200, map[string]interface{}{ "nodes": nodesJSON, }) }) app.GET("/api/p2p/federation", func(c echo.Context) error { nodes := p2p.GetAvailableNodes(p2p.NetworkID(appConfig.P2PNetworkID, p2p.FederatedID)) nodesJSON := make([]map[string]interface{}, 0, len(nodes)) for _, n := range nodes { nodesJSON = append(nodesJSON, map[string]interface{}{ "name": n.Name, "id": n.ID, "tunnelAddress": n.TunnelAddress, "serviceID": n.ServiceID, "lastSeen": n.LastSeen, "isOnline": n.IsOnline(), }) } return c.JSON(200, map[string]interface{}{ "nodes": nodesJSON, }) }) app.GET("/api/p2p/stats", func(c echo.Context) error { workerNodes := p2p.GetAvailableNodes(p2p.NetworkID(appConfig.P2PNetworkID, p2p.WorkerID)) federatedNodes := p2p.GetAvailableNodes(p2p.NetworkID(appConfig.P2PNetworkID, p2p.FederatedID)) workersOnline := 0 for _, n := range workerNodes { if n.IsOnline() { workersOnline++ } } federatedOnline := 0 for _, n := range federatedNodes { if n.IsOnline() { federatedOnline++ } } return c.JSON(200, map[string]interface{}{ "workers": map[string]interface{}{ "online": workersOnline, "total": len(workerNodes), }, "federated": map[string]interface{}{ "online": federatedOnline, "total": len(federatedNodes), }, }) }) // Resources API endpoint - unified memory info (GPU if available, otherwise RAM) app.GET("/api/resources", func(c echo.Context) error { resourceInfo := xsysinfo.GetResourceInfo() // Format watchdog interval watchdogInterval := "2s" // default if appConfig.WatchDogInterval > 0 { watchdogInterval = appConfig.WatchDogInterval.String() } response := map[string]interface{}{ "type": resourceInfo.Type, // "gpu" or 
"ram" "available": resourceInfo.Available, "gpus": resourceInfo.GPUs, "ram": resourceInfo.RAM, "aggregate": resourceInfo.Aggregate, "reclaimer_enabled": appConfig.MemoryReclaimerEnabled, "reclaimer_threshold": appConfig.MemoryReclaimerThreshold, "watchdog_interval": watchdogInterval, } return c.JSON(200, response) }) if !appConfig.DisableRuntimeSettings { // Settings API app.GET("/api/settings", localai.GetSettingsEndpoint(applicationInstance)) app.POST("/api/settings", localai.UpdateSettingsEndpoint(applicationInstance)) } // Logs API app.GET("/api/traces", func(c echo.Context) error { if !appConfig.EnableTracing { return c.JSON(503, map[string]any{ "error": "Tracing disabled", }) } traces := middleware.GetTraces() return c.JSON(200, map[string]interface{}{ "traces": traces, }) }) app.POST("/api/traces/clear", func(c echo.Context) error { middleware.ClearTraces() return c.JSON(200, map[string]interface{}{ "message": "Traces cleared", }) }) }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/elevenlabs.go
core/http/routes/elevenlabs.go
package routes import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/endpoints/elevenlabs" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/model" ) func RegisterElevenLabsRoutes(app *echo.Echo, re *middleware.RequestExtractor, cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) { // Elevenlabs ttsHandler := elevenlabs.TTSEndpoint(cl, ml, appConfig) app.POST("/v1/text-to-speech/:voice-id", ttsHandler, re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_TTS)), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.ElevenLabsTTSRequest) })) soundGenHandler := elevenlabs.SoundGenerationEndpoint(cl, ml, appConfig) app.POST("/v1/sound-generation", soundGenHandler, re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_SOUND_GENERATION)), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.ElevenLabsSoundGenerationRequest) })) }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/ui.go
core/http/routes/ui.go
package routes import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/gallery" "github.com/mudler/LocalAI/core/http/endpoints/localai" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/services" "github.com/mudler/LocalAI/internal" "github.com/mudler/LocalAI/pkg/model" ) func RegisterUIRoutes(app *echo.Echo, cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig, galleryService *services.GalleryService) { // keeps the state of ops that are started from the UI var processingOps = services.NewOpCache(galleryService) app.GET("/", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps)) app.GET("/manage", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps)) if !appConfig.DisableRuntimeSettings { // Settings page app.GET("/settings", func(c echo.Context) error { summary := map[string]interface{}{ "Title": "LocalAI - Settings", "BaseURL": middleware.BaseURL(c), } return c.Render(200, "views/settings", summary) }) } // Agent Jobs pages app.GET("/agent-jobs", func(c echo.Context) error { modelConfigs := cl.GetAllModelsConfigs() summary := map[string]interface{}{ "Title": "LocalAI - Agent Jobs", "BaseURL": middleware.BaseURL(c), "Version": internal.PrintableVersion(), "ModelsConfig": modelConfigs, } return c.Render(200, "views/agent-jobs", summary) }) app.GET("/agent-jobs/tasks/new", func(c echo.Context) error { modelConfigs := cl.GetAllModelsConfigs() summary := map[string]interface{}{ "Title": "LocalAI - Create Task", "BaseURL": middleware.BaseURL(c), "Version": internal.PrintableVersion(), "ModelsConfig": modelConfigs, } return c.Render(200, "views/agent-task-details", summary) }) // More specific route must come first app.GET("/agent-jobs/tasks/:id/edit", func(c echo.Context) error { modelConfigs := cl.GetAllModelsConfigs() summary := map[string]interface{}{ "Title": "LocalAI - Edit Task", "BaseURL": middleware.BaseURL(c), "Version": 
internal.PrintableVersion(), "ModelsConfig": modelConfigs, } return c.Render(200, "views/agent-task-details", summary) }) // Task details page (less specific, comes after edit route) app.GET("/agent-jobs/tasks/:id", func(c echo.Context) error { summary := map[string]interface{}{ "Title": "LocalAI - Task Details", "BaseURL": middleware.BaseURL(c), "Version": internal.PrintableVersion(), } return c.Render(200, "views/agent-task-details", summary) }) app.GET("/agent-jobs/jobs/:id", func(c echo.Context) error { summary := map[string]interface{}{ "Title": "LocalAI - Job Details", "BaseURL": middleware.BaseURL(c), "Version": internal.PrintableVersion(), } return c.Render(200, "views/agent-job-details", summary) }) // P2P app.GET("/p2p", func(c echo.Context) error { summary := map[string]interface{}{ "Title": "LocalAI - P2P dashboard", "BaseURL": middleware.BaseURL(c), "Version": internal.PrintableVersion(), //"Nodes": p2p.GetAvailableNodes(""), //"FederatedNodes": p2p.GetAvailableNodes(p2p.FederatedID), "P2PToken": appConfig.P2PToken, "NetworkID": appConfig.P2PNetworkID, } // Render index return c.Render(200, "views/p2p", summary) }) // Note: P2P UI fragment routes (/p2p/ui/*) were removed // P2P nodes are now fetched via JSON API at /api/p2p/workers and /api/p2p/federation // End P2P if !appConfig.DisableGalleryEndpoint { registerGalleryRoutes(app, cl, appConfig, galleryService, processingOps) registerBackendGalleryRoutes(app, appConfig, galleryService, processingOps) } app.GET("/talk", func(c echo.Context) error { modelConfigs, _ := services.ListModels(cl, ml, config.NoFilterFn, services.SKIP_IF_CONFIGURED) if len(modelConfigs) == 0 { // If no model is available redirect to the index which suggests how to install models return c.Redirect(302, middleware.BaseURL(c)) } summary := map[string]interface{}{ "Title": "LocalAI - Talk", "BaseURL": middleware.BaseURL(c), "ModelsConfig": modelConfigs, "Model": modelConfigs[0], "Version": internal.PrintableVersion(), } // Render 
index return c.Render(200, "views/talk", summary) }) app.GET("/chat", func(c echo.Context) error { modelConfigs := cl.GetAllModelsConfigs() modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY) if len(modelConfigs)+len(modelsWithoutConfig) == 0 { // If no model is available redirect to the index which suggests how to install models return c.Redirect(302, middleware.BaseURL(c)) } modelThatCanBeUsed := "" galleryConfigs := map[string]*gallery.ModelConfig{} for _, m := range modelConfigs { cfg, err := gallery.GetLocalModelConfiguration(ml.ModelPath, m.Name) if err != nil { continue } galleryConfigs[m.Name] = cfg } title := "LocalAI - Chat" var modelContextSize *int for _, b := range modelConfigs { if b.HasUsecases(config.FLAG_CHAT) { modelThatCanBeUsed = b.Name title = "LocalAI - Chat with " + modelThatCanBeUsed if b.LLMConfig.ContextSize != nil { modelContextSize = b.LLMConfig.ContextSize } break } } summary := map[string]interface{}{ "Title": title, "BaseURL": middleware.BaseURL(c), "ModelsWithoutConfig": modelsWithoutConfig, "GalleryConfig": galleryConfigs, "ModelsConfig": modelConfigs, "Model": modelThatCanBeUsed, "ContextSize": modelContextSize, "Version": internal.PrintableVersion(), } // Render index return c.Render(200, "views/chat", summary) }) // Show the Chat page app.GET("/chat/:model", func(c echo.Context) error { modelConfigs := cl.GetAllModelsConfigs() modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY) galleryConfigs := map[string]*gallery.ModelConfig{} modelName := c.Param("model") var modelContextSize *int for _, m := range modelConfigs { cfg, err := gallery.GetLocalModelConfiguration(ml.ModelPath, m.Name) if err != nil { continue } galleryConfigs[m.Name] = cfg if m.Name == modelName && m.LLMConfig.ContextSize != nil { modelContextSize = m.LLMConfig.ContextSize } } summary := map[string]interface{}{ "Title": "LocalAI - Chat with " + modelName, "BaseURL": 
middleware.BaseURL(c), "ModelsConfig": modelConfigs, "GalleryConfig": galleryConfigs, "ModelsWithoutConfig": modelsWithoutConfig, "Model": modelName, "ContextSize": modelContextSize, "Version": internal.PrintableVersion(), } // Render index return c.Render(200, "views/chat", summary) }) app.GET("/text2image/:model", func(c echo.Context) error { modelConfigs := cl.GetAllModelsConfigs() modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY) summary := map[string]interface{}{ "Title": "LocalAI - Generate images with " + c.Param("model"), "BaseURL": middleware.BaseURL(c), "ModelsConfig": modelConfigs, "ModelsWithoutConfig": modelsWithoutConfig, "Model": c.Param("model"), "Version": internal.PrintableVersion(), } // Render index return c.Render(200, "views/text2image", summary) }) app.GET("/text2image", func(c echo.Context) error { modelConfigs := cl.GetAllModelsConfigs() modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY) if len(modelConfigs)+len(modelsWithoutConfig) == 0 { // If no model is available redirect to the index which suggests how to install models return c.Redirect(302, middleware.BaseURL(c)) } modelThatCanBeUsed := "" title := "LocalAI - Generate images" for _, b := range modelConfigs { if b.HasUsecases(config.FLAG_IMAGE) { modelThatCanBeUsed = b.Name title = "LocalAI - Generate images with " + modelThatCanBeUsed break } } summary := map[string]interface{}{ "Title": title, "BaseURL": middleware.BaseURL(c), "ModelsConfig": modelConfigs, "ModelsWithoutConfig": modelsWithoutConfig, "Model": modelThatCanBeUsed, "Version": internal.PrintableVersion(), } // Render index return c.Render(200, "views/text2image", summary) }) app.GET("/tts/:model", func(c echo.Context) error { modelConfigs := cl.GetAllModelsConfigs() modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY) summary := map[string]interface{}{ "Title": "LocalAI - Generate images with " 
+ c.Param("model"), "BaseURL": middleware.BaseURL(c), "ModelsConfig": modelConfigs, "ModelsWithoutConfig": modelsWithoutConfig, "Model": c.Param("model"), "Version": internal.PrintableVersion(), } // Render index return c.Render(200, "views/tts", summary) }) app.GET("/tts", func(c echo.Context) error { modelConfigs := cl.GetAllModelsConfigs() modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY) if len(modelConfigs)+len(modelsWithoutConfig) == 0 { // If no model is available redirect to the index which suggests how to install models return c.Redirect(302, middleware.BaseURL(c)) } modelThatCanBeUsed := "" title := "LocalAI - Generate audio" for _, b := range modelConfigs { if b.HasUsecases(config.FLAG_TTS) { modelThatCanBeUsed = b.Name title = "LocalAI - Generate audio with " + modelThatCanBeUsed break } } summary := map[string]interface{}{ "Title": title, "BaseURL": middleware.BaseURL(c), "ModelsConfig": modelConfigs, "ModelsWithoutConfig": modelsWithoutConfig, "Model": modelThatCanBeUsed, "Version": internal.PrintableVersion(), } // Render index return c.Render(200, "views/tts", summary) }) // Traces UI app.GET("/traces", func(c echo.Context) error { summary := map[string]interface{}{ "Title": "LocalAI - Traces", "BaseURL": middleware.BaseURL(c), "Version": internal.PrintableVersion(), } return c.Render(200, "views/traces", summary) }) app.GET("/api/traces", func(c echo.Context) error { return c.JSON(200, middleware.GetTraces()) }) app.POST("/api/traces/clear", func(c echo.Context) error { middleware.ClearTraces() return c.NoContent(204) }) }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/ui_api_backends_test.go
core/http/routes/ui_api_backends_test.go
package routes_test import ( "bytes" "context" "encoding/json" "io" "net/http" "net/http/httptest" "os" "path/filepath" "testing" "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/gallery" "github.com/mudler/LocalAI/core/http/routes" "github.com/mudler/LocalAI/core/services" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/LocalAI/pkg/system" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) func TestRoutes(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Routes Suite") } var _ = Describe("Backend API Routes", func() { var ( app *echo.Echo tempDir string appConfig *config.ApplicationConfig galleryService *services.GalleryService modelLoader *model.ModelLoader systemState *system.SystemState configLoader *config.ModelConfigLoader ) BeforeEach(func() { var err error tempDir, err = os.MkdirTemp("", "backend-routes-test-*") Expect(err).NotTo(HaveOccurred()) systemState, err = system.GetSystemState( system.WithBackendPath(filepath.Join(tempDir, "backends")), ) Expect(err).NotTo(HaveOccurred()) systemState.Model.ModelsPath = filepath.Join(tempDir, "models") // Create directories err = os.MkdirAll(systemState.Backend.BackendsPath, 0750) Expect(err).NotTo(HaveOccurred()) err = os.MkdirAll(systemState.Model.ModelsPath, 0750) Expect(err).NotTo(HaveOccurred()) modelLoader = model.NewModelLoader(systemState) configLoader = config.NewModelConfigLoader(tempDir) appConfig = config.NewApplicationConfig( config.WithContext(context.Background()), ) appConfig.SystemState = systemState appConfig.BackendGalleries = []config.Gallery{} galleryService = services.NewGalleryService(appConfig, modelLoader) // Start the gallery service err = galleryService.Start(context.Background(), configLoader, systemState) Expect(err).NotTo(HaveOccurred()) app = echo.New() // Register the API routes for backends opcache := services.NewOpCache(galleryService) routes.RegisterUIAPIRoutes(app, configLoader, modelLoader, appConfig, 
galleryService, opcache, nil) }) AfterEach(func() { os.RemoveAll(tempDir) }) Describe("POST /api/backends/install-external", func() { It("should return error when URI is missing", func() { reqBody := map[string]string{ "name": "test-backend", } jsonBody, err := json.Marshal(reqBody) Expect(err).NotTo(HaveOccurred()) req := httptest.NewRequest(http.MethodPost, "/api/backends/install-external", bytes.NewBuffer(jsonBody)) req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() app.ServeHTTP(rec, req) Expect(rec.Code).To(Equal(http.StatusBadRequest)) var response map[string]interface{} err = json.Unmarshal(rec.Body.Bytes(), &response) Expect(err).NotTo(HaveOccurred()) Expect(response["error"]).To(Equal("uri is required")) }) It("should accept valid request and return job ID", func() { reqBody := map[string]string{ "uri": "oci://quay.io/example/backend:latest", "name": "test-backend", "alias": "test-alias", } jsonBody, err := json.Marshal(reqBody) Expect(err).NotTo(HaveOccurred()) req := httptest.NewRequest(http.MethodPost, "/api/backends/install-external", bytes.NewBuffer(jsonBody)) req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() app.ServeHTTP(rec, req) Expect(rec.Code).To(Equal(http.StatusOK)) var response map[string]interface{} err = json.Unmarshal(rec.Body.Bytes(), &response) Expect(err).NotTo(HaveOccurred()) Expect(response["jobID"]).NotTo(BeEmpty()) Expect(response["message"]).To(Equal("External backend installation started")) }) It("should accept request with only URI", func() { reqBody := map[string]string{ "uri": "/path/to/local/backend", } jsonBody, err := json.Marshal(reqBody) Expect(err).NotTo(HaveOccurred()) req := httptest.NewRequest(http.MethodPost, "/api/backends/install-external", bytes.NewBuffer(jsonBody)) req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() app.ServeHTTP(rec, req) Expect(rec.Code).To(Equal(http.StatusOK)) var response map[string]interface{} err = 
json.Unmarshal(rec.Body.Bytes(), &response) Expect(err).NotTo(HaveOccurred()) Expect(response["jobID"]).NotTo(BeEmpty()) }) It("should return error for invalid JSON body", func() { req := httptest.NewRequest(http.MethodPost, "/api/backends/install-external", bytes.NewBufferString("invalid json")) req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() app.ServeHTTP(rec, req) Expect(rec.Code).To(Equal(http.StatusBadRequest)) }) }) Describe("GET /api/backends/job/:uid", func() { It("should return queued status for unknown job", func() { req := httptest.NewRequest(http.MethodGet, "/api/backends/job/unknown-job-id", nil) rec := httptest.NewRecorder() app.ServeHTTP(rec, req) Expect(rec.Code).To(Equal(http.StatusOK)) var response map[string]interface{} err := json.Unmarshal(rec.Body.Bytes(), &response) Expect(err).NotTo(HaveOccurred()) Expect(response["queued"]).To(Equal(true)) Expect(response["processed"]).To(Equal(false)) }) }) }) // Helper function to make POST request func postRequest(url string, body interface{}) (*http.Response, error) { jsonBody, err := json.Marshal(body) if err != nil { return nil, err } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") client := &http.Client{} return client.Do(req) } // Helper function to read response body func readResponseBody(resp *http.Response) (map[string]interface{}, error) { defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } var result map[string]interface{} err = json.Unmarshal(body, &result) return result, err } // Avoid unused import errors var _ = gallery.GalleryModel{}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/openai.go
core/http/routes/openai.go
package routes import ( "github.com/labstack/echo/v4" "github.com/mudler/LocalAI/core/application" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/http/endpoints/localai" "github.com/mudler/LocalAI/core/http/endpoints/openai" "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/schema" ) func RegisterOpenAIRoutes(app *echo.Echo, re *middleware.RequestExtractor, application *application.Application) { // openAI compatible API endpoint traceMiddleware := middleware.TraceMiddleware(application) // realtime // TODO: Modify/disable the API key middleware for this endpoint to allow ephemeral keys created by sessions app.GET("/v1/realtime", openai.Realtime(application)) app.POST("/v1/realtime/sessions", openai.RealtimeTranscriptionSession(application), traceMiddleware) app.POST("/v1/realtime/transcription_session", openai.RealtimeTranscriptionSession(application), traceMiddleware) // chat chatHandler := openai.ChatEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.TemplatesEvaluator(), application.ApplicationConfig()) chatMiddleware := []echo.MiddlewareFunc{ traceMiddleware, re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_CHAT)), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.OpenAIRequest) }), func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { if err := re.SetOpenAIRequest(c); err != nil { return err } return next(c) } }, } app.POST("/v1/chat/completions", chatHandler, chatMiddleware...) app.POST("/chat/completions", chatHandler, chatMiddleware...) 
// edit editHandler := openai.EditEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.TemplatesEvaluator(), application.ApplicationConfig()) editMiddleware := []echo.MiddlewareFunc{ traceMiddleware, re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_EDIT)), re.BuildConstantDefaultModelNameMiddleware("gpt-4o"), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.OpenAIRequest) }), func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { if err := re.SetOpenAIRequest(c); err != nil { return err } return next(c) } }, } app.POST("/v1/edits", editHandler, editMiddleware...) app.POST("/edits", editHandler, editMiddleware...) // completion completionHandler := openai.CompletionEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.TemplatesEvaluator(), application.ApplicationConfig()) completionMiddleware := []echo.MiddlewareFunc{ traceMiddleware, re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_COMPLETION)), re.BuildConstantDefaultModelNameMiddleware("gpt-4o"), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.OpenAIRequest) }), func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { if err := re.SetOpenAIRequest(c); err != nil { return err } return next(c) } }, } app.POST("/v1/completions", completionHandler, completionMiddleware...) app.POST("/completions", completionHandler, completionMiddleware...) app.POST("/v1/engines/:model/completions", completionHandler, completionMiddleware...) 
// embeddings embeddingHandler := openai.EmbeddingsEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()) embeddingMiddleware := []echo.MiddlewareFunc{ traceMiddleware, re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_EMBEDDINGS)), re.BuildConstantDefaultModelNameMiddleware("gpt-4o"), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.OpenAIRequest) }), func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { if err := re.SetOpenAIRequest(c); err != nil { return err } return next(c) } }, } app.POST("/v1/embeddings", embeddingHandler, embeddingMiddleware...) app.POST("/embeddings", embeddingHandler, embeddingMiddleware...) app.POST("/v1/engines/:model/embeddings", embeddingHandler, embeddingMiddleware...) audioHandler := openai.TranscriptEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()) audioMiddleware := []echo.MiddlewareFunc{ traceMiddleware, re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_TRANSCRIPT)), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.OpenAIRequest) }), func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { if err := re.SetOpenAIRequest(c); err != nil { return err } return next(c) } }, } // audio app.POST("/v1/audio/transcriptions", audioHandler, audioMiddleware...) app.POST("/audio/transcriptions", audioHandler, audioMiddleware...) audioSpeechHandler := localai.TTSEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()) audioSpeechMiddleware := []echo.MiddlewareFunc{ traceMiddleware, re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_TTS)), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.TTSRequest) }), } app.POST("/v1/audio/speech", audioSpeechHandler, audioSpeechMiddleware...) 
app.POST("/audio/speech", audioSpeechHandler, audioSpeechMiddleware...) // images imageHandler := openai.ImageEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()) imageMiddleware := []echo.MiddlewareFunc{ traceMiddleware, // Default: use the first available image generation model re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_IMAGE)), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.OpenAIRequest) }), func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { if err := re.SetOpenAIRequest(c); err != nil { return err } return next(c) } }, } app.POST("/v1/images/generations", imageHandler, imageMiddleware...) app.POST("/images/generations", imageHandler, imageMiddleware...) // inpainting endpoint (image + mask) - reuse same middleware config as images inpaintingHandler := openai.InpaintingEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()) app.POST("/v1/images/inpainting", inpaintingHandler, imageMiddleware...) app.POST("/images/inpainting", inpaintingHandler, imageMiddleware...) // videos (OpenAI-compatible endpoints mapped to LocalAI video handler) videoHandler := openai.VideoEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()) videoMiddleware := []echo.MiddlewareFunc{ traceMiddleware, re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_VIDEO)), re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.OpenAIRequest) }), func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { if err := re.SetOpenAIRequest(c); err != nil { return err } return next(c) } }, } // OpenAI-style create video endpoint app.POST("/v1/videos", videoHandler, videoMiddleware...) app.POST("/v1/videos/generations", videoHandler, videoMiddleware...) app.POST("/videos", videoHandler, videoMiddleware...) 
// List models app.GET("/v1/models", openai.ListModelsEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig())) app.GET("/models", openai.ListModelsEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig())) }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/explorer.go
core/http/routes/explorer.go
package routes import ( "github.com/labstack/echo/v4" coreExplorer "github.com/mudler/LocalAI/core/explorer" "github.com/mudler/LocalAI/core/http/endpoints/explorer" ) func RegisterExplorerRoutes(app *echo.Echo, db *coreExplorer.Database) { app.GET("/", explorer.Dashboard()) app.POST("/network/add", explorer.AddNetwork(db)) app.GET("/networks", explorer.ShowNetworks(db)) }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/http/routes/localai.go
core/http/routes/localai.go
package routes

import (
	"github.com/labstack/echo/v4"
	"github.com/mudler/LocalAI/core/application"
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/http/endpoints/localai"
	"github.com/mudler/LocalAI/core/http/middleware"
	"github.com/mudler/LocalAI/core/schema"
	"github.com/mudler/LocalAI/core/services"
	"github.com/mudler/LocalAI/core/templates"
	"github.com/mudler/LocalAI/internal"
	"github.com/mudler/LocalAI/pkg/model"
	echoswagger "github.com/swaggo/echo-swagger"
)

// RegisterLocalAIRoutes attaches all LocalAI-specific HTTP endpoints to the
// given Echo router: gallery management (unless disabled), inference
// endpoints (detection/TTS/VAD/video/tokenize), stores, metrics, backend
// monitoring, p2p info, version/system info, MCP chat, and agent job routes.
// Inference routes share the requestExtractor middleware chain, which picks a
// default model for the endpoint's usecase flag and decodes the request body
// into the endpoint-specific schema type.
func RegisterLocalAIRoutes(router *echo.Echo,
	requestExtractor *middleware.RequestExtractor,
	cl *config.ModelConfigLoader,
	ml *model.ModelLoader,
	appConfig *config.ApplicationConfig,
	galleryService *services.GalleryService,
	opcache *services.OpCache,
	evaluator *templates.Evaluator,
	app *application.Application) {

	router.GET("/swagger/*", echoswagger.WrapHandler) // default

	// LocalAI API endpoints
	// Gallery management UI and API; skipped entirely when the operator
	// disabled the gallery endpoint.
	if !appConfig.DisableGalleryEndpoint {
		// Import model page
		router.GET("/import-model", func(c echo.Context) error {
			return c.Render(200, "views/model-editor", map[string]interface{}{
				"Title":   "LocalAI - Import Model",
				"BaseURL": middleware.BaseURL(c),
				"Version": internal.PrintableVersion(),
			})
		})

		// Edit model page
		router.GET("/models/edit/:name", localai.GetEditModelPage(cl, appConfig))

		// Model gallery CRUD + async job status endpoints.
		modelGalleryEndpointService := localai.CreateModelGalleryEndpointService(appConfig.Galleries, appConfig.BackendGalleries, appConfig.SystemState, galleryService)
		router.POST("/models/apply", modelGalleryEndpointService.ApplyModelGalleryEndpoint())
		router.POST("/models/delete/:name", modelGalleryEndpointService.DeleteModelGalleryEndpoint())
		router.GET("/models/available", modelGalleryEndpointService.ListModelFromGalleryEndpoint(appConfig.SystemState))
		router.GET("/models/galleries", modelGalleryEndpointService.ListModelGalleriesEndpoint())
		router.GET("/models/jobs/:uuid", modelGalleryEndpointService.GetOpStatusEndpoint())
		router.GET("/models/jobs", modelGalleryEndpointService.GetAllStatusEndpoint())

		// Backend gallery CRUD + async job status endpoints.
		backendGalleryEndpointService := localai.CreateBackendEndpointService(appConfig.BackendGalleries, appConfig.SystemState, galleryService)
		router.POST("/backends/apply", backendGalleryEndpointService.ApplyBackendEndpoint())
		router.POST("/backends/delete/:name", backendGalleryEndpointService.DeleteBackendEndpoint())
		router.GET("/backends", backendGalleryEndpointService.ListBackendsEndpoint(appConfig.SystemState))
		router.GET("/backends/available", backendGalleryEndpointService.ListAvailableBackendsEndpoint(appConfig.SystemState))
		router.GET("/backends/galleries", backendGalleryEndpointService.ListBackendGalleriesEndpoint())
		router.GET("/backends/jobs/:uuid", backendGalleryEndpointService.GetOpStatusEndpoint())

		// Custom model import endpoint
		router.POST("/models/import", localai.ImportModelEndpoint(cl, appConfig))
		// URI model import endpoint
		router.POST("/models/import-uri", localai.ImportModelURIEndpoint(cl, appConfig, galleryService, opcache))
		// Custom model edit endpoint
		router.POST("/models/edit/:name", localai.EditModelEndpoint(cl, appConfig))
		// Reload models endpoint
		router.POST("/models/reload", localai.ReloadModelsEndpoint(cl, appConfig))
	}

	// Object detection.
	detectionHandler := localai.DetectionEndpoint(cl, ml, appConfig)
	router.POST("/v1/detection", detectionHandler,
		requestExtractor.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_DETECTION)),
		requestExtractor.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.DetectionRequest) }))

	// Text-to-speech.
	ttsHandler := localai.TTSEndpoint(cl, ml, appConfig)
	router.POST("/tts", ttsHandler,
		requestExtractor.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_TTS)),
		requestExtractor.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.TTSRequest) }))

	// Voice activity detection; registered under both /vad and /v1/vad.
	vadHandler := localai.VADEndpoint(cl, ml, appConfig)
	router.POST("/vad", vadHandler,
		requestExtractor.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_VAD)),
		requestExtractor.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.VADRequest) }))
	router.POST("/v1/vad", vadHandler,
		requestExtractor.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_VAD)),
		requestExtractor.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.VADRequest) }))

	// Stores
	router.POST("/stores/set", localai.StoresSetEndpoint(ml, appConfig))
	router.POST("/stores/delete", localai.StoresDeleteEndpoint(ml, appConfig))
	router.POST("/stores/get", localai.StoresGetEndpoint(ml, appConfig))
	router.POST("/stores/find", localai.StoresFindEndpoint(ml, appConfig))

	if !appConfig.DisableMetrics {
		router.GET("/metrics", localai.LocalAIMetricsEndpoint())
	}

	// Video generation.
	videoHandler := localai.VideoEndpoint(cl, ml, appConfig)
	router.POST("/video", videoHandler,
		requestExtractor.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_VIDEO)),
		requestExtractor.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.VideoRequest) }))

	// Backend Statistics Module
	// TODO: Should these use standard middlewares? Refactor later, they are extremely simple.
	backendMonitorService := services.NewBackendMonitorService(ml, cl, appConfig) // Split out for now
	router.GET("/backend/monitor", localai.BackendMonitorEndpoint(backendMonitorService))
	router.POST("/backend/shutdown", localai.BackendShutdownEndpoint(backendMonitorService))
	// The v1/* urls are exactly the same as above - makes local e2e testing easier if they are registered.
	router.GET("/v1/backend/monitor", localai.BackendMonitorEndpoint(backendMonitorService))
	router.POST("/v1/backend/shutdown", localai.BackendShutdownEndpoint(backendMonitorService))

	// p2p
	router.GET("/api/p2p", localai.ShowP2PNodes(appConfig))
	router.GET("/api/p2p/token", localai.ShowP2PToken(appConfig))

	// Build/version information.
	router.GET("/version", func(c echo.Context) error {
		return c.JSON(200, struct {
			Version string `json:"version"`
		}{Version: internal.PrintableVersion()})
	})

	router.GET("/system", localai.SystemInformations(ml, appConfig))

	// misc
	tokenizeHandler := localai.TokenizeEndpoint(cl, ml, appConfig)
	router.POST("/v1/tokenize", tokenizeHandler,
		requestExtractor.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_TOKENIZE)),
		requestExtractor.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.TokenizeRequest) }))

	// MCP endpoint - supports both streaming and non-streaming modes
	// Note: streaming mode is NOT compatible with the OpenAI apis. We have a set which streams more states.
	if evaluator != nil {
		mcpStreamHandler := localai.MCPEndpoint(cl, ml, evaluator, appConfig)
		// Same extraction chain as chat completions, plus decoding the body
		// into an OpenAIRequest before the handler runs.
		mcpStreamMiddleware := []echo.MiddlewareFunc{
			requestExtractor.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_CHAT)),
			requestExtractor.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.OpenAIRequest) }),
			func(next echo.HandlerFunc) echo.HandlerFunc {
				return func(c echo.Context) error {
					if err := requestExtractor.SetOpenAIRequest(c); err != nil {
						return err
					}
					return next(c)
				}
			},
		}
		router.POST("/v1/mcp/chat/completions", mcpStreamHandler, mcpStreamMiddleware...)
		router.POST("/mcp/v1/chat/completions", mcpStreamHandler, mcpStreamMiddleware...)
		router.POST("/mcp/chat/completions", mcpStreamHandler, mcpStreamMiddleware...)
	}

	// Agent job routes
	// Only registered when the application exposes an agent job service.
	if app != nil && app.AgentJobService() != nil {
		router.POST("/api/agent/tasks", localai.CreateTaskEndpoint(app))
		router.PUT("/api/agent/tasks/:id", localai.UpdateTaskEndpoint(app))
		router.DELETE("/api/agent/tasks/:id", localai.DeleteTaskEndpoint(app))
		router.GET("/api/agent/tasks", localai.ListTasksEndpoint(app))
		router.GET("/api/agent/tasks/:id", localai.GetTaskEndpoint(app))
		router.POST("/api/agent/jobs/execute", localai.ExecuteJobEndpoint(app))
		router.GET("/api/agent/jobs/:id", localai.GetJobEndpoint(app))
		router.GET("/api/agent/jobs", localai.ListJobsEndpoint(app))
		router.POST("/api/agent/jobs/:id/cancel", localai.CancelJobEndpoint(app))
		router.DELETE("/api/agent/jobs/:id", localai.DeleteJobEndpoint(app))
		router.POST("/api/agent/tasks/:name/execute", localai.ExecuteTaskByNameEndpoint(app))
	}
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/startup/model_preload_test.go
core/startup/model_preload_test.go
package startup_test

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/services"
	. "github.com/mudler/LocalAI/core/startup"
	"github.com/mudler/LocalAI/pkg/model"
	"github.com/mudler/LocalAI/pkg/system"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Specs for InstallModels: installing a model from a raw YAML config URL and
// from a huggingface:// blob URI.
// NOTE(review): both specs fetch from the network (GitHub / Hugging Face) and
// will fail offline — confirm they are gated appropriately in CI.
var _ = Describe("Preload test", func() {
	var tmpdir string
	var systemState *system.SystemState
	var ml *model.ModelLoader
	var ctx context.Context
	var cancel context.CancelFunc

	// Each spec gets a fresh temp model dir, system state, loader and context.
	BeforeEach(func() {
		ctx, cancel = context.WithCancel(context.Background())
		var err error
		tmpdir, err = os.MkdirTemp("", "")
		Expect(err).ToNot(HaveOccurred())
		systemState, err = system.GetSystemState(system.WithModelPath(tmpdir))
		Expect(err).ToNot(HaveOccurred())
		ml = model.NewModelLoader(systemState)
	})
	// Cancel the context so any background gallery work stops.
	AfterEach(func() {
		cancel()
	})

	Context("Preloading from strings", func() {
		It("loads from embedded full-urls", func() {
			url := "https://raw.githubusercontent.com/mudler/LocalAI-examples/main/configurations/phi-2.yaml"
			fileName := fmt.Sprintf("%s.yaml", "phi-2")

			galleryService := services.NewGalleryService(&config.ApplicationConfig{
				SystemState: systemState,
			}, ml)
			galleryService.Start(ctx, config.NewModelConfigLoader(tmpdir), systemState)

			err := InstallModels(ctx, galleryService, []config.Gallery{}, []config.Gallery{}, systemState, ml, true, true, func(s1, s2, s3 string, f float64) {
				fmt.Println(s1, s2, s3, f)
			}, url)
			Expect(err).ToNot(HaveOccurred())

			// The YAML config must land in the model dir with the model's name.
			resultFile := filepath.Join(tmpdir, fileName)
			content, err := os.ReadFile(resultFile)
			Expect(err).ToNot(HaveOccurred())
			Expect(string(content)).To(ContainSubstring("name: phi-2"))
		})
		It("downloads from urls", func() {
			url := "huggingface://TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/tinyllama-1.1b-chat-v0.3.Q2_K.gguf"
			fileName := fmt.Sprintf("%s.gguf", "tinyllama-1.1b-chat-v0.3.Q2_K")

			galleryService := services.NewGalleryService(&config.ApplicationConfig{
				SystemState: systemState,
			}, ml)
			galleryService.Start(ctx, config.NewModelConfigLoader(tmpdir), systemState)

			err := InstallModels(ctx, galleryService, []config.Gallery{}, []config.Gallery{}, systemState, ml, true, true, func(s1, s2, s3 string, f float64) {
				fmt.Println(s1, s2, s3, f)
			}, url)
			Expect(err).ToNot(HaveOccurred())

			// The GGUF blob must exist; on failure dump the dir listing to aid debugging.
			resultFile := filepath.Join(tmpdir, fileName)
			dirs, err := os.ReadDir(tmpdir)
			Expect(err).ToNot(HaveOccurred())
			_, err = os.Stat(resultFile)
			Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("%+v", dirs))
		})
	})
})
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/startup/startup_suite_test.go
core/startup/startup_suite_test.go
package startup_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// TestStartup is the single `go test` entry point for this package: it hooks
// Gomega failures into Ginkgo and runs every spec in the suite.
func TestStartup(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "LocalAI startup test")
}
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
mudler/LocalAI
https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/core/startup/model_preload.go
core/startup/model_preload.go
package startup import ( "context" "encoding/json" "errors" "fmt" "time" "github.com/google/uuid" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/gallery" "github.com/mudler/LocalAI/core/gallery/importers" "github.com/mudler/LocalAI/core/services" "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/LocalAI/pkg/system" "github.com/mudler/LocalAI/pkg/utils" "github.com/mudler/xlog" ) const ( YAML_EXTENSION = ".yaml" ) // InstallModels will preload models from the given list of URLs and galleries // It will download the model if it is not already present in the model path // It will also try to resolve if the model is an embedded model YAML configuration func InstallModels(ctx context.Context, galleryService *services.GalleryService, galleries, backendGalleries []config.Gallery, systemState *system.SystemState, modelLoader *model.ModelLoader, enforceScan, autoloadBackendGalleries bool, downloadStatus func(string, string, string, float64), models ...string) error { // create an error that groups all errors var err error for _, url := range models { // Check if it's a model gallery, or print a warning e, found := installModel(ctx, galleries, backendGalleries, url, systemState, modelLoader, downloadStatus, enforceScan, autoloadBackendGalleries) if e != nil && found { xlog.Error("[startup] failed installing model", "error", err, "model", url) err = errors.Join(err, e) } else if !found { xlog.Debug("[startup] model not found in the gallery", "model", url) if galleryService == nil { return fmt.Errorf("cannot start autoimporter, not sure how to handle this uri") } // TODO: we should just use the discoverModelConfig here and default to this. 
modelConfig, discoverErr := importers.DiscoverModelConfig(url, json.RawMessage{}) if discoverErr != nil { xlog.Error("[startup] failed to discover model config", "error", discoverErr, "model", url) err = errors.Join(discoverErr, fmt.Errorf("failed to discover model config: %w", err)) continue } uuid, uuidErr := uuid.NewUUID() if uuidErr != nil { err = errors.Join(uuidErr, fmt.Errorf("failed to generate UUID: %w", uuidErr)) continue } galleryService.ModelGalleryChannel <- services.GalleryOp[gallery.GalleryModel, gallery.ModelConfig]{ Req: gallery.GalleryModel{ Overrides: map[string]interface{}{}, }, ID: uuid.String(), GalleryElementName: modelConfig.Name, GalleryElement: &modelConfig, BackendGalleries: backendGalleries, } var status *services.GalleryOpStatus // wait for op to finish for { status = galleryService.GetStatus(uuid.String()) if status != nil && status.Processed { break } time.Sleep(1 * time.Second) } if status.Error != nil { xlog.Error("[startup] failed to import model", "error", status.Error, "model", modelConfig.Name, "url", url) return status.Error } xlog.Info("[startup] imported model", "model", modelConfig.Name, "url", url) } } return err } func installModel(ctx context.Context, galleries, backendGalleries []config.Gallery, modelName string, systemState *system.SystemState, modelLoader *model.ModelLoader, downloadStatus func(string, string, string, float64), enforceScan, autoloadBackendGalleries bool) (error, bool) { models, err := gallery.AvailableGalleryModels(galleries, systemState) if err != nil { return err, false } model := gallery.FindGalleryElement(models, modelName) if model == nil { return err, false } if downloadStatus == nil { downloadStatus = utils.DisplayDownloadFunction } xlog.Info("installing model", "model", modelName, "license", model.License) err = gallery.InstallModelFromGallery(ctx, galleries, backendGalleries, systemState, modelLoader, modelName, gallery.GalleryModel{}, downloadStatus, enforceScan, autoloadBackendGalleries) if 
err != nil { return err, true } return nil, true }
go
MIT
23df29fbd3eec1af3944521205fd62b20d4149e5
2026-01-07T08:35:47.749878Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/mocks.go
mocks.go
package core

// This file only hosts the go:generate directives that produce the gomock
// test doubles under testing/mocks. Regenerate them with `go generate`.

//go:generate go run github.com/golang/mock/mockgen -package mocks -destination testing/mocks/io.go -mock_names Reader=Reader,Writer=Writer io Reader,Writer
//go:generate go run github.com/golang/mock/mockgen -package mocks -destination testing/mocks/log.go -mock_names Handler=LogHandler v2ray.com/core/common/log Handler
//go:generate go run github.com/golang/mock/mockgen -package mocks -destination testing/mocks/mux.go -mock_names ClientWorkerFactory=MuxClientWorkerFactory v2ray.com/core/common/mux ClientWorkerFactory
//go:generate go run github.com/golang/mock/mockgen -package mocks -destination testing/mocks/dns.go -mock_names Client=DNSClient v2ray.com/core/features/dns Client
//go:generate go run github.com/golang/mock/mockgen -package mocks -destination testing/mocks/outbound.go -mock_names Manager=OutboundManager,HandlerSelector=OutboundHandlerSelector v2ray.com/core/features/outbound Manager,HandlerSelector
//go:generate go run github.com/golang/mock/mockgen -package mocks -destination testing/mocks/proxy.go -mock_names Inbound=ProxyInbound,Outbound=ProxyOutbound v2ray.com/core/proxy Inbound,Outbound
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/errors.generated.go
errors.generated.go
package core

import "v2ray.com/core/common/errors"

// NOTE(review): this file appears to be generated (errors.generated.go) —
// avoid hand edits beyond comments, as regeneration may overwrite them.

// errPathObjHolder is a zero-size marker used to tag errors created in this
// package with a stable path object for error attribution.
type errPathObjHolder struct{}

// newError builds an error from the given values and attaches this package's
// path object so logs can attribute the error to v2ray.com/core.
func newError(values ...interface{}) *errors.Error {
	return errors.New(values...).WithPathObj(errPathObjHolder{})
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/annotations.go
annotations.go
package core // Annotation is a concept in V2Ray. This struct is only for documentation. It is not used anywhere. // Annotations begin with "v2ray:" in comment, as metadata of functions or types. type Annotation struct { // API is for types or functions that can be used in other libs. Possible values are: // // * v2ray:api:beta for types or functions that are ready for use, but maybe changed in the future. // * v2ray:api:stable for types or functions with guarantee of backward compatibility. // * v2ray:api:deprecated for types or functions that should not be used anymore. // // Types or functions without api annotation should not be used externally. API string }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/proto.go
proto.go
package core //go:generate go install -v google.golang.org/protobuf/cmd/protoc-gen-go //go:generate go install -v google.golang.org/grpc/cmd/protoc-gen-go-grpc //go:generate go install -v github.com/gogo/protobuf/protoc-gen-gofast //go:generate go run ./infra/vprotogen/main.go import "path/filepath" // ProtoFilesUsingProtocGenGoFast is the map of Proto files // that use `protoc-gen-gofast` to generate pb.go files var ProtoFilesUsingProtocGenGoFast = map[string]bool{"proxy/vless/encoding/addons.proto": true} // ProtocMap is the map of paths to `protoc` binary excutable files of specific platform var ProtocMap = map[string]string{ "windows": filepath.Join(".dev", "protoc", "windows", "protoc.exe"), "darwin": filepath.Join(".dev", "protoc", "macos", "protoc"), "linux": filepath.Join(".dev", "protoc", "linux", "protoc"), }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/context_test.go
context_test.go
package core_test import ( "context" "testing" . "v2ray.com/core" ) func TestContextPanic(t *testing.T) { defer func() { r := recover() if r == nil { t.Error("expect panic, but nil") } }() MustFromContext(context.Background()) }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/config.go
config.go
// +build !confonly package core import ( "io" "strings" "github.com/golang/protobuf/proto" "v2ray.com/core/common" "v2ray.com/core/common/buf" "v2ray.com/core/common/cmdarg" "v2ray.com/core/main/confloader" ) // ConfigFormat is a configurable format of V2Ray config file. type ConfigFormat struct { Name string Extension []string Loader ConfigLoader } // ConfigLoader is a utility to load V2Ray config from external source. type ConfigLoader func(input interface{}) (*Config, error) var ( configLoaderByName = make(map[string]*ConfigFormat) configLoaderByExt = make(map[string]*ConfigFormat) ) // RegisterConfigLoader add a new ConfigLoader. func RegisterConfigLoader(format *ConfigFormat) error { name := strings.ToLower(format.Name) if _, found := configLoaderByName[name]; found { return newError(format.Name, " already registered.") } configLoaderByName[name] = format for _, ext := range format.Extension { lext := strings.ToLower(ext) if f, found := configLoaderByExt[lext]; found { return newError(ext, " already registered to ", f.Name) } configLoaderByExt[lext] = format } return nil } func getExtension(filename string) string { idx := strings.LastIndexByte(filename, '.') if idx == -1 { return "" } return filename[idx+1:] } // LoadConfig loads config with given format from given source. 
// input accepts 2 different types: // * []string slice of multiple filename/url(s) to open to read // * io.Reader that reads a config content (the original way) func LoadConfig(formatName string, filename string, input interface{}) (*Config, error) { ext := getExtension(filename) if len(ext) > 0 { if f, found := configLoaderByExt[ext]; found { return f.Loader(input) } } if f, found := configLoaderByName[formatName]; found { return f.Loader(input) } return nil, newError("Unable to load config in ", formatName).AtWarning() } func loadProtobufConfig(data []byte) (*Config, error) { config := new(Config) if err := proto.Unmarshal(data, config); err != nil { return nil, err } return config, nil } func init() { common.Must(RegisterConfigLoader(&ConfigFormat{ Name: "Protobuf", Extension: []string{"pb"}, Loader: func(input interface{}) (*Config, error) { switch v := input.(type) { case cmdarg.Arg: r, err := confloader.LoadConfig(v[0]) common.Must(err) data, err := buf.ReadAllToBytes(r) common.Must(err) return loadProtobufConfig(data) case io.Reader: data, err := buf.ReadAllToBytes(v) common.Must(err) return loadProtobufConfig(data) default: return nil, newError("unknow type") } }, })) }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/v2ray.go
v2ray.go
// +build !confonly

package core

import (
	"context"
	"reflect"
	"sync"

	"v2ray.com/core/common"
	"v2ray.com/core/common/serial"
	"v2ray.com/core/features"
	"v2ray.com/core/features/dns"
	"v2ray.com/core/features/dns/localdns"
	"v2ray.com/core/features/inbound"
	"v2ray.com/core/features/outbound"
	"v2ray.com/core/features/policy"
	"v2ray.com/core/features/routing"
	"v2ray.com/core/features/stats"
)

// Server is an instance of V2Ray. At any time, there must be at most one Server instance running.
type Server interface {
	common.Runnable
}

// ServerType returns the type of the server.
func ServerType() interface{} {
	return (*Instance)(nil)
}

// resolution is a deferred callback waiting for a set of feature types to be
// registered; once all deps are present the callback is invoked with them.
type resolution struct {
	deps     []reflect.Type
	callback interface{}
}

// getFeature returns the feature in allFeatures whose Type() matches t, or nil.
func getFeature(allFeatures []features.Feature, t reflect.Type) features.Feature {
	for _, f := range allFeatures {
		if reflect.TypeOf(f.Type()) == t {
			return f
		}
	}
	return nil
}

// resolve invokes the callback if every dependency type is present in
// allFeatures. It returns (false, nil) when deps are still missing, otherwise
// (true, err) where err is the callback's error result, if it returned one.
func (r *resolution) resolve(allFeatures []features.Feature) (bool, error) {
	var fs []features.Feature
	for _, d := range r.deps {
		f := getFeature(allFeatures, d)
		if f == nil {
			// Not all dependencies are registered yet.
			return false, nil
		}
		fs = append(fs, f)
	}

	callback := reflect.ValueOf(r.callback)
	var input []reflect.Value
	callbackType := callback.Type()
	// Match each callback parameter with an assignable resolved feature.
	for i := 0; i < callbackType.NumIn(); i++ {
		pt := callbackType.In(i)
		for _, f := range fs {
			if reflect.TypeOf(f).AssignableTo(pt) {
				input = append(input, reflect.ValueOf(f))
				break
			}
		}
	}

	if len(input) != callbackType.NumIn() {
		panic("Can't get all input parameters")
	}

	var err error
	ret := callback.Call(input)
	// Scan results from the end for an error value (last error wins the scan).
	errInterface := reflect.TypeOf((*error)(nil)).Elem()
	for i := len(ret) - 1; i >= 0; i-- {
		if ret[i].Type() == errInterface {
			v := ret[i].Interface()
			if v != nil {
				err = v.(error)
			}
			break
		}
	}

	return true, err
}

// Instance combines all functionalities in V2Ray.
type Instance struct {
	access             sync.Mutex
	features           []features.Feature
	featureResolutions []resolution
	running            bool

	ctx context.Context
}

// AddInboundHandler creates an inbound handler from config and registers it
// with the server's inbound manager.
func AddInboundHandler(server *Instance, config *InboundHandlerConfig) error {
	inboundManager := server.GetFeature(inbound.ManagerType()).(inbound.Manager)
	rawHandler, err := CreateObject(server, config)
	if err != nil {
		return err
	}
	handler, ok := rawHandler.(inbound.Handler)
	if !ok {
		return newError("not an InboundHandler")
	}
	if err := inboundManager.AddHandler(server.ctx, handler); err != nil {
		return err
	}
	return nil
}

// addInboundHandlers registers each inbound config, stopping at the first error.
func addInboundHandlers(server *Instance, configs []*InboundHandlerConfig) error {
	for _, inboundConfig := range configs {
		if err := AddInboundHandler(server, inboundConfig); err != nil {
			return err
		}
	}

	return nil
}

// AddOutboundHandler creates an outbound handler from config and registers it
// with the server's outbound manager.
func AddOutboundHandler(server *Instance, config *OutboundHandlerConfig) error {
	outboundManager := server.GetFeature(outbound.ManagerType()).(outbound.Manager)
	rawHandler, err := CreateObject(server, config)
	if err != nil {
		return err
	}
	handler, ok := rawHandler.(outbound.Handler)
	if !ok {
		return newError("not an OutboundHandler")
	}
	if err := outboundManager.AddHandler(server.ctx, handler); err != nil {
		return err
	}
	return nil
}

// addOutboundHandlers registers each outbound config, stopping at the first error.
func addOutboundHandlers(server *Instance, configs []*OutboundHandlerConfig) error {
	for _, outboundConfig := range configs {
		if err := AddOutboundHandler(server, outboundConfig); err != nil {
			return err
		}
	}

	return nil
}

// RequireFeatures is a helper function to require features from Instance in context.
// See Instance.RequireFeatures for more information.
func RequireFeatures(ctx context.Context, callback interface{}) error {
	v := MustFromContext(ctx)
	return v.RequireFeatures(callback)
}

// New returns a new V2Ray instance based on given configuration.
// The instance is not started at this point.
// To ensure V2Ray instance works properly, the config must contain one Dispatcher, one InboundHandlerManager and one OutboundHandlerManager. Other features are optional.
func New(config *Config) (*Instance, error) {
	var server = &Instance{ctx: context.Background()}

	err, done := initInstanceWithConfig(config, server)
	if done {
		return nil, err
	}

	return server, nil
}

// NewWithContext is like New but binds the instance to the given context.
func NewWithContext(config *Config, ctx context.Context) (*Instance, error) {
	var server = &Instance{ctx: ctx}

	err, done := initInstanceWithConfig(config, server)
	if done {
		return nil, err
	}

	return server, nil
}

// initInstanceWithConfig applies the config to server: transport settings,
// app features, essential default features, then inbound/outbound handlers.
// The bool result is true when initialization failed (err holds the cause).
func initInstanceWithConfig(config *Config, server *Instance) (error, bool) {
	if config.Transport != nil {
		features.PrintDeprecatedFeatureWarning("global transport settings")
	}
	// NOTE(review): Apply is called even when config.Transport is nil —
	// presumably the method tolerates a nil receiver; confirm.
	if err := config.Transport.Apply(); err != nil {
		return err, true
	}

	for _, appSettings := range config.App {
		settings, err := appSettings.GetInstance()
		if err != nil {
			return err, true
		}
		obj, err := CreateObject(server, settings)
		if err != nil {
			return err, true
		}
		if feature, ok := obj.(features.Feature); ok {
			if err := server.AddFeature(feature); err != nil {
				return err, true
			}
		}
	}

	// Fill in no-op/local defaults for any essential feature the config did
	// not provide.
	essentialFeatures := []struct {
		Type     interface{}
		Instance features.Feature
	}{
		{dns.ClientType(), localdns.New()},
		{policy.ManagerType(), policy.DefaultManager{}},
		{routing.RouterType(), routing.DefaultRouter{}},
		{stats.ManagerType(), stats.NoopManager{}},
	}

	for _, f := range essentialFeatures {
		if server.GetFeature(f.Type) == nil {
			if err := server.AddFeature(f.Instance); err != nil {
				return err, true
			}
		}
	}

	// Any resolution still pending at this point can never be satisfied.
	if server.featureResolutions != nil {
		return newError("not all dependency are resolved."), true
	}

	if err := addInboundHandlers(server, config.Inbound); err != nil {
		return err, true
	}

	if err := addOutboundHandlers(server, config.Outbound); err != nil {
		return err, true
	}

	return nil, false
}

// Type implements common.HasType.
func (s *Instance) Type() interface{} {
	return ServerType()
}

// Close shutdown the V2Ray instance.
// All feature Close errors are collected and reported together.
func (s *Instance) Close() error {
	s.access.Lock()
	defer s.access.Unlock()

	s.running = false

	var errors []interface{}
	for _, f := range s.features {
		if err := f.Close(); err != nil {
			errors = append(errors, err)
		}
	}
	if len(errors) > 0 {
		return newError("failed to close all features").Base(newError(serial.Concat(errors...)))
	}

	return nil
}

// RequireFeatures registers a callback, which will be called when all dependent features are registered.
// The callback must be a func(). All its parameters must be features.Feature.
func (s *Instance) RequireFeatures(callback interface{}) error {
	callbackType := reflect.TypeOf(callback)
	if callbackType.Kind() != reflect.Func {
		panic("not a function")
	}

	var featureTypes []reflect.Type
	for i := 0; i < callbackType.NumIn(); i++ {
		// Features are matched by pointer-to-parameter type.
		featureTypes = append(featureTypes, reflect.PtrTo(callbackType.In(i)))
	}

	r := resolution{
		deps:     featureTypes,
		callback: callback,
	}
	// If all deps are already present, run the callback immediately;
	// otherwise queue it for AddFeature to retry.
	if finished, err := r.resolve(s.features); finished {
		return err
	}
	s.featureResolutions = append(s.featureResolutions, r)
	return nil
}

// AddFeature registers a feature into current Instance.
// On a running instance the feature is started immediately; otherwise any
// pending RequireFeatures resolutions are re-attempted.
func (s *Instance) AddFeature(feature features.Feature) error {
	s.features = append(s.features, feature)

	if s.running {
		if err := feature.Start(); err != nil {
			newError("failed to start feature").Base(err).WriteToLog()
		}
		return nil
	}

	if s.featureResolutions == nil {
		return nil
	}

	var pendingResolutions []resolution
	for _, r := range s.featureResolutions {
		finished, err := r.resolve(s.features)
		if finished && err != nil {
			return err
		}
		if !finished {
			pendingResolutions = append(pendingResolutions, r)
		}
	}
	if len(pendingResolutions) == 0 {
		s.featureResolutions = nil
	} else if len(pendingResolutions) < len(s.featureResolutions) {
		s.featureResolutions = pendingResolutions
	}

	return nil
}

// GetFeature returns a feature of the given type, or nil if such feature is not registered.
func (s *Instance) GetFeature(featureType interface{}) features.Feature {
	return getFeature(s.features, reflect.TypeOf(featureType))
}

// Start starts the V2Ray instance, including all registered features. When Start returns error, the state of the instance is unknown.
// A V2Ray instance can be started only once. Upon closing, the instance is not guaranteed to start again.
//
// v2ray:api:stable
func (s *Instance) Start() error {
	s.access.Lock()
	defer s.access.Unlock()

	s.running = true
	for _, f := range s.features {
		if err := f.Start(); err != nil {
			return err
		}
	}

	newError("V2Ray ", Version(), " started").AtWarning().WriteToLog()

	return nil
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/v2ray_test.go
v2ray_test.go
package core_test

import (
	"testing"

	proto "github.com/golang/protobuf/proto"
	. "v2ray.com/core"
	"v2ray.com/core/app/dispatcher"
	"v2ray.com/core/app/proxyman"
	"v2ray.com/core/common"
	"v2ray.com/core/common/net"
	"v2ray.com/core/common/protocol"
	"v2ray.com/core/common/serial"
	"v2ray.com/core/common/uuid"
	"v2ray.com/core/features/dns"
	"v2ray.com/core/features/dns/localdns"
	_ "v2ray.com/core/main/distro/all"
	"v2ray.com/core/proxy/dokodemo"
	"v2ray.com/core/proxy/vmess"
	"v2ray.com/core/proxy/vmess/outbound"
	"v2ray.com/core/testing/servers/tcp"
)

// TestV2RayDependency verifies that a callback registered via RequireFeatures
// fires once the requested feature (dns.Client) is added with AddFeature.
func TestV2RayDependency(t *testing.T) {
	instance := new(Instance)

	wait := make(chan bool, 1)
	// Fix: the original discarded the errors returned by RequireFeatures and
	// AddFeature. A registration failure would then deadlock on <-wait instead
	// of failing the test with a useful message.
	common.Must(instance.RequireFeatures(func(d dns.Client) {
		if d == nil {
			t.Error("expected dns client fulfilled, but actually nil")
		}
		wait <- true
	}))
	common.Must(instance.AddFeature(localdns.New()))
	<-wait
}

// TestV2RayClose builds a minimal dokodemo-in / vmess-out configuration,
// starts an instance from its serialized protobuf form, and closes it.
func TestV2RayClose(t *testing.T) {
	port := tcp.PickPort()

	userId := uuid.New()
	config := &Config{
		App: []*serial.TypedMessage{
			serial.ToTypedMessage(&dispatcher.Config{}),
			serial.ToTypedMessage(&proxyman.InboundConfig{}),
			serial.ToTypedMessage(&proxyman.OutboundConfig{}),
		},
		Inbound: []*InboundHandlerConfig{
			{
				ReceiverSettings: serial.ToTypedMessage(&proxyman.ReceiverConfig{
					PortRange: net.SinglePortRange(port),
					Listen:    net.NewIPOrDomain(net.LocalHostIP),
				}),
				ProxySettings: serial.ToTypedMessage(&dokodemo.Config{
					Address: net.NewIPOrDomain(net.LocalHostIP),
					Port:    uint32(0),
					NetworkList: &net.NetworkList{
						Network: []net.Network{net.Network_TCP, net.Network_UDP},
					},
				}),
			},
		},
		Outbound: []*OutboundHandlerConfig{
			{
				ProxySettings: serial.ToTypedMessage(&outbound.Config{
					Receiver: []*protocol.ServerEndpoint{
						{
							Address: net.NewIPOrDomain(net.LocalHostIP),
							Port:    uint32(0),
							User: []*protocol.User{
								{
									Account: serial.ToTypedMessage(&vmess.Account{
										Id: userId.String(),
									}),
								},
							},
						},
					},
				}),
			},
		},
	}

	cfgBytes, err := proto.Marshal(config)
	common.Must(err)

	server, err := StartInstance("protobuf", cfgBytes)
	common.Must(err)
	// Fix: the original ignored the error from Close; a failed shutdown
	// (leaked features, dangling listeners) would pass silently.
	common.Must(server.Close())
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/context.go
context.go
// +build !confonly package core import ( "context" ) // V2rayKey is the key type of Instance in Context, exported for test. type V2rayKey int const v2rayKey V2rayKey = 1 // FromContext returns an Instance from the given context, or nil if the context doesn't contain one. func FromContext(ctx context.Context) *Instance { if s, ok := ctx.Value(v2rayKey).(*Instance); ok { return s } return nil } // MustFromContext returns an Instance from the given context, or panics if not present. func MustFromContext(ctx context.Context) *Instance { v := FromContext(ctx) if v == nil { panic("V is not in context.") } return v }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/config.pb.go
config.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 // protoc v3.13.0 // source: config.proto package core import ( proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" serial "v2ray.com/core/common/serial" transport "v2ray.com/core/transport" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 // Config is the master config of V2Ray. V2Ray takes this config as input and // functions accordingly. type Config struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Inbound handler configurations. Must have at least one item. Inbound []*InboundHandlerConfig `protobuf:"bytes,1,rep,name=inbound,proto3" json:"inbound,omitempty"` // Outbound handler configurations. Must have at least one item. The first // item is used as default for routing. Outbound []*OutboundHandlerConfig `protobuf:"bytes,2,rep,name=outbound,proto3" json:"outbound,omitempty"` // App is for configurations of all features in V2Ray. A feature must // implement the Feature interface, and its config type must be registered // through common.RegisterConfig. App []*serial.TypedMessage `protobuf:"bytes,4,rep,name=app,proto3" json:"app,omitempty"` // Transport settings. // Deprecated. Each inbound and outbound should choose their own transport // config. Date to remove: 2020-01-13 // // Deprecated: Do not use. 
Transport *transport.Config `protobuf:"bytes,5,opt,name=transport,proto3" json:"transport,omitempty"` // Configuration for extensions. The config may not work if corresponding // extension is not loaded into V2Ray. V2Ray will ignore such config during // initialization. Extension []*serial.TypedMessage `protobuf:"bytes,6,rep,name=extension,proto3" json:"extension,omitempty"` } func (x *Config) Reset() { *x = Config{} if protoimpl.UnsafeEnabled { mi := &file_config_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Config) String() string { return protoimpl.X.MessageStringOf(x) } func (*Config) ProtoMessage() {} func (x *Config) ProtoReflect() protoreflect.Message { mi := &file_config_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Config.ProtoReflect.Descriptor instead. func (*Config) Descriptor() ([]byte, []int) { return file_config_proto_rawDescGZIP(), []int{0} } func (x *Config) GetInbound() []*InboundHandlerConfig { if x != nil { return x.Inbound } return nil } func (x *Config) GetOutbound() []*OutboundHandlerConfig { if x != nil { return x.Outbound } return nil } func (x *Config) GetApp() []*serial.TypedMessage { if x != nil { return x.App } return nil } // Deprecated: Do not use. func (x *Config) GetTransport() *transport.Config { if x != nil { return x.Transport } return nil } func (x *Config) GetExtension() []*serial.TypedMessage { if x != nil { return x.Extension } return nil } // InboundHandlerConfig is the configuration for inbound handler. type InboundHandlerConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Tag of the inbound handler. 
The tag must be unique among all inbound // handlers Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` // Settings for how this inbound proxy is handled. ReceiverSettings *serial.TypedMessage `protobuf:"bytes,2,opt,name=receiver_settings,json=receiverSettings,proto3" json:"receiver_settings,omitempty"` // Settings for inbound proxy. Must be one of the inbound proxies. ProxySettings *serial.TypedMessage `protobuf:"bytes,3,opt,name=proxy_settings,json=proxySettings,proto3" json:"proxy_settings,omitempty"` } func (x *InboundHandlerConfig) Reset() { *x = InboundHandlerConfig{} if protoimpl.UnsafeEnabled { mi := &file_config_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *InboundHandlerConfig) String() string { return protoimpl.X.MessageStringOf(x) } func (*InboundHandlerConfig) ProtoMessage() {} func (x *InboundHandlerConfig) ProtoReflect() protoreflect.Message { mi := &file_config_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use InboundHandlerConfig.ProtoReflect.Descriptor instead. func (*InboundHandlerConfig) Descriptor() ([]byte, []int) { return file_config_proto_rawDescGZIP(), []int{1} } func (x *InboundHandlerConfig) GetTag() string { if x != nil { return x.Tag } return "" } func (x *InboundHandlerConfig) GetReceiverSettings() *serial.TypedMessage { if x != nil { return x.ReceiverSettings } return nil } func (x *InboundHandlerConfig) GetProxySettings() *serial.TypedMessage { if x != nil { return x.ProxySettings } return nil } // OutboundHandlerConfig is the configuration for outbound handler. type OutboundHandlerConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Tag of this outbound handler. 
Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` // Settings for how to dial connection for this outbound handler. SenderSettings *serial.TypedMessage `protobuf:"bytes,2,opt,name=sender_settings,json=senderSettings,proto3" json:"sender_settings,omitempty"` // Settings for this outbound proxy. Must be one of the outbound proxies. ProxySettings *serial.TypedMessage `protobuf:"bytes,3,opt,name=proxy_settings,json=proxySettings,proto3" json:"proxy_settings,omitempty"` // If not zero, this outbound will be expired in seconds. Not used for now. Expire int64 `protobuf:"varint,4,opt,name=expire,proto3" json:"expire,omitempty"` // Comment of this outbound handler. Not used for now. Comment string `protobuf:"bytes,5,opt,name=comment,proto3" json:"comment,omitempty"` } func (x *OutboundHandlerConfig) Reset() { *x = OutboundHandlerConfig{} if protoimpl.UnsafeEnabled { mi := &file_config_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *OutboundHandlerConfig) String() string { return protoimpl.X.MessageStringOf(x) } func (*OutboundHandlerConfig) ProtoMessage() {} func (x *OutboundHandlerConfig) ProtoReflect() protoreflect.Message { mi := &file_config_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use OutboundHandlerConfig.ProtoReflect.Descriptor instead. 
func (*OutboundHandlerConfig) Descriptor() ([]byte, []int) { return file_config_proto_rawDescGZIP(), []int{2} } func (x *OutboundHandlerConfig) GetTag() string { if x != nil { return x.Tag } return "" } func (x *OutboundHandlerConfig) GetSenderSettings() *serial.TypedMessage { if x != nil { return x.SenderSettings } return nil } func (x *OutboundHandlerConfig) GetProxySettings() *serial.TypedMessage { if x != nil { return x.ProxySettings } return nil } func (x *OutboundHandlerConfig) GetExpire() int64 { if x != nil { return x.Expire } return 0 } func (x *OutboundHandlerConfig) GetComment() string { if x != nil { return x.Comment } return "" } var File_config_proto protoreflect.FileDescriptor var file_config_proto_rawDesc = []byte{ 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x21, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc9, 0x02, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x07, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3d, 0x0a, 0x08, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x52, 0x08, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x70, 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x03, 0x61, 0x70, 0x70, 0x12, 0x3e, 0x0a, 0x09, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x44, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0xcc, 0x01, 0x0a, 0x14, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x53, 0x0a, 0x11, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x53, 0x65, 0x74, 0x74, 0x69, 
0x6e, 0x67, 0x73, 0x12, 0x4d, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x15, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x4f, 0x0a, 0x0f, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x4d, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 
0x65, 0x6e, 0x74, 0x42, 0x2f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x50, 0x01, 0x5a, 0x0e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xaa, 0x02, 0x0a, 0x56, 0x32, 0x52, 0x61, 0x79, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_config_proto_rawDescOnce sync.Once file_config_proto_rawDescData = file_config_proto_rawDesc ) func file_config_proto_rawDescGZIP() []byte { file_config_proto_rawDescOnce.Do(func() { file_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_config_proto_rawDescData) }) return file_config_proto_rawDescData } var file_config_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_config_proto_goTypes = []interface{}{ (*Config)(nil), // 0: v2ray.core.Config (*InboundHandlerConfig)(nil), // 1: v2ray.core.InboundHandlerConfig (*OutboundHandlerConfig)(nil), // 2: v2ray.core.OutboundHandlerConfig (*serial.TypedMessage)(nil), // 3: v2ray.core.common.serial.TypedMessage (*transport.Config)(nil), // 4: v2ray.core.transport.Config } var file_config_proto_depIdxs = []int32{ 1, // 0: v2ray.core.Config.inbound:type_name -> v2ray.core.InboundHandlerConfig 2, // 1: v2ray.core.Config.outbound:type_name -> v2ray.core.OutboundHandlerConfig 3, // 2: v2ray.core.Config.app:type_name -> v2ray.core.common.serial.TypedMessage 4, // 3: v2ray.core.Config.transport:type_name -> v2ray.core.transport.Config 3, // 4: v2ray.core.Config.extension:type_name -> v2ray.core.common.serial.TypedMessage 3, // 5: v2ray.core.InboundHandlerConfig.receiver_settings:type_name -> v2ray.core.common.serial.TypedMessage 3, // 6: v2ray.core.InboundHandlerConfig.proxy_settings:type_name -> v2ray.core.common.serial.TypedMessage 3, // 7: v2ray.core.OutboundHandlerConfig.sender_settings:type_name -> v2ray.core.common.serial.TypedMessage 3, // 8: v2ray.core.OutboundHandlerConfig.proxy_settings:type_name -> v2ray.core.common.serial.TypedMessage 9, 
// [9:9] is the sub-list for method output_type 9, // [9:9] is the sub-list for method input_type 9, // [9:9] is the sub-list for extension type_name 9, // [9:9] is the sub-list for extension extendee 0, // [0:9] is the sub-list for field type_name } func init() { file_config_proto_init() } func file_config_proto_init() { if File_config_proto != nil { return } if !protoimpl.UnsafeEnabled { file_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Config); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InboundHandlerConfig); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*OutboundHandlerConfig); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_config_proto_rawDesc, NumEnums: 0, NumMessages: 3, NumExtensions: 0, NumServices: 0, }, GoTypes: file_config_proto_goTypes, DependencyIndexes: file_config_proto_depIdxs, MessageInfos: file_config_proto_msgTypes, }.Build() File_config_proto = out.File file_config_proto_rawDesc = nil file_config_proto_goTypes = nil file_config_proto_depIdxs = nil }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/functions.go
functions.go
// +build !confonly package core import ( "bytes" "context" "v2ray.com/core/common" "v2ray.com/core/common/net" "v2ray.com/core/features/routing" "v2ray.com/core/transport/internet/udp" ) // CreateObject creates a new object based on the given V2Ray instance and config. The V2Ray instance may be nil. func CreateObject(v *Instance, config interface{}) (interface{}, error) { ctx := v.ctx if v != nil { ctx = context.WithValue(ctx, v2rayKey, v) } return common.CreateObject(ctx, config) } // StartInstance starts a new V2Ray instance with given serialized config. // By default V2Ray only support config in protobuf format, i.e., configFormat = "protobuf". Caller need to load other packages to add JSON support. // // v2ray:api:stable func StartInstance(configFormat string, configBytes []byte) (*Instance, error) { config, err := LoadConfig(configFormat, "", bytes.NewReader(configBytes)) if err != nil { return nil, err } instance, err := New(config) if err != nil { return nil, err } if err := instance.Start(); err != nil { return nil, err } return instance, nil } // Dial provides an easy way for upstream caller to create net.Conn through V2Ray. // It dispatches the request to the given destination by the given V2Ray instance. // Since it is under a proxy context, the LocalAddr() and RemoteAddr() in returned net.Conn // will not show real addresses being used for communication. 
//
// v2ray:api:stable
func Dial(ctx context.Context, v *Instance, dest net.Destination) (net.Conn, error) {
	dispatcher := v.GetFeature(routing.DispatcherType())
	if dispatcher == nil {
		return nil, newError("routing.Dispatcher is not registered in V2Ray core")
	}
	r, err := dispatcher.(routing.Dispatcher).Dispatch(ctx, dest)
	if err != nil {
		return nil, err
	}
	// TCP and UDP destinations use different reader options — presumably the
	// UDP variant preserves datagram boundaries; confirm against the net
	// package before relying on this.
	var readerOpt net.ConnectionOption
	if dest.Network == net.Network_TCP {
		readerOpt = net.ConnectionOutputMulti(r.Reader)
	} else {
		readerOpt = net.ConnectionOutputMultiUDP(r.Reader)
	}
	return net.NewConnection(net.ConnectionInputMulti(r.Writer), readerOpt), nil
}

// DialUDP provides a way to exchange UDP packets through V2Ray instance to remote servers.
// Since it is under a proxy context, the LocalAddr() in returned PacketConn will not show the real address.
//
// TODO: SetDeadline() / SetReadDeadline() / SetWriteDeadline() are not implemented.
//
// v2ray:api:beta
func DialUDP(ctx context.Context, v *Instance) (net.PacketConn, error) {
	dispatcher := v.GetFeature(routing.DispatcherType())
	if dispatcher == nil {
		return nil, newError("routing.Dispatcher is not registered in V2Ray core")
	}
	return udp.DialDispatcher(ctx, dispatcher.(routing.Dispatcher))
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/core.go
core.go
// Package core provides an entry point to use V2Ray core functionalities.
//
// V2Ray makes it possible to accept incoming network connections with certain
// protocol, process the data, and send them through another connection with
// the same or a difference protocol on demand.
//
// It may be configured to work with multiple protocols at the same time, and
// uses the internal router to tunnel through different inbound and outbound
// connections.
package core

//go:generate go run v2ray.com/core/common/errors/errorgen

import (
	"runtime"

	"v2ray.com/core/common/serial"
)

// Version/build metadata reported by Version and VersionStatement.
// build is presumably overridden at release time (e.g. via -ldflags) — TODO confirm.
var (
	version  = "4.31.0"
	build    = "Custom"
	codename = "V2Fly, a community-driven edition of V2Ray."
	intro    = "A unified platform for anti-censorship."
)

// Version returns V2Ray's version as a string, in the form of "x.y.z" where x, y and z are numbers.
// ".z" part may be omitted in regular releases.
func Version() string {
	return version
}

// VersionStatement returns a list of strings representing the full version info.
func VersionStatement() []string { return []string{ serial.Concat("V2Ray ", Version(), " (", codename, ") ", build, " (", runtime.Version(), " ", runtime.GOOS, "/", runtime.GOARCH, ")"), intro, } } /* ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::c:::::::::::::::::::::::ccc:::cccc::::::c:::::cccc::::cc::::::::::::::::::::::::ccccccc::cccccccccc::cc:::;:::::::::::::::::::::::::::ccc:::cccc:ccc::::::c:::::::::::::::::::::::::::cccc:cccccccccccccccccccccc:;;::::::::::::::::::c::::ccc::::::ccc:::ccc::cccccc::::cc:cc:::ccccccccccccccccccccccccccccccccccccccc :::;::::::::::::::::::::::::::::::::::::::::::::::cc:::::::::::::::::::::::::::::::::::::ccc::cc:::::c:::::::::::ccc::ccc:::::ccc::::::cc:::::cccc:::::::::::::::::::::cccccccccccccccccccccccccc:::::::::::::::::::::::::cc::ccc::::::::::::::c::c::::::::::::c::::::::c::::::cccccccccccccccccccccccccc:;:::::::::::::::ccc::cccccccc::c:cccc::cc:::cccccccccccccccccccccccccccccccccccccccccccccccccccccccccc :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::cc::::::::cc::::::::::::ccccc:::::ccccccccc::cccc::ccccccc:::c::::::::c:c:::::cccccccccccccccccccccccccc::::::::::::::::::::::::cc:::::ccc:::c::::::::::::::::c::::::::::::::::::::::ccccccccccccccccccccccccccc:::::::::::::::::::::cccccc:ccccc::ccccccccc:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccc ::;;::::::::::::::::::::::::::::::::::::::::::::::::::::::::c:::::::::::::::::::::cc::::::::::::::::c:::cc::::c::::::::cccccc::::cccccccccc::ccccccc::c::::::::c::::::::ccccccccccccccccccccccccc:::::::::::::::::::::::::c::::cccc:::cc:::cc:::::::::::::::::::::::::::c::::::ccccccccccccccccccccccccccc::::::::::::::::::::cccc::cccccc:ccc:cccccccccccccccc:cccccccccccccccccccccccccccccccccccccccccccccccc 
:::::::::::::::::::::::::::::::::::::::::::::::::::::::::cc:::::::::::::::ccc:::::c:::cc::::::::::::::::::::::cc:::c::cccc::::::::::cccccccccccc:ccc::c:::::::ccc:::::::cccccccccccccccccccccccccc:::::::::::::::::::cccccccccccccc::::::::ccc::c::cc:::cc::::::::::::::cc:::::cccccccccccccccccccccccccccc::::::::::::::::::::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc ::::::::::::::::::::::::::::::::::::::::::::::::::::::c::::::::::::::::::::c::::::::::::::::::::ccccc::::::::::::ccc::cc:::::cccc::::cccccccccccc::::cccccc:::ccccc:::::cccccccccccccccccccccccccc::::::::::::::::::::c:cc:::cc::::::::::::cc::cc:::::::cccc::::::c:::::cc::::::ccccccccccccccccccccccccccc:::::::::ccc::::::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::cc:::::::::::::::ccccccc:::::ccccc::ccc::ccccccccccccccccccccccccccccccc:cccc::::cccccc::::cccccccccccccccccccccccccc::::::::::::::::c:::::cc::::::::cc:::cccc::::cc::::ccc:ccc::c:::::::::cccc::::ccccccccccccccccccccccccccc::::::::::cc::::::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc ::::::::::::::::::::::::::::::::::::::::::::::::::::c::::::::cc::::::::::::::cc:::::::ccccc:::c:::::ccc::cccc:::cccccc:ccccccccccccccccccccccccccccccccccc::::ccccccc:::cccccccccccccccccccccccccc:::::::::cc:::::::::::::cc:::cc::cccccccc::cccccc::ccccccccc:c::::::::ccc:::::cccccccccccccccccccccccccccc:::::::::ccc:::::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 
::::::::::::::::::::::::::::::::::::::::::::cc:::::::::ccc:::::c::::::cc:::cccc:::c:::ccccc::::ccc::cccccccccc::cccccc::ccccccccccccccccccccccccccccccccccc:ccccccccc::ccccccccccccccccccccccccccc:::::::::::::::::::::::::cc:cccc::cccccccccccc:ccc::cccccc:ccc::::::::cccc:::::ccccccccccccccccccccccccccc:::::::::ccc:::::c:ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc :::::::::::::::::::::::::::::::::::c:::::::c:::::c:::::cc::::::::::::cccccc::::ccccccccc:::::::ccccccccccccccccccccccc:cccccccccccccccccccccccllccccccccccccccccccccc::cccccccccccccccccccccccccccc:::::::::c:::::::::::ccccccccccccccccccccccc::cc:::::cccccccc:::cc:::ccccc::::ccccccccccccccccccccccccccc:::::::::cccc::::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc :::::::::::::::::::::::::::::cc::cc:::::::c:::::::::::::c:::cc:::::::cccccc::::cccccccc:cc:::ccccccccccccccccccccccccccccccccccccccccccccccoxk00Okxddoolllllloodxkkxdoloolccccccccccccccccccccccccc:::::::::c:::::::::cccccccc::cc::ccccc:::cccccc:ccc:ccccccccc:ccccc:::cc:cc:::ccccccccccccccccccccccccccc:::::::::ccccc:::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc ::::::::::::::::::c::::::::ccc:::cc:::::::c::::cc::c::::c:::ccc:::ccc::::::ccc:::ccccccccccccccccccccccccccccccccccccccccccccloxxkOO0OkkkkOKXK00000000KKK0KKXXNNWWWWNXKKK0kdoolclloooddollccccccccc:::::::::cc:::::cccccccccccc:ccccccccc::cccccccccccccc:cccccc:ccccc:::cc:cc:::cccccccccccccccccccccccccccc:::::::cccccc:::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 
:::::::::::::::::::::::::::::::::::::::c::ccc:::cc::cc::ccc:::::cc:::cc::::ccc::cccc:cccccccccccccccccccccccccccccccccccccccdkO0KKK0000OOkkOKXXXXXXXXXNNXKXNNNNNNNNNNNXXXXXKKKOO0KKKKKKK0Oxxoolllcc:::::::::cc::::ccc::cccc:cccccccc:ccccccccccccccccccccccccccccccc:cc:::cc:::::ccccccccccccccccccccccccccccc::::::::c::c:::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc :::::::::::::::::::::ccc::::c:::c:ccc::c::ccccc:ccc::::ccccccccc:cc:cccccccccccccc:ccccccccccccccccccccccccccccccccccccccclxOOOO00KKKKKKK00KKXXNNNNXNNNNXXXXXNNNNNNNNNXXKKXXXXXXXXXXXKKXXNXXK00KOdl::::::::ccc:::::cccccccccccc:cccc::ccccccccccccccccccccccccccccc::cc:::ccc:cc::ccccccccccccccccccccccccccccc:::::::cccc::::cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc ::::::::::::::::::ccccccccc:cc::cccc::ccc:ccccc:ccc:::ccccccccccccccccccccccccc:ccccccccccccccccccccccccccccccccccclok000O0XNNNNNXK00KKXK0K0xxk000KXXXXXNNNNNNNXNNNNNNXXXXXNNXXXXXKX0dodxxxxddxOOxl::::::::cccc::::ccccccccccccccccccccc::cccccccccccccccccccccccccccccc::cccccc:::cccccccccccccccccccccccccccc::::::::ccccc::cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc :::::::::::::c::::cccc:::c::ccccccccc::cc:ccccc:ccc::cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclx0XNNNXNXXXNNNXKKKKXXX0OOxooddoodkKXKKKKXNXXNXXXXKKKKXX00KXKKXXNNXK0kko:;;::cccccc::::::::ccc::::ccccccccccccccccccccccccccccccccccccccccccccccccccccc::cccccc:::cccccccccccccccccccccccccccc::::::::cccccc:::cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 
::::::::::::::cc:ccc::ccccc:cccccccccc::cccccccccccccccccccccccccccccc:cccccccccccccccccccccccccccccccccccccccclxKNNNNXOO0KOkO0KXXNXXXXK0kololllol:lO0kkxkOKXXXXK0kkxddxO000OOkOO0KKXNNNXOxl:::coolc::::::::ccc:::ccccccccccccccccccccccccccccccccccccccccccccccccccccc:::cccccc:::ccccccccccccccccccccccccccccc::::ccccccccc::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc :ccc::::::::::cc:c:::cccccccccccccccccccccccccccc:ccccccccccccccccccccccccccccccccccccccccc:ccccccccccccccccccoOXNWWNN0dcllldO0KKXXXXXXXXKxllldkkkxxdlc;;lOK00OOOd:;;;:cdk0OxkxoodxxkOOOOO0Okl,cO0kl::::::::ccc:::ccccccccccccccccccccccccccccccccccccccccccccccccccccccc::cccccc::ccccccccccccccccccccccccccccc:::cc:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc ::cc:::::::::c::::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccldO00KNNNXKOddk0KXXXXXXXXXXKK0OOOOO0KKxc;,,.;oxxddxkOx:;,;loddxxolddlc::ccccc:;cooox0KKkl:::::ccccc:::cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc::ccccccccccccccccccccccccccccc:::::cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc c:cc:::c::cccccc::ccccccccccc:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccldkkxdxkKK000kxkkOO0000OOOOOkdloOKKO0K0dc,...;ccc:lxO0dccclllccloolllc:;;;:cc:;.,:cok0Okkkl::::cccccc:::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc::ccccccccccccccccccccccccccccc:::::cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 
ccc::::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclx0KK0OxdxOOOxl::codxxdoooolccoxO0K00kdoodl;''',;;;:lxOOdollccc::cc:;;;;,'',;;;;'','';:coxxl::::cccccc:::cccccccccccccccccccccccccccccccccccccccccccccccccccccccc:cccccc::cccccccccccccccccccccccccccccc::c::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccllccccccccccccccccccccc cc:::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccldxOKXNXKOxxdl:;;,,,,,,,ckOx;:k00Okkxolc;;;:;,''',,'',:ok0kocc::,.',,;;;;,''',;,;,...',;:ccc::::::ccccc:::cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc::ccccclcccclcccccccccccccccccc::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccllcc ccc::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccooloOKKKK0Oo:;,''..'.. ;xO0OxxOkxdddddoll:;,..,,,,;'.'',:ll:,,,,'..'''',,;;,,,,;;;,',,;;::cc:cccc:ccccc::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc::ccccllccccccclcclllcccccccclcc::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclllccccccccccccc cccc:ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclollxOOOO0Oxl:;'''',;;;cdxodxdoolllldk00Od:,'..,;,;;;,,'.....'',,,,,'''.''''.....,,,''',::ccc:cccc::ccccc:ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc:::cccclcccllcclcccllcccccccclcc:::ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccllcclcclcccccccccccccccc 
ccc:ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclxkxdddxO0Oxdoc;'',;:ldxdlc:coolcc::ldxolc;;::,..,,;;;;;;'...'';cccc::;,,,,,,,'....,',;:oolccccccccccccccc:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc::cclcccccllccccclccclcllllclccc:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccclllccccclllcccccccccccccccccccccl cc::cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclxOOxddxkkkdoool:;,.';ccc:::::;::::::cc;;;:cdddo:'''',;;;;;;,,;:codddolc:;;,,,;,,''',,,,;:llllc:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclllclllcccccccclllllllllllcccccccccccccccccccccccccccccccccccccccccccccccccccllccllccccccllccccccccccccllcccccccllcccccc ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclxkkkkO00koc;,'.';;,...;;::;::;,,;:::::::;;;::::::;'''',,,,;,;;:loxkkOkxdolc:;;;,,,'...',:lllclccccc::cccccc:ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc:cclllllllclllcccclllllllllllcccccccccccccccccccccccccccccccccccccccccccccccccccllcclllcccccccccccccccccllclccllccclccccccc ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccldddkOOO00OOOOxl,.';;'...,;;;;;;,;;;;:::;;::;,'',;;,',,,,,,,,;;:loodxxxxxxxddoolc:;;,...;cdkkdlllc:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccllllcllllllccclllllllclllllcccccccccccccccccccccccllccccccccccclccclcccllccccllcccccccccccccccllcclllccclccllccccccccccc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccloodOOkkkOOOOOOkxc,;:,. 
..',;;;;;;'',,,,,,,;,...,;,,;;;,,,;;;:clllllcc::clodxxddolc;'',;:ccodlccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc:cclllllllllllllllllllllllllllc:ccccccccccccccccccccllccclllcccccccccllcccccccccccccccccclccccccllcccccccclccccccccccccccc ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclollxOkxxxxxxxddddoc;'.... ..,;;;,'....';,.','.',,;::::;;;;;::cloddddooc:;::cclddolc:;:::::cllllc:cccccccclcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclccccc:ccllllllllllllllllllllllllcllc:cccccccccccccccccccccccccllccccccccccccccccccccllccccccllllcccccccccccccccccccccccccccccll ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclccdkkxdooollccc:::;'.......... ..,,,;,''',;,.''',,,;:ccc:::::::::::cccclccc::::::cloolllc:ccccccllccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclccccc::cllllllllllllllllllllllllllllc:cccccccccccccccccccccccccccccccccccccccclllllccccclllccccccccccccccccccccccccccllllllllll cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccoxdxkdl::::::cc;,,''.. .....',,,;;,,,,,'',;;:cloodollc::;;;;:;;,,;;;;::::::c:looooolcclllllllcccccccccccccccccccccccccclllcccccccccccccccccccccccccccclccclccccccccccccccccccccccllllllllllllllllllllllllllllc:cccccccccccccccccccccccccccccclllcclllllllllllcccccccccccccccccccccccccclllllllllllllllll ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccllllloooc:;,'',;;,,;:..... ....'',,,,,,,,;;;:ccooddxxxxxdol:;;;:ccc::;;;,;;;;;;clollllllllllllllcccccccccccccccccccclccccclccccccccccccccccccccccclcccccccccllcccclcccccccccccccccccllllllllllllllllllllllllllllccccccccccclcccccccccccclllclllllllllllccclllccccccccccccccccccccccllllllllllllllllllllllll cccccccccccccccclccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc::;,,;:;.. ';;';c;'';;'. 
....'',,,;;;;;;:clodddxxxxkkkkkkxdoddxxdol:,,,,;;,;:oxdlllllllllllllllcccccccccccccccclcccccccclccccclllcccccccccllcccclcccccccclllccccccccccccccccccccccclllllllllllllllllllllllllllcccccccccccccccllcccccccccccclcccccccccccccccccccccccccccccclllllllllllllllllllllllllllllll ccc::cccccccccccccccccccclccccllcccccccccccclcccccccccccccccccccccccccccccccccccccccccccccccccclccol:;,,''''....','';:;,:;. ...',,,,;;;;;:ccloddxxxxxkkOOOO000Okxddool;'',,;;:loxOkoclllllllllllllcccccccccclccccccccccccccccclccllllllcccccccclcccccccccccclllcccccccccccccccllccccccllllllllllllllllllllllllllllccccccccccccccllccccccccccccccccccccccccccccccccccllllllllllllllllllllllllllllllllllllllll ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclcccclllccccccccccccccccclllllo:;,,,',,,,,,,,'';::::,. ..',,;;;::::cccooddxxxxkkkOOO00000Okxolllc;;:::coxkO00dllllllllllllllccccccccccccccccccccccllllcclcclllllllcccccclllccccccccccclcccclcccccccccccccccccccclllllllllllllllllllllllllllllcccccccccllccccccccccccccccccccccccccccclllllllllllllllllllllllllllllllllllllllllllllllll ccccccllccllccccccccccccccccccccccccccccccccccccccccclccccccccccccccccccccclllcclccccccllcccclcclccc:;;;,,,,,,;;;::;;:c::;'. 
.....',;;:::::ccllloddxxxxxkkkkOOO000000OkxdolooddxkOOO00Oolllllllllllllcccccccccccccccllllcclllllcllcclllllllcllccllllcccccccccccccccccccccccccccccclllcccccllllllllllllllllllllllllllllc:cccccccccccccccccccccccccccccccllllllllllllllllllllllllllllllllllllllllllllllllllllllll ccccccccccccccclccllccllllllcccclccccccccccccccccccccccccccccccccccccccclllllllllcccllcclllllllcllcc:::::;;,',;;;:cl::lc:;;,,,,'...,;:::ccclollllooddddddxxxxkkkkOO00000OOOOOOO00OOOOO00xlllllllllllllccccccccccclcccclllcclllclccccclccccccccccccccccccccccccccccccccccccccccllcllllllccccllllllllllllllllllllllllllllc:cccccccccccccccccccccllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll ccccccccccccccccccccclcccllcccclllclllclllllccccccccccccccccccccccccccccccccccccccccccccccccccccccccc:::;''...,;;;::;;:clcc:::::;;,;:::cccloooooooooooodddddddxxxkkOOOOO0000000KKKK00O0KOollllllllllllccccccccccccccccccccccccccccccccccccccccccccccccccllllllllllllllllllllcccccclcccccccclllllllllllllllllllllllllllllc:ccccccccccclllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll cccccccccccccccccccccccccccccccccccccccccclllclccclcccllllllllcclllccccccccccccccccccccccccccccccccccc:;,.....,;;;:;;,,;;::clllllcc::ccllllllloooooooooooddddddxxkkkOOOOOOOOOOOOOO00K0KKKxllllllllllllcccccccccccccccccccccccccllllcclllllllllllcclllllllllllllcccllccllllllclccccccccccccccllllllllllllllllllllllllllllccllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll cccccccccccccccccccccccccccccccccccccccccccccccccccccccllcccccccccccllcccccclllccllllllllllllllllllllcc:;;,..';:::;;,,'.'',,;::cc:::clloooolloddodddoooooddddddxxxkkOOOOOOOkxodxkO00KKKXXOolllllllllllccccccclccclllllllllllcclllcllllcllllllllllllcccccllllcccccccccccccccllcccccccccccc::clllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllool 
llllllllllllccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccllllclllllllcc:;,..,;;;,,,,,''...',,;;;;;:loooooddddddddddddoooddddddxxxkkkkkkkkkxddOKXXXXKKXXXKxllllllllllllcccccccccccccccllcclllcllccclccccccccccccccccccccccccccccccccccccccccccccccccccccccccllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll llllllllllllllllllllllllllllcllccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccllccc:,'';:;,,,,,,;,'...',,,,;;;clooddddoodddddddooddooodddddxxxxkkkkkkkOkxkkxdxxkkxxkOkolllllllllllccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccclllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllolllllllllllllllllllllllllllooloooloo llllllllllllllllllllllllllllllllllllllllllllllcllcccccccccccccccccccccccccccccccccccccccccccccccccccccc:,.,cllc:;;;;;;;,,'',;;;;;;:llooddddodddddddddddooooodddddxxxkkkkkO0KOdc;..'',:cccoollllllllllllccccccccccccccccccccccccccccccccccccccccccccccccllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllloollllllllllllllollllolooooooooo llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllcccccccccccccccccccccccccccccccccc::;;cllol:;;;:::;,'',;;;;;;:clloooodddooddddddoooooodddddddxxxkkOOO00K0ko:;;;:;clllllllllllllllllc:ccccccccccccccccccccclccclllcllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllollllllllllllloooolllllloooooooooollolll 
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllcllollcccc:;,'',;;,,,,;;;;;:::ccllloooooooooooooooooooodddddddxxkkkkkOO0KK0kdlclolllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllollllllllllllllllllllllllllllllllllllllllllllllllllllllllloooollllloollooolllloooooooollooooooloolllllcc llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllollool::;,;;;:;'..',,,,;;;::::::ccclllllllooooooooooooooddddddddxxxxxdddxO0000Oxllollllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllolllllllllllllllllllllllllllllllllllllllllllllllllllllllllllollllllloollloollloooooooooollooollolllllcccc:::::: lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllldo:;;:cllll:'...;;;::ccccccccccccccllllllllloooooooooooodddddddddolclooollloollllllllllllllllllllllollllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllooolllloooollllllloooollloooloooooolooooooooolllllcccc:::::::::::::: llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllc:clooooollcc:;..';::clllllolcccccccccccllllllllllllloooooooooddddxkkkxdxxxxoclllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllloollllloollllllolllllllllllllllollllllllloooolllooooooolllllllloolloolloooloooolllllcccc::::;::::::::::::::::: 
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllc:::::::c:::;,',;;:clooooddolcccccccccccccccllllllllloooooooddddxkOOOkdlloddooloollllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllloollllllloolloollllollllllllllllllollooollllllllllloooloooooooolllloollllllloooooolllllllccc::::::;::::::::::::::::cccccccc lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllc:::::::::;;,'''.,:cloodddddolcccccccccccllcclccclllllloooooddxxkOOOOkdoccclooooolollllllllllolllllllllllllllllollllllllllllllllllllllllllllllllllllllllllllllllllllllllllloolllllllllllllollooooooooooooolllloooooollllooooollllloooollooooooooooooooooollllllccccc::::::::::::::::::::::::ccccccclllllllo lllllllllllllllllllllllllllllllllllllllllolllllllllllllllllllllllllllllllllllllllllllllllllllllllllllc::;:::;;::cc:;'.,:coodddxxddolc:cccllllllllllllclllllllllooodxxkO000K00kdoooolllllllllollllllllolllllllllolllloolloolllllllllllllllllllllllolllllllllllllllloolllllllllllllloollllooooolloooooooooooooooooooooooooolllloollooooooooooollooooooollllllcccc:::::::;:::::::::::::::::ccccccclllllllllllllllll ooollllllllloolllllllllllllllllllllllllooollllllllllllllllllllllllllllllllllllllllllllllllllllllllloolc:;;;:::cclllllc:clodddxxxxxdolc::cccllllllllllllllllllllllooddxkO00KKKKK0kdooollllooloooooollooollllllllloolllllllllllllllllllllllllllllllllllllllllllooolllollllllllllllllllolloollooolooloolooooooooooooooooooooooolooollooooooollllllllccccc::::::::;:::::::::::::::ccccccccllllllllloolllllllllccc::: 
lloooolllollloollllllllllllllllllllllllllooollllllllolllllllllllloolllllllllllolloolllllllllllllllllollc:;;:::::::::cclllodddxxxxxxddlc::::ccccccclllllllllllllllllloodxxkOOOOkkxdolooooooooooooolllooolllllllllooollloolllllllloolllooloollloolllllloollllooooollllollloollloolooloooooolloollooooooloooooooooooooooooooooooooollllllccccc:::::::;::::::::::::::::::::ccccccclllllllllloollllllllcccc::::cccccl ooooolllllllllollllllolllllllolllooolllllloooollllllllllllollllllllllolllllllllllllllllllllllllloolllllccc:c::::::;;::lloodddxxxxkkxxdolcc::ccccccclcclllllllllllllllllloooddddoddoooooooooooooooooooooooooooooooloooooollllllllooollllllooollooolllooooooooolllloooooolllllloolllooooolooooollooooooooooooooooooooooooooooolc:::;;;;;;:;;;::::::::::::::::cccccccclllllllllllloollllllllcccc:::::ccccclllllllll lllllllloooooolllooooollollooolllooooooolllooooolllllllllloooooooollooooooollooooollllooooolllooolloooolcccccc:::::::clooodddxxxkkkkkxdddoollllcccccclllllllllllloooollllloooooooooooooooooooooooooooooooooooooooollloooooooolllooolllllllllloooooooooooooooollooloooooooooooollloooooolooooooooooooooooooooooooooooooooooool:;,,;;;;;::::::::::cccccccccclllllllllloollolllllllccccc:::::ccccclllllllooolloollo :::c:cccccclllllllooooooooooooolloooooollooooooolooolllooooollooolllooooooolooooooolooooollooooooloooooolc::::::;;;:clooooodddxxxxkkkkkkxxxdddolllcccccclllllllloooooooooooddddoooooooooooooooooooooooooooooooooooooooooooooooolooooolooolloooooooooooooooooooooooooooooooooooolloooooooollllllloooooooooooooooooooooooooooolc:;;;;:::::cccccccllllllllllloollllllllllccccc::cc:cccccclllllllloooooooooooooooolo 
::::::::::::::::::cccccccclllllllllllooooooooooooooollloooooolooooooooooooooooooooooooooolooooooooooooool:::::::;;;cllooooodddxxxxkkkkkkkkkxxxddoolllcccllllloooooooodddddddddoooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooolllllllllllcccccc::::::cloooooooooooooooooooooooooooolcccccccllllllllllllooolllllllllccccc::::cccccclllllllllooooooooooooooooooooooolooo :::::::::::::::::::::::::::::::::::cccccccllllllllllllllllloooooooooooooooooooooooooooooooooooooooooooollc::c::::;:loooooooddddxxxkkkkkkkkkkxxxdddooollccccllllllooodddooooooooooooooooooooooooooooooooooooooooodxxkkOOOkkkkkxdooooooooooooooooooooooolollllllllllccccccc:::::::::::::::::::::::loooooooooooooooooooooooooooolllllllllllllolllllllccccccccccccccccclllllllooollooooolooooooooooooooooloooooooooo lllllccccccccc:c:::::::::::::::::::::::::::::::::::::::cccccccccccllllllllllooooooooooooooooooooooooooooolc:::::::clooooooooddddxxxkkkkkkkkkkxxxxxddddoollcclllloooooooooooooooooooooooooooooooooooooooooooodxkO0KKK000OOOOOOOOkkkxolllllllllccccccccc::::::::::::::::::::::::::::::::::::::::::loooooooooooooooooooooooooooollllllllllllool::::::cccccccllllllllooooooooloooooooooooooooooooooooooooooooooooooo loolooollollllllllllllccccccccc:::::::::::::::::::::::::::::::::::::::::::::cccccccccccccclllcllllllllllllc::::::cloooooooooddddxxxkkkkkkkkkxxxxxxxxxxxxxddollllooloooooooooooooooooooooooooooooooooooooolloO0OO0KNNNNNXK0OOOOOOOOkkxlc::::::::::::::::::::::::::::::::::::::cccccccccccllllllcclooooooooooooooooooooooooooooolllllllllollolccclllllllooolloooooollloolloooooolooooooooooooooooooooooooooooooooo 
llllooooooooooooooooooooolllllllllllllcccccccccccccccc::c::::::::::::::::::::::::::::::::::::::::::::::::::::::;;:loodddoooooddddxxxkkkkkkkxxxxxxxxxxxxxxxxdolllolcllccccclooooooooooooooooooooooooooooooloxkxdxkO0KXXNNNNX0OxxkOOOOO0ko::::::::::::::ccccccccccccccccccllllllllllllllllolooollllloooooooooooooooooooooooooooolllllllloollollllllloooloooooooooooooooooolooooooooooooooooooooooooooooooooooooooo ::::ccccccllllllllllllloooloooooooooooloooollllllllllllllllllcccccccccccccccccccccccccccc::::::::::::::::::::::::cloooddoooooddddxxxkkkkkkkxxxxxdxxxxxxxxddddoollccll:::::cooooooooooooooooooooooooooooodddollllodxkkO0KXXNNNK0xddxkO00Odccccccccclllllllllllllllllloooolloooooooolllollllloollllloooooooooooooooooooooooooooollllllllollooolllllloooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo llccccccccccc::cccccccccccllllllllloooooooooooooooooooooooooooooooolllllllllllllllllllllllllcclccccccccccccccc:::clooddddoooooddddxxxkkkkkkxxxddddxxdddddddddooollclolc:::coooooooooooooooooooooooooooxkkxlllllllllooxkO00KXXNNX0xooddddxdollllllooooooooooooooooooooolloolllllllcclcccclllloollllooooooooooooooooooooooooooooolllloloooooooollooloooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo lllloooolllllllllllccccccccccccccccccccccclllllllllllllooooooooooooooooooooooooooooooooooooooooooooollllllllollcccooddddddooooodddxxxkkkkkkkxxxxxxxxxdddddddoooollllodollclooooooooooooooooooooooooodxxxxollc:;,;:clllodxkO0KXXNNNKOxdoooxdoolooooooooollllllllllllcccccccc::cc:ccccccccloolllllllooooooooooooooooooooooooooooolllollooooooolllooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo 
oooooooooooooooooooooooolllllllllllllcccccccccccccccccccccccccllllllllllllllllooooooooooooooooooooooooooooooooooloodddddddddoooddddxxkkkkkkkkkkkkkkkxxxddooooooolllldxdollloooooooooooooooooooooodddlclll:;:;,'''',;cclllodxO0KXNNNNXKOdoddlclccccccccccccccccccccccccccclllllllllllllllloooooollloooooooooooooooooooooooooooooollllooolooooollooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo ooooooooooooooooooooooooooooooooooooooooooolllollllllllllllccccccccccccccccccccccccccccccclllllllllllllllllllllloddddddddddddddddddxxkkkkkkkkkkkkkkkkkxxddoooooolllodkxolllooooooooooooooooooooddol::::::::cccc:;,'''',:llloxOO00KKXXXXKOxolcclcclclllllllllllllloooooooooooooooolllllllloooooollllooooooooooooooooooooooooooooolloooooooooooolooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooolllllllllllllllllllcccclccccccccccccccccldxxddxxxdddddddddddxxkkkkkkkkkkkxxxxxkkkxxddddooooodxkkdlllooooooooooooooooooddolc:ccc:::lxkOkkkxddoc:;,;codk000OOOKKKKXXKOdooooooooooooooooooooooooooooooooooooooooooollooooooolllooooooooooooooooooooooooooooolllooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooodxxxxxxxxxxdddxddddddxkkOOOkkkxxxxxxxxxxxxxddxxddddddxkkdooodxxdooooooooooooddolcllllccccloxkkkkOO00KKKKK00KKK00000OOKKKKKXXX0kdoooooooooooooooooooooooooooooooooooooooollloooooolllooooooooooooooooooooooooooooolllooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo 
ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooxkkkkkxxxxxxxxxxxxddddxkOOOOOkkxxxdddddddddddxxxxxddxxkkOOOkdxO00OOkxxdooooooolllllllllccclloddddxxxkOKKKXXXK0OOO0KKK00KXXKKXXXXKOxoooooooooooooooooooooooooooooooooooooollloooooollloooooooooooooooooooooooooooooollooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooodxkOkkkkkkkkkkkkkkkxxdddxkOO0OOkxxddddddddooddxkkkkkxxkkkkOO00xloxkO0KKK0Okxollllllllllllcccclodooooooodxxxxk0KK0OOO00KK00KKKKKKXXXXX0kdoooooooooooooooooooooooooooooooooooolloooooollloooooooooooooooooooooooooooooollooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooodxkOOOOOkkkkkkkOkkkkkkxxdddxOO00OOxxdddooooooddkkkOOOkkkkkOOOOO0Oolooddxkkxxddoollooollllllcc:::clllccclloolccldk0KK00000000O0KKKKKKKKKXX0xooooooooooooooooooooooooooooooooooollooooooollooooooooooooooooddoooooooooooollooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooodxkOO00000OOOOOOOOOOOOOOkkxxddxkO000OkxddoooooddxkOOOOOOOOOOOOOOOO00d:cooooooooddooooooolllllllccc:::::,,,'':olccccldk00000O000OO00KKKKKKKKKX0xooooooooooooooooooooooooooooooooooolooooooolllooodoooooooodddddooooooooooooolooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo 
ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooddxkOO00000000000OOOO0000000OOkkxxxxkO000OkdddddddxkkO00000OOOOOOO00OO000xl:cloddddddddoooollllllllllccc:cclcclodxxollllllodxO0000000OO0KKKKKKKXXXKKOdooooooooooooooooooooooooooooooooolooooooolllodoodoooooooddoooooooddddooooolloooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooodxkkO00KKKKKKKKKKK00000000000KKK00OkkkxxkO0KK0kxdddxxkOO00KKK00OOOO0000000KK0kdc::coddddddooooolllllllllllccccoddxkkkxxxdoodddddoxO0000000O0KKKKKKKXXXXXK0kdoooooooooooooooooooooooooooooooloooooooollooooddoodddddddddddoodoooooooolloooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooood ooooooooooooooooooooooooooooooooodooooooooooooooooooooooooooooooooooooooooooooooooooooooodxkkO0KKKKKKKKKKKKKKKKKKKKKKKKKKKKKXKKK00OOkkxk0KKKOOkkOOO000KKXXKK0OO00KKKKK0KKXK0kol::clooooooolllllllllllllllllllodddoooddxdodddxxdddkO00000000KKKKKXXXXXXKXKOdooooooooooooooooooooooooooooooooooooooollooooodoodddddddddddoodooooodoolloooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo oooooooooooooooooooooooodoooooooooooooooooooooooooooooooooooooooooooooooooooooooooooodxkO0KKKKKKKKKKKKKKKKKKKKXKKXXKKKKKKKXXXXXXXK00OkkkOKXK00000KKKKKXXXXKKKKKXXXXXXXXXXXXX0kdlccccllllllllllllllllllllllllllcclllllloddxxxdxdddddkO0000000KKKKKXXKXXXKKK0xoooooooooooooooooooooooooooooooooooooollooddddoodddddddddddddddddddddoolodooooooooooooooooooooooooooooooooooooooooooooooooooooooooooodddoooooooooooo 
ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooddddddoodkO0KKKKKKKKKKKKKKKKKKKKKXXXXXXXXXXXXXXXXNNXNNNXKK00OkO0KXKKKXXXXXXXXNNXXXXNNNNNNNNXXXNNNNNX0kddocccccccccclllllllllllllloddoooollllllooxkxxxdddddxkO00000000KKKKKKKKKKKKOdoooooooooooooooooooooooooooooooooooooolooddddddddddooddddddddoddooddooloooooooooooooooooooooooooooooooooooooooooooooooooooooddddooodddooddooooooood
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
true
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/functions_test.go
functions_test.go
package core_test

import (
	"context"
	"crypto/rand"
	"io"
	"testing"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/google/go-cmp/cmp"

	"v2ray.com/core"
	"v2ray.com/core/app/dispatcher"
	"v2ray.com/core/app/proxyman"
	"v2ray.com/core/common"
	"v2ray.com/core/common/net"
	"v2ray.com/core/common/serial"
	"v2ray.com/core/proxy/freedom"
	"v2ray.com/core/testing/servers/tcp"
	"v2ray.com/core/testing/servers/udp"
)

// xor returns a copy of b with every byte XORed with 'c'; used as the echo
// server's message transform so tests can verify data passed through it.
func xor(b []byte) []byte {
	r := make([]byte, len(b))
	for i, v := range b {
		r[i] = v ^ 'c'
	}
	return r
}

// xor2 is like xor but uses key 'd', letting tests distinguish which of two
// servers handled a packet.
func xor2(b []byte) []byte {
	r := make([]byte, len(b))
	for i, v := range b {
		r[i] = v ^ 'd'
	}
	return r
}

// TestV2RayDial starts a local echo TCP server and a minimal freedom-outbound
// V2Ray instance, dials through core.Dial, and verifies a 10MB payload
// round-trips intact (after undoing the server's xor transform).
func TestV2RayDial(t *testing.T) {
	tcpServer := tcp.Server{
		MsgProcessor: xor,
	}
	dest, err := tcpServer.Start()
	common.Must(err)
	defer tcpServer.Close()

	// Minimal instance: dispatcher + proxyman apps, single freedom outbound.
	config := &core.Config{
		App: []*serial.TypedMessage{
			serial.ToTypedMessage(&dispatcher.Config{}),
			serial.ToTypedMessage(&proxyman.InboundConfig{}),
			serial.ToTypedMessage(&proxyman.OutboundConfig{}),
		},
		Outbound: []*core.OutboundHandlerConfig{
			{
				ProxySettings: serial.ToTypedMessage(&freedom.Config{}),
			},
		},
	}

	cfgBytes, err := proto.Marshal(config)
	common.Must(err)

	server, err := core.StartInstance("protobuf", cfgBytes)
	common.Must(err)
	defer server.Close()

	conn, err := core.Dial(context.Background(), server, dest)
	common.Must(err)
	defer conn.Close()

	const size = 10240 * 1024
	payload := make([]byte, size)
	common.Must2(rand.Read(payload))

	if _, err := conn.Write(payload); err != nil {
		t.Fatal(err)
	}

	receive := make([]byte, size)
	if _, err := io.ReadFull(conn, receive); err != nil {
		t.Fatal("failed to read all response: ", err)
	}

	if r := cmp.Diff(xor(receive), payload); r != "" {
		t.Error(r)
	}
}

// TestV2RayDialUDPConn dials a UDP echo server through core.Dial and checks
// that two writes come back as two distinct, size-preserved datagrams.
func TestV2RayDialUDPConn(t *testing.T) {
	udpServer := udp.Server{
		MsgProcessor: xor,
	}
	dest, err := udpServer.Start()
	common.Must(err)
	defer udpServer.Close()

	config := &core.Config{
		App: []*serial.TypedMessage{
			serial.ToTypedMessage(&dispatcher.Config{}),
			serial.ToTypedMessage(&proxyman.InboundConfig{}),
			serial.ToTypedMessage(&proxyman.OutboundConfig{}),
		},
		Outbound: []*core.OutboundHandlerConfig{
			{
				ProxySettings: serial.ToTypedMessage(&freedom.Config{}),
			},
		},
	}

	cfgBytes, err := proto.Marshal(config)
	common.Must(err)

	server, err := core.StartInstance("protobuf", cfgBytes)
	common.Must(err)
	defer server.Close()

	conn, err := core.Dial(context.Background(), server, dest)
	common.Must(err)
	defer conn.Close()

	const size = 1024
	payload := make([]byte, size)
	common.Must2(rand.Read(payload))

	for i := 0; i < 2; i++ {
		if _, err := conn.Write(payload); err != nil {
			t.Fatal(err)
		}
	}

	// Give the echo server time to respond to both datagrams.
	time.Sleep(time.Millisecond * 500)

	receive := make([]byte, size*2)
	for i := 0; i < 2; i++ {
		n, err := conn.Read(receive)
		if err != nil {
			t.Fatal("expect no error, but got ", err)
		}
		if n != size {
			t.Fatal("expect read size ", size, " but got ", n)
		}

		if r := cmp.Diff(xor(receive[:n]), payload); r != "" {
			t.Fatal(r)
		}
	}
}

// TestV2RayDialUDP uses the unconnected core.DialUDP PacketConn to talk to
// two different echo servers (distinguished by their xor keys) over one
// socket.
func TestV2RayDialUDP(t *testing.T) {
	udpServer1 := udp.Server{
		MsgProcessor: xor,
	}
	dest1, err := udpServer1.Start()
	common.Must(err)
	defer udpServer1.Close()

	udpServer2 := udp.Server{
		MsgProcessor: xor2,
	}
	dest2, err := udpServer2.Start()
	common.Must(err)
	defer udpServer2.Close()

	config := &core.Config{
		App: []*serial.TypedMessage{
			serial.ToTypedMessage(&dispatcher.Config{}),
			serial.ToTypedMessage(&proxyman.InboundConfig{}),
			serial.ToTypedMessage(&proxyman.OutboundConfig{}),
		},
		Outbound: []*core.OutboundHandlerConfig{
			{
				ProxySettings: serial.ToTypedMessage(&freedom.Config{}),
			},
		},
	}

	cfgBytes, err := proto.Marshal(config)
	common.Must(err)

	server, err := core.StartInstance("protobuf", cfgBytes)
	common.Must(err)
	defer server.Close()

	conn, err := core.DialUDP(context.Background(), server)
	common.Must(err)
	defer conn.Close()

	const size = 1024

	// Round-trip against server 1 (key 'c').
	{
		payload := make([]byte, size)
		common.Must2(rand.Read(payload))

		if _, err := conn.WriteTo(payload, &net.UDPAddr{
			IP:   dest1.Address.IP(),
			Port: int(dest1.Port),
		}); err != nil {
			t.Fatal(err)
		}

		receive := make([]byte, size)
		if _, _, err := conn.ReadFrom(receive); err != nil {
			t.Fatal(err)
		}

		if r := cmp.Diff(xor(receive), payload); r != "" {
			t.Error(r)
		}
	}

	// Round-trip against server 2 (key 'd') over the same socket.
	{
		payload := make([]byte, size)
		common.Must2(rand.Read(payload))

		if _, err := conn.WriteTo(payload, &net.UDPAddr{
			IP:   dest2.Address.IP(),
			Port: int(dest2.Port),
		}); err != nil {
			t.Fatal(err)
		}

		receive := make([]byte, size)
		if _, _, err := conn.ReadFrom(receive); err != nil {
			t.Fatal(err)
		}

		if r := cmp.Diff(xor2(receive), payload); r != "" {
			t.Error(r)
		}
	}
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/app.go
app/app.go
// Package app contains feature implementations of V2Ray. The features may be enabled during runtime. package app
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/dispatcher/default.go
app/dispatcher/default.go
// +build !confonly

package dispatcher

//go:generate go run v2ray.com/core/common/errors/errorgen

import (
	"context"
	"strings"
	"sync"
	"time"

	"v2ray.com/core"
	"v2ray.com/core/common"
	"v2ray.com/core/common/buf"
	"v2ray.com/core/common/log"
	"v2ray.com/core/common/net"
	"v2ray.com/core/common/protocol"
	"v2ray.com/core/common/session"
	"v2ray.com/core/features/outbound"
	"v2ray.com/core/features/policy"
	"v2ray.com/core/features/routing"
	routing_session "v2ray.com/core/features/routing/session"
	"v2ray.com/core/features/stats"
	"v2ray.com/core/transport"
	"v2ray.com/core/transport/pipe"
)

var (
	// errSniffingTimeout is returned by sniffer() when the allowed number of
	// read attempts is exhausted without detecting a protocol.
	errSniffingTimeout = newError("timeout on sniffing")
)

// cachedReader wraps a pipe.Reader and retains a copy of everything pulled
// off the pipe during protocol sniffing, so the sniffed bytes are still
// delivered to the outbound once sniffing is done.
type cachedReader struct {
	sync.Mutex
	reader *pipe.Reader
	cache  buf.MultiBuffer
}

// Cache pulls whatever arrives on the pipe within 100ms, merges it into the
// internal cache, then copies the accumulated cache (up to buf.Size bytes)
// into b for the sniffer to inspect. An empty read is not an error here.
func (r *cachedReader) Cache(b *buf.Buffer) {
	mb, _ := r.reader.ReadMultiBufferTimeout(time.Millisecond * 100)
	r.Lock()
	if !mb.IsEmpty() {
		r.cache, _ = buf.MergeMulti(r.cache, mb)
	}
	b.Clear()
	rawBytes := b.Extend(buf.Size)
	n := r.cache.Copy(rawBytes)
	b.Resize(0, int32(n))
	r.Unlock()
}

// readInternal hands the cached data off exactly once (clearing the cache),
// or returns nil when nothing is cached.
func (r *cachedReader) readInternal() buf.MultiBuffer {
	r.Lock()
	defer r.Unlock()

	if r.cache != nil && !r.cache.IsEmpty() {
		mb := r.cache
		r.cache = nil
		return mb
	}

	return nil
}

// ReadMultiBuffer drains the sniffing cache first, then falls through to the
// underlying pipe.
func (r *cachedReader) ReadMultiBuffer() (buf.MultiBuffer, error) {
	mb := r.readInternal()
	if mb != nil {
		return mb, nil
	}

	return r.reader.ReadMultiBuffer()
}

// ReadMultiBufferTimeout behaves like ReadMultiBuffer but bounds the wait on
// the underlying pipe by the given timeout.
func (r *cachedReader) ReadMultiBufferTimeout(timeout time.Duration) (buf.MultiBuffer, error) {
	mb := r.readInternal()
	if mb != nil {
		return mb, nil
	}

	return r.reader.ReadMultiBufferTimeout(timeout)
}

// Interrupt releases any cached buffers and interrupts the underlying pipe.
func (r *cachedReader) Interrupt() {
	r.Lock()
	if r.cache != nil {
		r.cache = buf.ReleaseMulti(r.cache)
	}
	r.Unlock()
	r.reader.Interrupt()
}

// DefaultDispatcher is a default implementation of Dispatcher. It connects
// inbound traffic to an outbound handler, optionally sniffing the protocol
// and consulting the router to pick the handler.
type DefaultDispatcher struct {
	ohm    outbound.Manager
	router routing.Router
	policy policy.Manager
	stats  stats.Manager
}

func init() {
	// Register the dispatcher as a creatable feature; its dependencies are
	// resolved lazily via RequireFeatures.
	common.Must(common.RegisterConfig((*Config)(nil), func(ctx context.Context, config interface{}) (interface{}, error) {
		d := new(DefaultDispatcher)
		if err := core.RequireFeatures(ctx, func(om outbound.Manager, router routing.Router, pm policy.Manager, sm stats.Manager) error {
			return d.Init(config.(*Config), om, router, pm, sm)
		}); err != nil {
			return nil, err
		}
		return d, nil
	}))
}

// Init initializes DefaultDispatcher with its required features.
func (d *DefaultDispatcher) Init(config *Config, om outbound.Manager, router routing.Router, pm policy.Manager, sm stats.Manager) error {
	d.ohm = om
	d.router = router
	d.policy = pm
	d.stats = sm
	return nil
}

// Type implements common.HasType.
func (*DefaultDispatcher) Type() interface{} {
	return routing.DispatcherType()
}

// Start implements common.Runnable. It is a no-op.
func (*DefaultDispatcher) Start() error {
	return nil
}

// Close implements common.Closable. It is a no-op.
func (*DefaultDispatcher) Close() error {
	return nil
}

// getLink builds the two pipe pairs connecting inbound and outbound, and, if
// the session carries a user with an email, wraps the writers with per-user
// uplink/downlink traffic counters according to the user's policy.
func (d *DefaultDispatcher) getLink(ctx context.Context) (*transport.Link, *transport.Link) {
	opt := pipe.OptionsFromContext(ctx)
	uplinkReader, uplinkWriter := pipe.New(opt...)
	downlinkReader, downlinkWriter := pipe.New(opt...)

	inboundLink := &transport.Link{
		Reader: downlinkReader,
		Writer: uplinkWriter,
	}

	outboundLink := &transport.Link{
		Reader: uplinkReader,
		Writer: downlinkWriter,
	}

	sessionInbound := session.InboundFromContext(ctx)
	var user *protocol.MemoryUser
	if sessionInbound != nil {
		user = sessionInbound.User
	}

	if user != nil && len(user.Email) > 0 {
		p := d.policy.ForLevel(user.Level)
		if p.Stats.UserUplink {
			name := "user>>>" + user.Email + ">>>traffic>>>uplink"
			if c, _ := stats.GetOrRegisterCounter(d.stats, name); c != nil {
				inboundLink.Writer = &SizeStatWriter{
					Counter: c,
					Writer:  inboundLink.Writer,
				}
			}
		}
		if p.Stats.UserDownlink {
			name := "user>>>" + user.Email + ">>>traffic>>>downlink"
			if c, _ := stats.GetOrRegisterCounter(d.stats, name); c != nil {
				outboundLink.Writer = &SizeStatWriter{
					Counter: c,
					Writer:  outboundLink.Writer,
				}
			}
		}
	}

	return inboundLink, outboundLink
}

// shouldOverride reports whether the sniffed protocol name starts with any of
// the configured override prefixes.
func shouldOverride(result SniffResult, domainOverride []string) bool {
	for _, p := range domainOverride {
		if strings.HasPrefix(result.Protocol(), p) {
			return true
		}
	}
	return false
}

// Dispatch implements routing.Dispatcher. The destination must be valid
// (otherwise it panics). For TCP destinations with sniffing enabled, the
// outbound reader is wrapped in a cachedReader so the first bytes can be
// sniffed and, when configured, the destination address is overridden with
// the sniffed domain before routing.
func (d *DefaultDispatcher) Dispatch(ctx context.Context, destination net.Destination) (*transport.Link, error) {
	if !destination.IsValid() {
		panic("Dispatcher: Invalid destination.")
	}
	ob := &session.Outbound{
		Target: destination,
	}
	ctx = session.ContextWithOutbound(ctx, ob)

	inbound, outbound := d.getLink(ctx)
	content := session.ContentFromContext(ctx)
	if content == nil {
		content = new(session.Content)
		ctx = session.ContextWithContent(ctx, content)
	}
	sniffingRequest := content.SniffingRequest
	if destination.Network != net.Network_TCP || !sniffingRequest.Enabled {
		go d.routedDispatch(ctx, outbound, destination)
	} else {
		go func() {
			cReader := &cachedReader{
				reader: outbound.Reader.(*pipe.Reader),
			}
			outbound.Reader = cReader
			result, err := sniffer(ctx, cReader)
			if err == nil {
				content.Protocol = result.Protocol()
			}
			if err == nil && shouldOverride(result, sniffingRequest.OverrideDestinationForProtocol) {
				domain := result.Domain()
				newError("sniffed domain: ", domain).WriteToLog(session.ExportIDToError(ctx))
				destination.Address = net.ParseAddress(domain)
				ob.Target = destination
			}
			d.routedDispatch(ctx, outbound, destination)
		}()
	}
	return inbound, nil
}

// sniffer repeatedly caches incoming bytes and feeds them to a Sniffer,
// giving up after two empty-handed attempts (errSniffingTimeout), when the
// payload buffer is full without a match (errUnknownContent), or when ctx is
// cancelled.
func sniffer(ctx context.Context, cReader *cachedReader) (SniffResult, error) {
	payload := buf.New()
	defer payload.Release()

	sniffer := NewSniffer()
	totalAttempt := 0
	for {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
			totalAttempt++
			if totalAttempt > 2 {
				return nil, errSniffingTimeout
			}

			cReader.Cache(payload)
			if !payload.IsEmpty() {
				result, err := sniffer.Sniff(payload.Bytes())
				// ErrNoClue means "need more data" — keep trying; any other
				// outcome (success or hard failure) is final.
				if err != common.ErrNoClue {
					return result, err
				}
			}
			if payload.IsFull() {
				return nil, errUnknownContent
			}
		}
	}
}

// routedDispatch picks an outbound handler — via the router unless the
// session asked to skip route picking — falling back to the default handler,
// then hands the link to it. When no handler exists at all, the link is
// closed/interrupted.
func (d *DefaultDispatcher) routedDispatch(ctx context.Context, link *transport.Link, destination net.Destination) {
	var handler outbound.Handler

	skipRoutePick := false
	if content := session.ContentFromContext(ctx); content != nil {
		skipRoutePick = content.SkipRoutePick
	}

	if d.router != nil && !skipRoutePick {
		if route, err := d.router.PickRoute(routing_session.AsRoutingContext(ctx)); err == nil {
			tag := route.GetOutboundTag()
			if h := d.ohm.GetHandler(tag); h != nil {
				newError("taking detour [", tag, "] for [", destination, "]").WriteToLog(session.ExportIDToError(ctx))
				handler = h
			} else {
				newError("non existing tag: ", tag).AtWarning().WriteToLog(session.ExportIDToError(ctx))
			}
		} else {
			newError("default route for ", destination).WriteToLog(session.ExportIDToError(ctx))
		}
	}

	if handler == nil {
		handler = d.ohm.GetDefaultHandler()
	}

	if handler == nil {
		newError("default outbound handler not exist").WriteToLog(session.ExportIDToError(ctx))
		common.Close(link.Writer)
		common.Interrupt(link.Reader)
		return
	}

	// Record the access log entry (with the chosen detour tag) if one was
	// attached to the context.
	if accessMessage := log.AccessMessageFromContext(ctx); accessMessage != nil {
		if tag := handler.Tag(); tag != "" {
			accessMessage.Detour = tag
		}
		log.Record(accessMessage)
	}

	handler.Dispatch(ctx, link)
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/dispatcher/stats.go
app/dispatcher/stats.go
// +build !confonly package dispatcher import ( "v2ray.com/core/common" "v2ray.com/core/common/buf" "v2ray.com/core/features/stats" ) type SizeStatWriter struct { Counter stats.Counter Writer buf.Writer } func (w *SizeStatWriter) WriteMultiBuffer(mb buf.MultiBuffer) error { w.Counter.Add(int64(mb.Len())) return w.Writer.WriteMultiBuffer(mb) } func (w *SizeStatWriter) Close() error { return common.Close(w.Writer) } func (w *SizeStatWriter) Interrupt() { common.Interrupt(w.Writer) }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/dispatcher/errors.generated.go
app/dispatcher/errors.generated.go
package dispatcher

import "v2ray.com/core/common/errors"

// errPathObjHolder is the zero-size anchor passed to WithPathObj; the errors
// framework uses it to associate errors with this package.
type errPathObjHolder struct{}

// newError creates a new *errors.Error from the given values, tagged with
// this package via errPathObjHolder.
// NOTE(review): this file is generated by
// v2ray.com/core/common/errors/errorgen — regenerate rather than hand-edit.
func newError(values ...interface{}) *errors.Error {
	return errors.New(values...).WithPathObj(errPathObjHolder{})
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/dispatcher/sniffer.go
app/dispatcher/sniffer.go
// +build !confonly package dispatcher import ( "v2ray.com/core/common" "v2ray.com/core/common/protocol/bittorrent" "v2ray.com/core/common/protocol/http" "v2ray.com/core/common/protocol/tls" ) type SniffResult interface { Protocol() string Domain() string } type protocolSniffer func([]byte) (SniffResult, error) type Sniffer struct { sniffer []protocolSniffer } func NewSniffer() *Sniffer { return &Sniffer{ sniffer: []protocolSniffer{ func(b []byte) (SniffResult, error) { return http.SniffHTTP(b) }, func(b []byte) (SniffResult, error) { return tls.SniffTLS(b) }, func(b []byte) (SniffResult, error) { return bittorrent.SniffBittorrent(b) }, }, } } var errUnknownContent = newError("unknown content") func (s *Sniffer) Sniff(payload []byte) (SniffResult, error) { var pendingSniffer []protocolSniffer for _, s := range s.sniffer { result, err := s(payload) if err == common.ErrNoClue { pendingSniffer = append(pendingSniffer, s) continue } if err == nil && result != nil { return result, nil } } if len(pendingSniffer) > 0 { s.sniffer = pendingSniffer return nil, common.ErrNoClue } return nil, errUnknownContent }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/dispatcher/stats_test.go
app/dispatcher/stats_test.go
package dispatcher_test import ( "testing" . "v2ray.com/core/app/dispatcher" "v2ray.com/core/common" "v2ray.com/core/common/buf" ) type TestCounter int64 func (c *TestCounter) Value() int64 { return int64(*c) } func (c *TestCounter) Add(v int64) int64 { x := int64(*c) + v *c = TestCounter(x) return x } func (c *TestCounter) Set(v int64) int64 { *c = TestCounter(v) return v } func TestStatsWriter(t *testing.T) { var c TestCounter writer := &SizeStatWriter{ Counter: &c, Writer: buf.Discard, } mb := buf.MergeBytes(nil, []byte("abcd")) common.Must(writer.WriteMultiBuffer(mb)) mb = buf.MergeBytes(nil, []byte("efg")) common.Must(writer.WriteMultiBuffer(mb)) if c.Value() != 7 { t.Fatal("unexpected counter value. want 7, but got ", c.Value()) } }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/dispatcher/dispatcher.go
app/dispatcher/dispatcher.go
// +build !confonly package dispatcher //go:generate go run v2ray.com/core/common/errors/errorgen
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/dispatcher/config.pb.go
app/dispatcher/config.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 // protoc v3.13.0 // source: app/dispatcher/config.proto package dispatcher import ( proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 type SessionConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *SessionConfig) Reset() { *x = SessionConfig{} if protoimpl.UnsafeEnabled { mi := &file_app_dispatcher_config_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *SessionConfig) String() string { return protoimpl.X.MessageStringOf(x) } func (*SessionConfig) ProtoMessage() {} func (x *SessionConfig) ProtoReflect() protoreflect.Message { mi := &file_app_dispatcher_config_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use SessionConfig.ProtoReflect.Descriptor instead. 
func (*SessionConfig) Descriptor() ([]byte, []int) { return file_app_dispatcher_config_proto_rawDescGZIP(), []int{0} } type Config struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Settings *SessionConfig `protobuf:"bytes,1,opt,name=settings,proto3" json:"settings,omitempty"` } func (x *Config) Reset() { *x = Config{} if protoimpl.UnsafeEnabled { mi := &file_app_dispatcher_config_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Config) String() string { return protoimpl.X.MessageStringOf(x) } func (*Config) ProtoMessage() {} func (x *Config) ProtoReflect() protoreflect.Message { mi := &file_app_dispatcher_config_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Config.ProtoReflect.Descriptor instead. 
func (*Config) Descriptor() ([]byte, []int) { return file_app_dispatcher_config_proto_rawDescGZIP(), []int{1} } func (x *Config) GetSettings() *SessionConfig { if x != nil { return x.Settings } return nil } var File_app_dispatcher_config_proto protoreflect.FileDescriptor var file_app_dispatcher_config_proto_rawDesc = []byte{ 0x0a, 0x1b, 0x61, 0x70, 0x70, 0x2f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x70, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x22, 0x15, 0x0a, 0x0d, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x4e, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x44, 0x0a, 0x08, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x70, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x08, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x42, 0x5c, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x70, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x01, 0x5a, 0x1d, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0xaa, 0x02, 0x19, 0x56, 0x32, 0x52, 0x61, 0x79, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_app_dispatcher_config_proto_rawDescOnce sync.Once 
file_app_dispatcher_config_proto_rawDescData = file_app_dispatcher_config_proto_rawDesc ) func file_app_dispatcher_config_proto_rawDescGZIP() []byte { file_app_dispatcher_config_proto_rawDescOnce.Do(func() { file_app_dispatcher_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_app_dispatcher_config_proto_rawDescData) }) return file_app_dispatcher_config_proto_rawDescData } var file_app_dispatcher_config_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_app_dispatcher_config_proto_goTypes = []interface{}{ (*SessionConfig)(nil), // 0: v2ray.core.app.dispatcher.SessionConfig (*Config)(nil), // 1: v2ray.core.app.dispatcher.Config } var file_app_dispatcher_config_proto_depIdxs = []int32{ 0, // 0: v2ray.core.app.dispatcher.Config.settings:type_name -> v2ray.core.app.dispatcher.SessionConfig 1, // [1:1] is the sub-list for method output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name 1, // [1:1] is the sub-list for extension extendee 0, // [0:1] is the sub-list for field type_name } func init() { file_app_dispatcher_config_proto_init() } func file_app_dispatcher_config_proto_init() { if File_app_dispatcher_config_proto != nil { return } if !protoimpl.UnsafeEnabled { file_app_dispatcher_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SessionConfig); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_app_dispatcher_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Config); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_app_dispatcher_config_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: 
file_app_dispatcher_config_proto_goTypes, DependencyIndexes: file_app_dispatcher_config_proto_depIdxs, MessageInfos: file_app_dispatcher_config_proto_msgTypes, }.Build() File_app_dispatcher_config_proto = out.File file_app_dispatcher_config_proto_rawDesc = nil file_app_dispatcher_config_proto_goTypes = nil file_app_dispatcher_config_proto_depIdxs = nil }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/reverse/reverse.go
app/reverse/reverse.go
// +build !confonly

package reverse

//go:generate go run v2ray.com/core/common/errors/errorgen

import (
	"context"

	"v2ray.com/core"
	"v2ray.com/core/common"
	"v2ray.com/core/common/errors"
	"v2ray.com/core/common/net"
	"v2ray.com/core/features/outbound"
	"v2ray.com/core/features/routing"
)

const (
	// internalDomain marks control connections between bridge and portal;
	// traffic to this pseudo-domain never leaves the reverse-proxy machinery.
	internalDomain = "reverse.internal.v2ray.com"
)

// isDomain reports whether dest addresses the given domain name.
func isDomain(dest net.Destination, domain string) bool {
	return dest.Address.Family().IsDomain() && dest.Address.Domain() == domain
}

// isInternalDomain reports whether dest targets the reverse-proxy control
// domain.
func isInternalDomain(dest net.Destination) bool {
	return isDomain(dest, internalDomain)
}

func init() {
	// Register Reverse as a creatable feature; its dispatcher and outbound
	// manager dependencies are resolved via RequireFeatures.
	common.Must(common.RegisterConfig((*Config)(nil), func(ctx context.Context, config interface{}) (interface{}, error) {
		r := new(Reverse)
		if err := core.RequireFeatures(ctx, func(d routing.Dispatcher, om outbound.Manager) error {
			return r.Init(config.(*Config), d, om)
		}); err != nil {
			return nil, err
		}
		return r, nil
	}))
}

// Reverse aggregates the configured bridges and portals of the reverse-proxy
// feature and manages their lifecycle.
type Reverse struct {
	bridges []*Bridge
	portals []*Portal
}

// Init constructs one Bridge per BridgeConfig and one Portal per
// PortalConfig, failing fast on the first construction error.
func (r *Reverse) Init(config *Config, d routing.Dispatcher, ohm outbound.Manager) error {
	for _, bConfig := range config.BridgeConfig {
		b, err := NewBridge(bConfig, d)
		if err != nil {
			return err
		}
		r.bridges = append(r.bridges, b)
	}

	for _, pConfig := range config.PortalConfig {
		p, err := NewPortal(pConfig, ohm)
		if err != nil {
			return err
		}
		r.portals = append(r.portals, p)
	}

	return nil
}

// Type implements common.HasType.
func (r *Reverse) Type() interface{} {
	return (*Reverse)(nil)
}

// Start starts all bridges, then all portals, stopping at the first error.
func (r *Reverse) Start() error {
	for _, b := range r.bridges {
		if err := b.Start(); err != nil {
			return err
		}
	}

	for _, p := range r.portals {
		if err := p.Start(); err != nil {
			return err
		}
	}

	return nil
}

// Close closes every bridge and portal and combines any errors.
func (r *Reverse) Close() error {
	var errs []error
	for _, b := range r.bridges {
		errs = append(errs, b.Close())
	}

	for _, p := range r.portals {
		errs = append(errs, p.Close())
	}

	return errors.Combine(errs...)
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/reverse/errors.generated.go
app/reverse/errors.generated.go
package reverse

import "v2ray.com/core/common/errors"

// errPathObjHolder is the zero-size anchor passed to WithPathObj; the errors
// framework uses it to associate errors with this package.
type errPathObjHolder struct{}

// newError creates a new *errors.Error from the given values, tagged with
// this package via errPathObjHolder.
// NOTE(review): this file is generated by
// v2ray.com/core/common/errors/errorgen — regenerate rather than hand-edit.
func newError(values ...interface{}) *errors.Error {
	return errors.New(values...).WithPathObj(errPathObjHolder{})
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/reverse/portal.go
app/reverse/portal.go
// +build !confonly

package reverse

import (
	"context"
	"sync"
	"time"

	"github.com/golang/protobuf/proto"

	"v2ray.com/core/common"
	"v2ray.com/core/common/buf"
	"v2ray.com/core/common/mux"
	"v2ray.com/core/common/net"
	"v2ray.com/core/common/session"
	"v2ray.com/core/common/task"
	"v2ray.com/core/features/outbound"
	"v2ray.com/core/transport"
	"v2ray.com/core/transport/pipe"
)

// Portal is the publicly reachable end of the reverse proxy. It accepts
// control connections from bridges (identified by the configured domain) and
// relays ordinary traffic through the pool of connected bridge workers.
type Portal struct {
	ohm    outbound.Manager
	tag    string
	domain string
	picker *StaticMuxPicker
	client *mux.ClientManager
}

// NewPortal validates the config and builds a Portal with an empty worker
// picker.
func NewPortal(config *PortalConfig, ohm outbound.Manager) (*Portal, error) {
	if config.Tag == "" {
		return nil, newError("portal tag is empty")
	}

	if config.Domain == "" {
		return nil, newError("portal domain is empty")
	}

	picker, err := NewStaticMuxPicker()
	if err != nil {
		return nil, err
	}

	return &Portal{
		ohm:    ohm,
		tag:    config.Tag,
		domain: config.Domain,
		picker: picker,
		client: &mux.ClientManager{
			Picker: picker,
		},
	}, nil
}

// Start registers the portal as an outbound handler under its tag.
func (p *Portal) Start() error {
	return p.ohm.AddHandler(context.Background(), &Outbound{
		portal: p,
		tag:    p.tag,
	})
}

// Close unregisters the portal's outbound handler.
func (p *Portal) Close() error {
	return p.ohm.RemoveHandler(context.Background(), p.tag)
}

// HandleConnection inspects the session target: a connection addressed to
// the portal's domain is a bridge joining the pool (wrapped into a mux
// client worker), anything else is user traffic dispatched through a picked
// bridge worker.
func (p *Portal) HandleConnection(ctx context.Context, link *transport.Link) error {
	outboundMeta := session.OutboundFromContext(ctx)
	if outboundMeta == nil {
		return newError("outbound metadata not found").AtError()
	}

	if isDomain(outboundMeta.Target, p.domain) {
		muxClient, err := mux.NewClientWorker(*link, mux.ClientStrategy{})
		if err != nil {
			return newError("failed to create mux client worker").Base(err).AtWarning()
		}

		worker, err := NewPortalWorker(muxClient)
		if err != nil {
			return newError("failed to create portal worker").Base(err)
		}

		p.picker.AddWorker(worker)
		return nil
	}

	return p.client.Dispatch(ctx, link)
}

// Outbound adapts a Portal to the outbound handler interface.
type Outbound struct {
	portal *Portal
	tag    string
}

// Tag returns the handler tag.
func (o *Outbound) Tag() string {
	return o.tag
}

// Dispatch forwards the link to the portal; on failure the link's writer is
// interrupted so the peer does not hang.
func (o *Outbound) Dispatch(ctx context.Context, link *transport.Link) {
	if err := o.portal.HandleConnection(ctx, link); err != nil {
		newError("failed to process reverse connection").Base(err).WriteToLog(session.ExportIDToError(ctx))
		common.Interrupt(link.Writer)
	}
}

// Start is a no-op.
func (o *Outbound) Start() error {
	return nil
}

// Close is a no-op.
func (o *Outbound) Close() error {
	return nil
}

// StaticMuxPicker keeps the pool of PortalWorkers and picks the least-loaded
// one for new traffic. A periodic task prunes closed workers.
type StaticMuxPicker struct {
	access  sync.Mutex
	workers []*PortalWorker
	cTask   *task.Periodic
}

// NewStaticMuxPicker creates a picker and starts its 30s cleanup task.
func NewStaticMuxPicker() (*StaticMuxPicker, error) {
	p := &StaticMuxPicker{}
	p.cTask = &task.Periodic{
		Execute:  p.cleanup,
		Interval: time.Second * 30,
	}
	p.cTask.Start()
	return p, nil
}

// cleanup drops workers whose underlying mux client has closed.
func (p *StaticMuxPicker) cleanup() error {
	p.access.Lock()
	defer p.access.Unlock()

	var activeWorkers []*PortalWorker
	for _, w := range p.workers {
		if !w.Closed() {
			activeWorkers = append(activeWorkers, w)
		}
	}

	if len(activeWorkers) != len(p.workers) {
		p.workers = activeWorkers
	}

	return nil
}

// PickAvailable returns the non-draining worker with the fewest active
// connections. If every worker is draining, a second pass considers any
// non-full worker.
// NOTE(review): the sentinel 9999 caps comparable connection counts, and the
// second pass does not re-exclude draining workers — confirm both are
// intended.
func (p *StaticMuxPicker) PickAvailable() (*mux.ClientWorker, error) {
	p.access.Lock()
	defer p.access.Unlock()

	if len(p.workers) == 0 {
		return nil, newError("empty worker list")
	}

	var minIdx int = -1
	var minConn uint32 = 9999
	for i, w := range p.workers {
		if w.draining {
			continue
		}
		if w.client.ActiveConnections() < minConn {
			minConn = w.client.ActiveConnections()
			minIdx = i
		}
	}

	if minIdx == -1 {
		for i, w := range p.workers {
			if w.IsFull() {
				continue
			}
			if w.client.ActiveConnections() < minConn {
				minConn = w.client.ActiveConnections()
				minIdx = i
			}
		}
	}

	if minIdx != -1 {
		return p.workers[minIdx].client, nil
	}

	return nil, newError("no mux client worker available")
}

// AddWorker appends a freshly connected bridge worker to the pool.
func (p *StaticMuxPicker) AddWorker(worker *PortalWorker) {
	p.access.Lock()
	defer p.access.Unlock()

	p.workers = append(p.workers, worker)
}

// PortalWorker wraps one mux client connection to a bridge, plus the control
// channel used for heartbeats and drain signalling.
type PortalWorker struct {
	client   *mux.ClientWorker
	control  *task.Periodic
	writer   buf.Writer
	reader   buf.Reader
	draining bool
}

// NewPortalWorker opens the control connection (targeting internalDomain)
// over the given mux client and starts the 2s heartbeat task.
func NewPortalWorker(client *mux.ClientWorker) (*PortalWorker, error) {
	opt := []pipe.Option{pipe.WithSizeLimit(16 * 1024)}
	uplinkReader, uplinkWriter := pipe.New(opt...)
	downlinkReader, downlinkWriter := pipe.New(opt...)

	ctx := context.Background()
	ctx = session.ContextWithOutbound(ctx, &session.Outbound{
		Target: net.UDPDestination(net.DomainAddress(internalDomain), 0),
	})
	f := client.Dispatch(ctx, &transport.Link{
		Reader: uplinkReader,
		Writer: downlinkWriter,
	})
	if !f {
		return nil, newError("unable to dispatch control connection")
	}
	w := &PortalWorker{
		client: client,
		reader: downlinkReader,
		writer: uplinkWriter,
	}
	w.control = &task.Periodic{
		Execute:  w.heartbeat,
		Interval: time.Second * 2,
	}
	w.control.Start()
	return w, nil
}

// heartbeat sends a randomized Control message to the bridge every tick.
// After more than 256 total connections the worker flips to draining: it
// sends one final DRAIN message and disposes of its control pipes.
func (w *PortalWorker) heartbeat() error {
	if w.client.Closed() {
		return newError("client worker stopped")
	}

	if w.draining || w.writer == nil {
		return newError("already disposed")
	}

	msg := &Control{}
	msg.FillInRandom()

	if w.client.TotalConnections() > 256 {
		w.draining = true
		msg.State = Control_DRAIN

		// Tear down the control channel after the DRAIN message is written.
		defer func() {
			common.Close(w.writer)
			common.Interrupt(w.reader)
			w.writer = nil
		}()
	}

	b, err := proto.Marshal(msg)
	common.Must(err)
	mb := buf.MergeBytes(nil, b)
	return w.writer.WriteMultiBuffer(mb)
}

// IsFull reports whether the underlying mux client cannot take more streams.
func (w *PortalWorker) IsFull() bool {
	return w.client.IsFull()
}

// Closed reports whether the underlying mux client has closed.
func (w *PortalWorker) Closed() bool {
	return w.client.Closed()
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/reverse/config.go
app/reverse/config.go
// +build !confonly package reverse import ( "crypto/rand" "io" "v2ray.com/core/common/dice" ) func (c *Control) FillInRandom() { randomLength := dice.Roll(64) c.Random = make([]byte, randomLength) io.ReadFull(rand.Reader, c.Random) }
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false
v2ray/v2ray-core
https://github.com/v2ray/v2ray-core/blob/d80440f3d57b45c829dbf513306f7adf9a0f3f76/app/reverse/bridge.go
app/reverse/bridge.go
// +build !confonly

package reverse

import (
	"context"
	"time"

	"github.com/golang/protobuf/proto"
	"v2ray.com/core/common/mux"
	"v2ray.com/core/common/net"
	"v2ray.com/core/common/session"
	"v2ray.com/core/common/task"
	"v2ray.com/core/features/routing"
	"v2ray.com/core/transport"
	"v2ray.com/core/transport/pipe"
)

// Bridge is a component in reverse proxy, that relays connections from Portal to local address.
type Bridge struct {
	dispatcher  routing.Dispatcher
	tag         string
	domain      string
	workers     []*BridgeWorker
	monitorTask *task.Periodic
}

// NewBridge creates a new Bridge instance. Both the tag and the domain
// are required; the 2-second monitor task is created here but only
// started by Start.
func NewBridge(config *BridgeConfig, dispatcher routing.Dispatcher) (*Bridge, error) {
	if config.Tag == "" {
		return nil, newError("bridge tag is empty")
	}
	if config.Domain == "" {
		return nil, newError("bridge domain is empty")
	}

	b := &Bridge{
		dispatcher: dispatcher,
		tag:        config.Tag,
		domain:     config.Domain,
	}
	b.monitorTask = &task.Periodic{
		Execute:  b.monitor,
		Interval: time.Second * 2,
	}
	return b, nil
}

// cleanup drops inactive workers; the slice is only replaced when at
// least one worker was removed.
func (b *Bridge) cleanup() {
	var activeWorkers []*BridgeWorker

	for _, w := range b.workers {
		if w.IsActive() {
			activeWorkers = append(activeWorkers, w)
		}
	}

	if len(activeWorkers) != len(b.workers) {
		b.workers = activeWorkers
	}
}

// monitor runs every 2 seconds: it prunes dead workers, then spawns a
// new worker when there are none or the average load exceeds 16
// connections per active worker. A failed spawn is logged, not
// returned, so the periodic task keeps running.
func (b *Bridge) monitor() error {
	b.cleanup()

	var numConnections uint32
	var numWorker uint32
	for _, w := range b.workers {
		if w.IsActive() {
			numConnections += w.Connections()
			numWorker++
		}
	}

	if numWorker == 0 || numConnections/numWorker > 16 {
		worker, err := NewBridgeWorker(b.domain, b.tag, b.dispatcher)
		if err != nil {
			newError("failed to create bridge worker").Base(err).AtWarning().WriteToLog()
			return nil
		}
		b.workers = append(b.workers, worker)
	}

	return nil
}

// Start implements common.Runnable by starting the monitor task.
func (b *Bridge) Start() error {
	return b.monitorTask.Start()
}

// Close implements common.Closable by stopping the monitor task.
func (b *Bridge) Close() error {
	return b.monitorTask.Close()
}

// BridgeWorker pairs a mux server worker with the dispatcher it proxies
// for. Its state mirrors the last Control message received from the
// portal over the internal control connection.
type BridgeWorker struct {
	tag        string
	worker     *mux.ServerWorker
	dispatcher routing.Dispatcher
	state      Control_State
}

// NewBridgeWorker dials the portal (TCP toward the configured domain,
// port 0) through the dispatcher and wraps the resulting link in a mux
// server worker that uses this BridgeWorker as its own dispatcher.
func NewBridgeWorker(domain string, tag string, d routing.Dispatcher) (*BridgeWorker, error) {
	ctx := context.Background()
	ctx = session.ContextWithInbound(ctx, &session.Inbound{
		Tag: tag,
	})
	link, err := d.Dispatch(ctx, net.Destination{
		Network: net.Network_TCP,
		Address: net.DomainAddress(domain),
		Port:    0,
	})
	if err != nil {
		return nil, err
	}

	w := &BridgeWorker{
		dispatcher: d,
		tag:        tag,
	}

	worker, err := mux.NewServerWorker(context.Background(), w, link)
	if err != nil {
		return nil, err
	}
	w.worker = worker

	return w, nil
}

// Type implements common.HasType so the worker can stand in for a
// routing.Dispatcher feature.
func (w *BridgeWorker) Type() interface{} {
	return routing.DispatcherType()
}

// Start implements common.Runnable; nothing to start beyond the
// constructor's work.
func (w *BridgeWorker) Start() error {
	return nil
}

// Close implements common.Closable; the mux worker is owned elsewhere.
func (w *BridgeWorker) Close() error {
	return nil
}

// IsActive reports whether the portal last signalled ACTIVE and the mux
// worker is still open.
func (w *BridgeWorker) IsActive() bool {
	return w.state == Control_ACTIVE && !w.worker.Closed()
}

// Connections returns the number of active mux connections carried by
// this worker.
func (w *BridgeWorker) Connections() uint32 {
	return w.worker.ActiveConnections()
}

// handleInternalConn consumes Control messages from the portal in a
// background goroutine, updating w.state as they arrive. The goroutine
// exits when the reader errors (e.g. the pipe is interrupted).
// NOTE(review): w.state is written here and read by IsActive without
// synchronization — looks like a benign race on an int-like field, but
// worth confirming.
func (w *BridgeWorker) handleInternalConn(link transport.Link) {
	go func() {
		reader := link.Reader
		for {
			mb, err := reader.ReadMultiBuffer()
			if err != nil {
				break
			}
			for _, b := range mb {
				var ctl Control
				if err := proto.Unmarshal(b.Bytes(), &ctl); err != nil {
					newError("failed to parse proto message").Base(err).WriteToLog()
					break
				}
				if ctl.State != w.state {
					w.state = ctl.State
				}
			}
		}
	}()
}

// Dispatch implements routing.Dispatcher. Ordinary destinations are
// tagged with this bridge's inbound tag and forwarded to the real
// dispatcher; the internal control domain instead gets a local pipe
// pair whose far side feeds handleInternalConn.
func (w *BridgeWorker) Dispatch(ctx context.Context, dest net.Destination) (*transport.Link, error) {
	if !isInternalDomain(dest) {
		ctx = session.ContextWithInbound(ctx, &session.Inbound{
			Tag: w.tag,
		})
		return w.dispatcher.Dispatch(ctx, dest)
	}

	opt := []pipe.Option{pipe.WithSizeLimit(16 * 1024)}
	uplinkReader, uplinkWriter := pipe.New(opt...)
	downlinkReader, downlinkWriter := pipe.New(opt...)

	w.handleInternalConn(transport.Link{
		Reader: downlinkReader,
		Writer: uplinkWriter,
	})

	return &transport.Link{
		Reader: uplinkReader,
		Writer: downlinkWriter,
	}, nil
}
go
MIT
d80440f3d57b45c829dbf513306f7adf9a0f3f76
2026-01-07T08:35:44.381088Z
false