repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/qwen3vl.go
model/parsers/qwen3vl.go
package parsers

import (
	"context"
	"encoding/json"
	"log/slog"
	"strings"
	"unicode"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/logutil"
)

// TODO: call the init function

// Parser states for the Qwen3-VL streaming output parser. The parser moves
// between these as it recognizes thinking sections, plain content, and tool
// calls in the incrementally-arriving model output.
const (
	CollectingThinkingContent qwenParserState = iota
	CollectingContent
	CollectingToolContent
	ThinkingDoneEatingWhitespace
	ToolCallDoneEatingWhitespace
)

const (
	// thinkingCloseTag terminates the model's thinking section. Note there is
	// no opening tag constant here: when thinking is enabled the parser starts
	// directly in CollectingThinkingContent.
	thinkingCloseTag = "</think>"
)

// Qwen3VLParser incrementally splits Qwen3-VL model output into thinking
// text, regular content, and raw tool-call payloads. Tool-call tags
// (toolOpenTag/toolCloseTag) and the helpers overlap/splitAtTag/
// trailingWhitespaceLen are shared package-level definitions — presumably
// from the qwen3coder parser file; verify there when changing tag handling.
type Qwen3VLParser struct {
	state              qwenParserState  // current position in the state machine
	buffer             strings.Builder  // unconsumed (possibly ambiguous) model output
	tools              []api.Tool       // tools available for call resolution
	hasThinkingSupport bool             // whether a thinking section is expected
}

// HasToolSupport reports that this parser can extract tool calls (always true).
func (p *Qwen3VLParser) HasToolSupport() bool { return true }

// HasThinkingSupport reports whether the parser expects a leading thinking
// section, as configured via hasThinkingSupport.
func (p *Qwen3VLParser) HasThinkingSupport() bool { return p.hasThinkingSupport }

// setInitialState picks the starting state. Without thinking support we go
// straight to content. With thinking support, an assistant prefill that
// already contains content means the thinking phase is over, so we also start
// in content; otherwise we begin by collecting thinking.
func (p *Qwen3VLParser) setInitialState(lastMessage *api.Message) {
	prefill := lastMessage != nil && lastMessage.Role == "assistant"

	if !p.HasThinkingSupport() {
		p.state = CollectingContent
		return
	}

	if prefill && lastMessage.Content != "" {
		p.state = CollectingContent
		return
	}

	p.state = CollectingThinkingContent
}

// Init stores the available tools and establishes the initial parser state.
// The tool list is returned unmodified.
func (p *Qwen3VLParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
	p.tools = tools
	p.setInitialState(lastMessage)
	return tools
}

// qwenEventThinkingContent is an internal event carrying a chunk of
// unambiguous thinking text emitted by eat().
type qwenEventThinkingContent struct {
	content string
}

func (qwenEventThinkingContent) isQwenEvent() {}

// Add feeds one streamed chunk into the parser and returns any content,
// thinking text, and completed tool calls that became unambiguous. The done
// flag is currently unused. A tool-call JSON parse failure aborts the whole
// call and returns the error.
func (p *Qwen3VLParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) {
	p.buffer.WriteString(s)
	events := p.parseEvents()

	var contentSb strings.Builder
	var thinkingSb strings.Builder
	for _, event := range events {
		switch event := event.(type) {
		case qwenEventRawToolCall:
			toolCall, err := parseJSONToolCall(event, p.tools)
			if err != nil {
				slog.Warn("qwen tool call parsing failed", "error", err)
				return "", "", nil, err
			}
			calls = append(calls, toolCall)
		case qwenEventThinkingContent:
			thinkingSb.WriteString(event.content)
		case qwenEventContent:
			// TODO(drifkin): if the same turn contains multiple interleaved content
			// events, we naively append them together here.
			contentSb.WriteString(event.content)
		}
	}

	return contentSb.String(), thinkingSb.String(), calls, nil
}

// parseEvents repeatedly calls eat() until it reports no further state
// transition, accumulating every event produced along the way.
func (p *Qwen3VLParser) parseEvents() []qwenEvent {
	var all []qwenEvent

	keepLooping := true
	for keepLooping {
		var events []qwenEvent
		events, keepLooping = p.eat()
		if len(events) > 0 {
			all = append(all, events...)
		}
	}

	if len(all) > 0 {
		slog.Log(context.TODO(), logutil.LevelTrace, "qwen events parsed", "events", all, "state", p.state, "buffer", p.buffer.String())
	}

	return all
}

// eatLeadingWhitespaceAndTransitionTo strips leading whitespace from the
// buffer. If anything non-whitespace remains it switches to nextState and
// asks the caller to keep looping; if the buffer was all whitespace it stays
// in the current state waiting for more input.
func (p *Qwen3VLParser) eatLeadingWhitespaceAndTransitionTo(nextState qwenParserState) ([]qwenEvent, bool) {
	trimmed := strings.TrimLeftFunc(p.buffer.String(), unicode.IsSpace)
	p.buffer.Reset()
	if trimmed == "" {
		return nil, false
	}
	p.state = nextState
	p.buffer.WriteString(trimmed)
	return nil, true
}

// eat consumes the buffer and returns any unambiguous events for the current
// state. The second return value is true when a state transition occurred and
// the caller should call eat again (more events may follow immediately).
func (p *Qwen3VLParser) eat() ([]qwenEvent, bool) {
	var events []qwenEvent

	switch p.state {
	case CollectingContent:
		if strings.Contains(p.buffer.String(), toolOpenTag) {
			// Full tool-open tag found: emit the content before it and start
			// collecting the tool payload.
			// events = emitContentBeforeTag(p, events, toolOpenTag)
			before, _ := splitAtTag(&p.buffer, toolOpenTag, false)
			if len(before) > 0 {
				events = append(events, qwenEventContent{content: before})
			}
			p.state = CollectingToolContent
			return events, true
		} else if overlapLen := overlap(p.buffer.String(), toolOpenTag); overlapLen > 0 {
			// The buffer ends with a prefix of the tool-open tag; hold back that
			// prefix (plus any trailing whitespace before it, which would be
			// trimmed if the tag completes) and emit only the unambiguous part.
			beforePartialTag := p.buffer.String()[:len(p.buffer.String())-overlapLen]
			trailingWhitespaceLen := trailingWhitespaceLen(beforePartialTag)
			ambiguousStart := len(beforePartialTag) - trailingWhitespaceLen
			unambiguous := p.buffer.String()[:ambiguousStart]
			ambiguous := p.buffer.String()[ambiguousStart:]
			p.buffer.Reset()
			p.buffer.WriteString(ambiguous)
			if len(unambiguous) > 0 {
				events = append(events, qwenEventContent{content: unambiguous})
			}
			return events, false
		} else {
			// Pure content. Withhold trailing whitespace in case the stream ends
			// here or a tag follows; emit the rest.
			whitespaceLen := trailingWhitespaceLen(p.buffer.String())
			ambiguousStart := len(p.buffer.String()) - whitespaceLen
			unambiguous := p.buffer.String()[:ambiguousStart]
			ambiguous := p.buffer.String()[ambiguousStart:]
			p.buffer.Reset()
			p.buffer.WriteString(ambiguous)
			if len(unambiguous) > 0 {
				events = append(events, qwenEventContent{content: unambiguous})
			}
			return events, false
		}
	case CollectingToolContent:
		if strings.Contains(p.buffer.String(), toolCloseTag) {
			// Complete tool payload: emit it raw and move on to eating the
			// whitespace that follows the closing tag.
			split := strings.SplitN(p.buffer.String(), toolCloseTag, 2)
			before := split[0]
			// do we also need to do it to tool calls?
			if len(before) == 0 {
				slog.Warn("qwen tool call closing tag found but no content before it")
			}
			after := split[1]
			events = append(events, qwenEventRawToolCall{raw: before})
			p.buffer.Reset()
			p.buffer.WriteString(after)
			p.state = ToolCallDoneEatingWhitespace
			return events, true
		} else {
			// No overlap check needed: tool content is only emitted once the full
			// closing tag arrives, so nothing is streamed eagerly here.
			return events, false
		}
	case CollectingThinkingContent:
		if strings.Contains(p.buffer.String(), thinkingCloseTag) {
			// Thinking section complete. If nothing follows the close tag yet,
			// transition to whitespace-eating; otherwise go straight to content.
			thinking, remaining := splitAtTag(&p.buffer, thinkingCloseTag, true)
			if len(thinking) > 0 {
				events = append(events, qwenEventThinkingContent{content: thinking})
			}
			if remaining == "" {
				p.state = ThinkingDoneEatingWhitespace
			} else {
				p.state = CollectingContent
			}
			return events, true
		} else if overlapLen := overlap(p.buffer.String(), thinkingCloseTag); overlapLen > 0 {
			// Buffer ends with a prefix of </think>: same hold-back logic as the
			// partial tool-open tag case above, but emitting thinking events.
			beforePartialTag := p.buffer.String()[:len(p.buffer.String())-overlapLen]
			trailingWhitespaceLen := trailingWhitespaceLen(beforePartialTag)
			ambiguousStart := len(beforePartialTag) - trailingWhitespaceLen
			unambiguous := p.buffer.String()[:ambiguousStart]
			ambiguous := p.buffer.String()[ambiguousStart:]
			p.buffer.Reset()
			p.buffer.WriteString(ambiguous)
			if len(unambiguous) > 0 {
				events = append(events, qwenEventThinkingContent{content: unambiguous})
			}
			return events, false
		} else {
			// Plain thinking text: emit everything except trailing whitespace,
			// which stays buffered in case a close tag follows.
			whitespaceLen := trailingWhitespaceLen(p.buffer.String())
			ambiguousStart := len(p.buffer.String()) - whitespaceLen
			unambiguous := p.buffer.String()[:ambiguousStart]
			ambiguous := p.buffer.String()[ambiguousStart:]
			p.buffer.Reset()
			p.buffer.WriteString(ambiguous)
			if len(unambiguous) > 0 {
				events = append(events, qwenEventThinkingContent{content: unambiguous})
			}
			return events, false
		}
	case ThinkingDoneEatingWhitespace:
		// Discard whitespace between </think> and the first content.
		return p.eatLeadingWhitespaceAndTransitionTo(CollectingContent)
	case ToolCallDoneEatingWhitespace:
		// Discard whitespace between </tool_call> and whatever follows.
		return p.eatLeadingWhitespaceAndTransitionTo(CollectingContent)
	default:
		panic("unreachable")
	}
}

// parseJSONToolCall decodes a raw tool-call payload (JSON matching
// api.ToolCallFunction) into an api.ToolCall. The tools argument is currently
// unused here — name/argument validation against it is not performed.
func parseJSONToolCall(raw qwenEventRawToolCall, tools []api.Tool) (api.ToolCall, error) {
	var toolCallFunction api.ToolCallFunction
	if err := json.Unmarshal([]byte(raw.raw), &toolCallFunction); err != nil {
		return api.ToolCall{}, err
	}
	toolCall := api.ToolCall{}
	toolCall.Function = toolCallFunction
	return toolCall, nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/qwen3coder.go
model/parsers/qwen3coder.go
package parsers

import (
	"context"
	"encoding/json"
	"encoding/xml"
	"fmt"
	"log/slog"
	"math"
	"regexp"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/logutil"
)

// qwenParserState enumerates the states of the qwen streaming parsers.
type qwenParserState int

// Tags delimiting a tool call in qwen3-coder model output.
const (
	toolOpenTag  = "<tool_call>"
	toolCloseTag = "</tool_call>"
)

const (
	qwenParserState_LookingForToolStart qwenParserState = iota
	qwenParserState_CollectingToolContent
)

// Qwen3CoderParser incrementally splits qwen3-coder output into plain content
// and tool calls written in an xml-like <function=...>/<parameter=...> format.
type Qwen3CoderParser struct {
	state qwenParserState  // current position in the state machine
	acc   strings.Builder  // unconsumed (possibly ambiguous) model output
	tools []api.Tool       // tools used to type-coerce parameter values
}

// HasToolSupport reports that this parser can extract tool calls (always true).
func (p *Qwen3CoderParser) HasToolSupport() bool { return true }

// HasThinkingSupport reports that qwen3-coder has no thinking section.
func (p *Qwen3CoderParser) HasThinkingSupport() bool { return false }

// Init stores the available tools; lastMessage and thinkValue are unused.
func (p *Qwen3CoderParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool {
	p.tools = tools
	return tools // Qwen doesn't modify tools
}

// Add feeds one streamed chunk into the parser and returns any content and
// completed tool calls that became unambiguous. Thinking is always empty for
// this parser; the done flag is currently unused. A tool-call parse failure
// aborts the whole call and returns the error.
func (p *Qwen3CoderParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) {
	p.acc.WriteString(s)

	events := p.parseEvents()

	var toolCalls []api.ToolCall
	var sb strings.Builder
	for _, event := range events {
		switch event := event.(type) {
		case qwenEventRawToolCall:
			toolCall, err := parseToolCall(event, p.tools)
			if err != nil {
				slog.Warn("qwen tool call parsing failed", "error", err)
				return "", "", nil, err
			}
			toolCalls = append(toolCalls, toolCall)
		case qwenEventContent:
			// TODO(drifkin): if the same turn contains multiple interleaved content
			// events, we naively append them together here. See the note below about
			// `qwenEvent`s for more details
			sb.WriteString(event.content)
		}
	}

	return sb.String(), "", toolCalls, nil
}

// parseEvents repeatedly calls eat until it reports no further state
// transition, accumulating every event produced along the way.
func (p *Qwen3CoderParser) parseEvents() []qwenEvent {
	var all []qwenEvent

	keepLooping := true
	for keepLooping {
		var events []qwenEvent
		events, keepLooping = eat(p)
		if len(events) > 0 {
			all = append(all, events...)
		}
	}

	if len(all) > 0 {
		slog.Log(context.TODO(), logutil.LevelTrace, "qwen events parsed", "events", all, "state", p.state, "acc", p.acc.String())
	}

	return all
}

// we use some internal event types in order to communicate between `Add` and
// `eat`. We do this to support interleaving content and parallel tool calls in
// the parser, even though qwen3-coder isn't supposed to do this. Our API
// doesn't currently support models outputting multiple messages in a turn, so
// we wouldn't be able to represent it yet, but there's no reason to prevent the
// parser from supporting it, especially for future models if they end up using
// a similar format.
type qwenEvent interface {
	isQwenEvent()
}

// qwenEventRawToolCall carries the raw (unparsed) text between a tool-call
// open and close tag.
type qwenEventRawToolCall struct {
	raw string
}

// qwenEventContent carries a chunk of unambiguous plain content.
type qwenEventContent struct {
	content string
}

func (qwenEventContent) isQwenEvent()     {}
func (qwenEventRawToolCall) isQwenEvent() {}

// eat consumes the parser's buffer, and returns a list of any unambiguous
// events from the current parser state. If the parser transitions to another
// state, it may have additional events to emit on the next call, which is what
// the second return value indicates
func eat(p *Qwen3CoderParser) ([]qwenEvent, bool) {
	var events []qwenEvent

	switch p.state {
	case qwenParserState_LookingForToolStart:
		if strings.Contains(p.acc.String(), toolOpenTag) {
			// we found a full tool open tag, so we can emit the content before the
			// tag, being sure to trim any trailing whitespace
			split := strings.SplitN(p.acc.String(), toolOpenTag, 2)
			before := split[0]
			before = strings.TrimRightFunc(before, unicode.IsSpace)
			if len(before) > 0 {
				events = append(events, qwenEventContent{content: before})
			}
			after := split[1]
			p.acc.Reset()
			p.acc.WriteString(after)
			p.state = qwenParserState_CollectingToolContent
			return events, true
		} else if overlap := overlap(p.acc.String(), toolOpenTag); overlap > 0 {
			// we found a partial tool open tag, so we can emit the unambiguous part,
			// which is the (trailing-whitespace trimmed) content before the partial
			// tool open tag
			beforePartialTag := p.acc.String()[:len(p.acc.String())-overlap]
			trailingWhitespaceLen := trailingWhitespaceLen(beforePartialTag)
			ambiguousStart := len(beforePartialTag) - trailingWhitespaceLen
			unambiguous := p.acc.String()[:ambiguousStart]
			ambiguous := p.acc.String()[ambiguousStart:]
			p.acc.Reset()
			p.acc.WriteString(ambiguous)
			if len(unambiguous) > 0 {
				events = append(events, qwenEventContent{content: unambiguous})
			}
			return events, false
		} else {
			// we found content that is entirely not a tool call. We should withhold
			// any trailing whitespace in case this is the end of the content
			whitespaceLen := trailingWhitespaceLen(p.acc.String())
			ambiguousStart := len(p.acc.String()) - whitespaceLen
			unambiguous := p.acc.String()[:ambiguousStart]
			ambiguous := p.acc.String()[ambiguousStart:]
			p.acc.Reset()
			p.acc.WriteString(ambiguous)
			if len(unambiguous) > 0 {
				events = append(events, qwenEventContent{content: unambiguous})
			}
			return events, false
		}
	case qwenParserState_CollectingToolContent:
		if strings.Contains(p.acc.String(), toolCloseTag) {
			split := strings.SplitN(p.acc.String(), toolCloseTag, 2)
			before := split[0]
			if len(before) == 0 {
				slog.Warn("qwen tool call closing tag found but no content before it")
			}
			// remove any whitespace between the tool call and any content after it
			after := strings.TrimLeftFunc(split[1], unicode.IsSpace)
			p.acc.Reset()
			p.acc.WriteString(after)
			events = append(events, qwenEventRawToolCall{raw: before})
			p.state = qwenParserState_LookingForToolStart
			return events, true
		} else {
			// note that we don't need to check the overlap here because we only plan
			// on parsing the tool call once we see the full closing tag. We don't
			// stream back the unparsed tool content, so there's no need to be eager
			// here
			return events, false
		}
	default:
		panic("unreachable")
	}
}

// TODO(drifkin): move this to a shared location
// longest overlap between suffix of s and prefix of delim
func overlap(s, delim string) int {
	max := min(len(delim), len(s))
	for i := max; i > 0; i-- {
		if strings.HasSuffix(s, delim[:i]) {
			return i
		}
	}
	return 0
}

// trailingWhitespaceLen returns the length in bytes of the run of Unicode
// whitespace at the end of s, decoding runes from the back of the string.
func trailingWhitespaceLen(s string) int {
	remaining := s
	total := 0
	for len(remaining) > 0 {
		r, size := utf8.DecodeLastRuneInString(remaining)
		// if it's an invalid utf8 rune, assume it isn't whitespace
		if r == utf8.RuneError && size == 1 {
			break
		}
		if !unicode.IsSpace(r) {
			break
		}
		total += size
		remaining = remaining[:len(remaining)-size]
	}
	return total
}

// XMLFunctionCall is the unmarshal target for a transformed tool call:
// a <function name="..."> element containing <parameter name="..."> children.
type XMLFunctionCall struct {
	XMLName    xml.Name       `xml:"function"`
	Name       string         `xml:"name,attr"`
	Parameters []XMLParameter `xml:"parameter"`
}

// XMLParameter is one <parameter name="...">value</parameter> element.
type XMLParameter struct {
	Name  string `xml:"name,attr"`
	Value string `xml:",chardata"`
}

// parseToolCall parses a raw tool call string into an api.ToolCall.
// The raw string follows an xml-like format, here's an example:
//
// <function=get_current_temperature>
// <parameter=location>
// San Francisco
// </parameter>
// <parameter=unit>
// celsius
// </parameter>
// </function>
func parseToolCall(raw qwenEventRawToolCall, tools []api.Tool) (api.ToolCall, error) {
	toolCall := api.ToolCall{}

	xmlString := transformToXML(raw.raw)

	var functionCall XMLFunctionCall
	err := xml.Unmarshal([]byte(xmlString), &functionCall)
	if err != nil {
		return api.ToolCall{}, err
	}

	toolCall.Function = api.ToolCallFunction{
		Name: functionCall.Name,
	}

	// Find the matching tool to get parameter types
	var matchedTool *api.Tool
	for i := range tools {
		if tools[i].Function.Name == functionCall.Name {
			matchedTool = &tools[i]
			break
		}
	}

	toolCall.Function.Arguments = api.NewToolCallFunctionArguments()
	for _, parameter := range functionCall.Parameters {
		// Look up the parameter type if we found the tool
		var paramType api.PropertyType
		if matchedTool != nil && matchedTool.Function.Parameters.Properties != nil {
			if prop, ok := matchedTool.Function.Parameters.Properties.Get(parameter.Name); ok {
				// Handle anyOf by collecting all types from the union
				if len(prop.AnyOf) > 0 {
					for _, anyOfProp := range prop.AnyOf {
						paramType = append(paramType, anyOfProp.Type...)
					}
				} else {
					paramType = prop.Type
				}
			}
		}
		toolCall.Function.Arguments.Set(parameter.Name, parseValue(parameter.Value, paramType))
	}

	return toolCall, nil
}

// parseValue converts a raw string value to the appropriate type based on the parameter type specification.
//
// For union types (multiple types in PropertyType, which we support but doesn't
// seem as though the reference parser does type coercion with those types in
// mind) we use a type precedence approach:
// 1. null - checked first regardless of declared types (matches reference implementation)
// 2. boolean - only "true"/"false" are valid booleans
// 3. integer - must parse as a whole number
// 4. number - must parse as numeric (returns int if no decimal part)
// 5. array - must parse as valid JSON array
// 6. object - must parse as valid JSON object
// 7. string - always succeeds (least specific type)
//
// This precedence ensures we return the most specific type that successfully parses,
// following the principle of least surprise. For example, with PropertyType{"string", "number"},
// "123" becomes 123 (number), while "hello" becomes "hello" (string).
func parseValue(raw string, paramType api.PropertyType) any {
	// first remove a single leading newlines, and a single trailing newline (if
	// they exist). This follows the reference implementation
	raw = strings.TrimPrefix(raw, "\n")
	raw = strings.TrimSuffix(raw, "\n")

	// Check for null first (case-insensitive) - this takes precedence over any type
	if strings.ToLower(raw) == "null" {
		return nil
	}

	// If no type is specified, default to string
	if len(paramType) == 0 {
		return raw
	}

	// Check if any of the specified types match, using type precedence
	// Order: boolean -> integer -> number -> array -> object -> string
	typeSet := make(map[string]bool)
	for _, t := range paramType {
		typeSet[t] = true
	}

	// Try boolean first (most restrictive)
	if typeSet["boolean"] {
		lower := strings.ToLower(raw)
		switch lower {
		case "true":
			return true
		case "false":
			return false
		}
		// If not a valid boolean but boolean is the only type, return false (matching reference)
		if len(paramType) == 1 {
			return false
		}
		// Otherwise try other types
	}

	// Try integer
	if typeSet["integer"] {
		if i, err := strconv.ParseInt(raw, 10, 64); err == nil {
			// Return as int if it fits in int32, otherwise int64
			if i >= math.MinInt32 && i <= math.MaxInt32 {
				return int(i)
			}
			return i
		}
		// If integer is the only type and parsing failed, fall back to string
		if len(paramType) == 1 {
			return raw
		}
	}

	// Try number (float)
	if typeSet["number"] {
		if f, err := strconv.ParseFloat(raw, 64); err == nil {
			// If the number has no decimal part, return as int (matching reference)
			if f == math.Trunc(f) {
				i := int64(f)
				if i >= math.MinInt32 && i <= math.MaxInt32 {
					return int(i)
				}
				return i
			}
			return f
		}
		// If number is the only type and parsing failed, fall back to string
		if len(paramType) == 1 {
			return raw
		}
	}

	// Try array
	if typeSet["array"] {
		var arr []any
		if err := json.Unmarshal([]byte(raw), &arr); err == nil {
			return arr
		}
		// If array is the only type and parsing failed, fall back to string
		if len(paramType) == 1 {
			return raw
		}
	}

	// Try object
	if typeSet["object"] {
		var obj map[string]any
		if err := json.Unmarshal([]byte(raw), &obj); err == nil {
			return obj
		}
		// If object is the only type and parsing failed, fall back to string
		if len(paramType) == 1 {
			return raw
		}
	}

	// String always succeeds (or if "string" is in the type set)
	if typeSet["string"] {
		return raw
	}

	// If we get here, none of the types matched and string wasn't an option
	// We return string as a fallback. The reference implementation will attempt
	// to parse the value as a python literal, but we purposefully don't support
	// that
	return raw
}

var (
	// qwenTagRegex matches the model's `<tag=value>` pseudo-xml open tags.
	qwenTagRegex = regexp.MustCompile(`<(\w+)=([^>]+)>`)
	// qwenXMLTagRegex matches the valid function/parameter tags produced by
	// the transformation, so text between them can be escaped.
	qwenXMLTagRegex = regexp.MustCompile(`</?(?:function|parameter)(?:\s+name="[^"]*")?>`)
)

// transformToXML transforms a raw qwen tool call with xml-like tags into valid
// xml so that it can be parsed by any xml parser
func transformToXML(raw string) string {
	// take the form `<tag=abc>` and transform it to `<tag name="abc">`, taking
	// care to properly escape the string that becomes the attribute value
	transformed := qwenTagRegex.ReplaceAllStringFunc(raw, func(match string) string {
		groups := qwenTagRegex.FindStringSubmatch(match)
		tag := groups[1]
		var escapedValue strings.Builder
		_ = xml.EscapeText(&escapedValue, []byte(groups[2])) // error is always nil for strings.Builder
		return fmt.Sprintf(`<%s name="%s">`, tag, escapedValue.String())
	})

	// Walk the resulting string, escaping any character data that sits between the
	// xml tags we just emitted
	var out strings.Builder
	lastIdx := 0
	for _, loc := range qwenXMLTagRegex.FindAllStringIndex(transformed, -1) {
		if loc[0] > lastIdx {
			escapeTextNode(&out, transformed[lastIdx:loc[0]])
		}
		out.WriteString(transformed[loc[0]:loc[1]])
		lastIdx = loc[1]
	}
	if lastIdx < len(transformed) {
		escapeTextNode(&out, transformed[lastIdx:])
	}
	return out.String()
}

// escapeTextNode escapes XML character data without altering other characters
// like newlines or tabs (which is why we don't use xml.EscapeText for this)
func escapeTextNode(sb *strings.Builder, s string) {
	for _, r := range s {
		switch r {
		case '&':
			sb.WriteString("&amp;")
		case '<':
			sb.WriteString("&lt;")
		case '>':
			sb.WriteString("&gt;")
		default:
			sb.WriteRune(r)
		}
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/functiongemma_test.go
model/parsers/functiongemma_test.go
package parsers

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/ollama/ollama/api"
	"github.com/stretchr/testify/assert"
)

// TestFunctionGemmaParser drives FunctionGemmaParser with output split into
// tiny chunks, simulating streaming. Tool calls use the format
// <start_function_call>call:name{key:<escape>value<escape>, ...}<end_function_call>.
// Helpers testPropsMap/testArgs/argsComparer are shared test utilities —
// presumably defined in a sibling test file in this package.
func TestFunctionGemmaParser(t *testing.T) {
	tests := []struct {
		name          string
		chunks        []string       // streamed input, one Add call per chunk
		tools         []api.Tool     // tools passed to Init (may be nil)
		expectedCalls []api.ToolCall // tool calls accumulated across all chunks
		expectedText  string         // content accumulated across all chunks
	}{
		{
			name:          "plain_content",
			chunks:        []string{"H", "e", "l", "l", "o", ",", " ", "w", "o", "r", "l", "d", "!"},
			expectedCalls: nil,
			expectedText:  "Hello, world!",
		},
		{
			name: "simple_tool_call",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "get", "_", "weather",
				"{", "city", ":", "<", "escape", ">", "Paris", "<", "escape", ">", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			tools: []api.Tool{
				{
					Type: "function",
					Function: api.ToolFunction{
						Name: "get_weather",
						Parameters: api.ToolFunctionParameters{
							Type: "object",
							Properties: testPropsMap(map[string]api.ToolProperty{
								"city": {Type: api.PropertyType{"string"}},
							}),
						},
					},
				},
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "Paris"}),
					},
				},
			},
			expectedText: "",
		},
		{
			name: "content_before_tool_call",
			chunks: []string{
				"L", "et", " ", "me", " ", "check", ".",
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "get", "_", "weather",
				"{", "city", ":", "<", "escape", ">", "Paris", "<", "escape", ">", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "Paris"}),
					},
				},
			},
			expectedText: "Let me check.",
		},
		{
			// unquoted numeric values decode as int64
			name: "numeric_arguments",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "add",
				"{", "a", ":", "1", ",", "b", ":", "2", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "add",
						Arguments: testArgs(map[string]any{"a": int64(1), "b": int64(2)}),
					},
				},
			},
			expectedText: "",
		},
		{
			name: "boolean_arguments",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "set", "_", "flag",
				"{", "enabled", ":", "true", ",", "verbose", ":", "false", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "set_flag",
						Arguments: testArgs(map[string]any{"enabled": true, "verbose": false}),
					},
				},
			},
			expectedText: "",
		},
		{
			name: "multiple_tool_calls",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "get", "_", "weather",
				"{", "city", ":", "<", "escape", ">", "Paris", "<", "escape", ">", "}",
				"<", "end", "_", "function", "_", "call", ">",
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "get", "_", "weather",
				"{", "city", ":", "<", "escape", ">", "London", "<", "escape", ">", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "Paris"}),
					},
				},
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "London"}),
					},
				},
			},
			expectedText: "",
		},
		{
			name: "array_argument",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "process",
				"{", "items", ":", "[", "<", "escape", ">", "a", "<", "escape", ">", ",",
				"<", "escape", ">", "b", "<", "escape", ">", ",",
				"<", "escape", ">", "c", "<", "escape", ">", "]", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "process",
						Arguments: testArgs(map[string]any{"items": []any{"a", "b", "c"}}),
					},
				},
			},
			expectedText: "",
		},
		{
			name: "object_argument",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "update",
				"{", "data", ":", "{", "name", ":", "<", "escape", ">", "test", "<", "escape", ">",
				",", "value", ":", "42", "}", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name: "update",
						Arguments: testArgs(map[string]any{
							"data": map[string]any{"name": "test", "value": int64(42)},
						}),
					},
				},
			},
			expectedText: "",
		},
		{
			name:          "empty_input",
			chunks:        []string{},
			expectedCalls: nil,
			expectedText:  "",
		},
		{
			name: "tool_call_with_no_arguments",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "get", "_", "time",
				"{", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_time",
						Arguments: api.NewToolCallFunctionArguments(),
					},
				},
			},
			expectedText: "",
		},
		{
			// a bare "<" that never becomes a start tag must pass through as content
			name: "content_with_angle_brackets",
			chunks: []string{
				"The", " ", "result", " ", "is", " ", "a", " ", "<", "value", ">", " ", "tag",
			},
			expectedCalls: nil,
			expectedText:  "The result is a <value> tag",
		},
		{
			name: "float_argument",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "set", "_", "temp",
				"{", "value", ":", "3", ".", "14", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "set_temp",
						Arguments: testArgs(map[string]any{"value": 3.14}),
					},
				},
			},
			expectedText: "",
		},
		{
			name: "content_after_tool_call",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "test", "{", "}",
				"<", "end", "_", "function", "_", "call", ">",
				"Done", "!",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "test",
						Arguments: api.NewToolCallFunctionArguments(),
					},
				},
			},
			expectedText: "Done!",
		},
		{
			name: "unicode_content_and_arguments",
			chunks: []string{
				"こんにちは", " ",
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "greet",
				"{", "name", ":", "<", "escape", ">", "日本語", "<", "escape", ">", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "greet",
						Arguments: testArgs(map[string]any{"name": "日本語"}),
					},
				},
			},
			expectedText: "こんにちは ",
		},
		{
			name: "multiple_params_sorted",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "search",
				"{", "query", ":", "<", "escape", ">", "test", "<", "escape", ">", ",",
				"limit", ":", "10", ",", "offset", ":", "0", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name: "search",
						Arguments: testArgs(map[string]any{
							"query":  "test",
							"limit":  int64(10),
							"offset": int64(0),
						}),
					},
				},
			},
			expectedText: "",
		},
		{
			name: "nested_object_argument",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "create",
				"{", "config", ":", "{", "settings", ":", "{", "enabled", ":", "true", ",",
				"name", ":", "<", "escape", ">", "test", "<", "escape", ">", "}", "}", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name: "create",
						Arguments: testArgs(map[string]any{
							"config": map[string]any{
								"settings": map[string]any{
									"enabled": true,
									"name":    "test",
								},
							},
						}),
					},
				},
			},
			expectedText: "",
		},
		{
			// "<start" never completes into the full start tag, so it is content
			name: "partial_start_tag_in_content",
			chunks: []string{
				"Hello", " ", "<", "start", " ", "world",
			},
			expectedCalls: nil,
			expectedText:  "Hello <start world",
		},
		{
			name: "parallel_tool_calls",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "get", "_", "weather",
				"{", "city", ":", "<", "escape", ">", "Paris", "<", "escape", ">", "}",
				"<", "end", "_", "function", "_", "call", ">",
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "get", "_", "time",
				"{", "timezone", ":", "<", "escape", ">", "UTC", "<", "escape", ">", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "get_weather",
						Arguments: testArgs(map[string]any{"city": "Paris"}),
					},
				},
				{
					Function: api.ToolCallFunction{
						Name:      "get_time",
						Arguments: testArgs(map[string]any{"timezone": "UTC"}),
					},
				},
			},
			expectedText: "",
		},
		{
			name: "content_between_tool_calls",
			chunks: []string{
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "first", "{", "}",
				"<", "end", "_", "function", "_", "call", ">",
				"Some", " ", "text", " ", "here",
				"<", "start", "_", "function", "_", "call", ">", "call", ":", "second", "{", "}",
				"<", "end", "_", "function", "_", "call", ">",
			},
			expectedCalls: []api.ToolCall{
				{
					Function: api.ToolCallFunction{
						Name:      "first",
						Arguments: api.NewToolCallFunctionArguments(),
					},
				},
				{
					Function: api.ToolCallFunction{
						Name:      "second",
						Arguments: api.NewToolCallFunctionArguments(),
					},
				},
			},
			expectedText: "Some text here",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			parser := &FunctionGemmaParser{}
			parser.Init(tt.tools, nil, nil)

			var allContent string
			var allCalls []api.ToolCall
			// feed one chunk per Add; done is true only on the final chunk
			for i, chunk := range tt.chunks {
				done := i == len(tt.chunks)-1
				content, _, calls, err := parser.Add(chunk, done)
				assert.NoError(t, err)
				allContent += content
				allCalls = append(allCalls, calls...)
			}

			// Handle empty chunks case
			if len(tt.chunks) == 0 {
				content, _, calls, err := parser.Add("", true)
				assert.NoError(t, err)
				allContent = content
				allCalls = calls
			}

			assert.Equal(t, tt.expectedText, allContent)
			if diff := cmp.Diff(tt.expectedCalls, allCalls, argsComparer); diff != "" {
				t.Errorf("calls mismatch (-want +got):\n%s", diff)
			}
		})
	}
}

// TestFunctionGemmaParser_HasSupport checks the parser's capability flags:
// tool calls supported, thinking not supported.
func TestFunctionGemmaParser_HasSupport(t *testing.T) {
	parser := &FunctionGemmaParser{}
	assert.True(t, parser.HasToolSupport())
	assert.False(t, parser.HasThinkingSupport())
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/deepseek3_test.go
model/parsers/deepseek3_test.go
package parsers import ( "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestDeepSeekParser(t *testing.T) { tests := []struct { name string input string expectedContent string expectedThinking string expectedCalls []api.ToolCall hasThinking bool }{ { name: "simple_content", input: "Hello, how are you?", expectedContent: "Hello, how are you?", hasThinking: false, }, { name: "thinking_content", input: "I need to think about this...</think>The answer is 42.", expectedThinking: "I need to think about this...", expectedContent: "The answer is 42.", hasThinking: true, }, { name: "no_thinking_simple", input: "Just a regular response.", expectedContent: "Just a regular response.", hasThinking: false, }, { name: "thinking_with_newlines", input: "Let me think:\n- Point 1\n- Point 2</think>\n\nHere's my answer.", expectedThinking: "Let me think:\n- Point 1\n- Point 2", expectedContent: "Here's my answer.", hasThinking: true, }, { name: "tool_call_simple", input: "I'll check the weather.<|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{\"location\":\"Paris\"}<|tool▁call▁end|><|tool▁calls▁end|>", expectedContent: "I'll check the weather.", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, hasThinking: false, }, { name: "multiple_tool_calls", input: "Getting weather for both cities.<|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{\"location\":\"Paris\"}<|tool▁call▁end|><|tool▁call▁begin|>get_weather<|tool▁sep|>{\"location\":\"London\"}<|tool▁call▁end|><|tool▁calls▁end|>", expectedContent: "Getting weather for both cities.", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "London", }), }, }, }, hasThinking: false, 
}, { name: "tool_output", input: "Here's the weather: <|tool▁output▁begin|>Temperature: 22°C, Sunny<|tool▁output▁end|> Hope that helps!", expectedContent: "Here's the weather: Temperature: 22°C, Sunny Hope that helps!", hasThinking: false, }, { name: "complex_tool_arguments", input: "Processing data.<|tool▁calls▁begin|><|tool▁call▁begin|>process_data<|tool▁sep|>{\"items\":[\"item1\",\"item2\"],\"config\":{\"enabled\":true,\"threshold\":0.95}}<|tool▁call▁end|><|tool▁calls▁end|>", expectedContent: "Processing data.", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "process_data", Arguments: testArgs(map[string]any{ "items": []interface{}{"item1", "item2"}, "config": map[string]interface{}{"enabled": true, "threshold": 0.95}, }), }, }, }, hasThinking: false, }, { name: "thinking_with_tool_call", // technically this can't happen, but the parser can handle it input: "Let me check the weather...</think>I'll get that for you.<|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{\"location\":\"Paris\"}<|tool▁call▁end|><|tool▁calls▁end|>", expectedThinking: "Let me check the weather...", expectedContent: "I'll get that for you.", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, hasThinking: true, }, { name: "empty_content", input: "", expectedContent: "", hasThinking: false, }, { name: "only_thinking", input: "Just thinking content</think>", expectedThinking: "Just thinking content", expectedContent: "", hasThinking: true, }, { name: "multiple_tool_outputs", input: "Results: <|tool▁output▁begin|>Paris: 22°C<|tool▁output▁end|> and <|tool▁output▁begin|>London: 18°C<|tool▁output▁end|>", expectedContent: "Results: Paris: 22°C and London: 18°C", hasThinking: false, }, { name: "unicode_content", input: "مرحبا بالعالم! 你好世界! 🌍", expectedContent: "مرحبا بالعالم! 你好世界! 
🌍", hasThinking: false, }, { name: "emoji_passthrough", input: "Task completed ✅ 🎉", expectedContent: "Task completed ✅ 🎉", hasThinking: false, }, { name: "emoji_after_tool_call", input: "I'll help you.<|tool▁calls▁begin|><|tool▁call▁begin|>get_weather<|tool▁sep|>{\"location\":\"Tokyo\"}<|tool▁call▁end|><|tool▁calls▁end|>完成 ✅", expectedContent: "I'll help you.完成 ✅", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Tokyo", }), }, }, }, hasThinking: false, }, { name: "newlines_and_whitespace", input: "Line 1\n\nLine 3\t\tTabbed content", expectedContent: "Line 1\n\nLine 3\t\tTabbed content", hasThinking: false, }, { name: "thinking_with_unicode", input: "我在思考这个问题...</think>答案是42。", expectedThinking: "我在思考这个问题...", expectedContent: "答案是42。", hasThinking: true, }, { name: "tool_call_with_unicode_args", input: "Searching for information.<|tool▁calls▁begin|><|tool▁call▁begin|>search<|tool▁sep|>{\"query\":\"北京天气\",\"language\":\"中文\"}<|tool▁call▁end|><|tool▁calls▁end|>", expectedContent: "Searching for information.", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "search", Arguments: testArgs(map[string]any{ "query": "北京天气", "language": "中文", }), }, }, }, hasThinking: false, }, { name: "tool_output_with_unicode", input: "天气信息: <|tool▁output▁begin|>北京: 25°C, 晴天<|tool▁output▁end|> 希望对您有帮助!", expectedContent: "天气信息: 北京: 25°C, 晴天 希望对您有帮助!", hasThinking: false, }, { name: "mixed_content_with_special_chars", input: "Price: $100 & tax @ 10% = $110 <|tool▁output▁begin|>Total: $110<|tool▁output▁end|> (final)", expectedContent: "Price: $100 & tax @ 10% = $110 Total: $110 (final)", hasThinking: false, }, { name: "tool_call_with_special_chars", input: "Processing data.<|tool▁calls▁begin|><|tool▁call▁begin|>execute_command<|tool▁sep|>{\"command\":\"ls && echo \\\"done\\\"\",\"path\":\"/home/user\"}<|tool▁call▁end|><|tool▁calls▁end|>", expectedContent: "Processing data.", 
expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "execute_command", Arguments: testArgs(map[string]any{ "command": "ls && echo \"done\"", "path": "/home/user", }), }, }, }, hasThinking: false, }, { name: "thinking_with_special_chars", input: "Let me calculate: 2+2=4 & 3*3=9...</think>The results are correct!", expectedThinking: "Let me calculate: 2+2=4 & 3*3=9...", expectedContent: "The results are correct!", hasThinking: true, }, { name: "empty_tool_call_args", input: "Pinging server.<|tool▁calls▁begin|><|tool▁call▁begin|>ping<|tool▁sep|>{}<|tool▁call▁end|><|tool▁calls▁end|>", expectedContent: "Pinging server.", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "ping", Arguments: api.NewToolCallFunctionArguments(), }, }, }, hasThinking: false, }, { name: "empty_tool_output", input: "Checking status: <|tool▁output▁begin|><|tool▁output▁end|> No output received.", expectedContent: "Checking status: No output received.", hasThinking: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &DeepSeek3Parser{hasThinkingSupport: tt.hasThinking} parser.Init([]api.Tool{}, nil, &api.ThinkValue{Value: tt.hasThinking}) content, thinking, calls, err := parser.Add(tt.input, true) if err != nil { t.Fatalf("Add() error = %v", err) } if diff := cmp.Diff(tt.expectedContent, content); diff != "" { t.Errorf("Content mismatch (-want +got):\n%s", diff) } if diff := cmp.Diff(tt.expectedThinking, thinking); diff != "" { t.Errorf("Thinking mismatch (-want +got):\n%s", diff) } if diff := cmp.Diff(tt.expectedCalls, calls, argsComparer); diff != "" { t.Errorf("Tool calls mismatch (-want +got):\n%s", diff) } }) } } func TestDeepSeekParser_Streaming(t *testing.T) { tests := []struct { name string chunks []string expectedContent string expectedThinking string expectedCalls []api.ToolCall hasThinking bool }{ { name: "streaming_simple_content", chunks: []string{"Hello, ", "how are ", "you?"}, expectedContent: "Hello, how are 
you?", hasThinking: false, }, { name: "streaming_thinking", chunks: []string{"I need to ", "think about this", "...</think>", "The answer is 42."}, expectedThinking: "I need to think about this...", expectedContent: "The answer is 42.", hasThinking: true, }, { name: "streaming_tool_call", chunks: []string{"I'll check weather.", "<|tool▁calls▁begin|>", "<|tool▁call▁begin|>get_weather", "<|tool▁sep|>{\"location\":\"Paris\"}", "<|tool▁call▁end|><|tool▁calls▁end|>"}, expectedContent: "I'll check weather.", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, hasThinking: false, }, { name: "streaming_thinking_with_partial_tag", chunks: []string{"Thinking about this", "...</", "think>", "Done thinking."}, expectedThinking: "Thinking about this...", expectedContent: "Done thinking.", hasThinking: true, }, { name: "streaming_tool_output", chunks: []string{"Weather info: ", "<|tool▁output▁begin|>", "25°C, Sunny", "<|tool▁output▁end|>", " Enjoy!"}, expectedContent: "Weather info: 25°C, Sunny Enjoy!", hasThinking: false, }, { name: "streaming_with_split_tags", chunks: []string{"Content before ", "<|tool▁calls▁begin|><|tool▁call▁begin|>test", "<|tool▁sep|>{}", "<|tool▁call▁end|><|tool▁calls▁end|>", " after"}, expectedContent: "Content before after", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "test", Arguments: api.NewToolCallFunctionArguments(), }, }, }, hasThinking: false, }, { name: "streaming_thinking_with_split_end_tag", chunks: []string{"Thinking content", "</th", "ink>", "Regular content"}, expectedThinking: "Thinking content", expectedContent: "Regular content", hasThinking: true, }, { name: "streaming_unicode_content", chunks: []string{"مرحبا ", "بالعالم! ", "你好", "世界!"}, expectedContent: "مرحبا بالعالم! 
你好世界!", hasThinking: false, }, { name: "streaming_multiple_tool_outputs", chunks: []string{"Results: ", "<|tool▁output▁begin|>", "Paris: 22°C", "<|tool▁output▁end|>", " and ", "<|tool▁output▁begin|>", "London: 18°C", "<|tool▁output▁end|>"}, expectedContent: "Results: Paris: 22°C and London: 18°C", hasThinking: false, }, { name: "streaming_tool_call_with_split_json", chunks: []string{"Processing.", "<|tool▁calls▁begin|><|tool▁call▁begin|>calc<|tool▁sep|>{\"x\":", "42,\"y\":", "24}<|tool▁call▁end|><|tool▁calls▁end|>"}, expectedContent: "Processing.", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "calc", Arguments: testArgs(map[string]any{ "x": float64(42), "y": float64(24), }), }, }, }, hasThinking: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &DeepSeek3Parser{hasThinkingSupport: tt.hasThinking} parser.Init([]api.Tool{}, nil, &api.ThinkValue{Value: tt.hasThinking}) var allContent, allThinking string var allCalls []api.ToolCall for i, chunk := range tt.chunks { done := i == len(tt.chunks)-1 content, thinking, calls, err := parser.Add(chunk, done) if err != nil { t.Fatalf("Add() error = %v", err) } allContent += content allThinking += thinking allCalls = append(allCalls, calls...) 
} if diff := cmp.Diff(tt.expectedContent, allContent); diff != "" { t.Errorf("Content mismatch (-want +got):\n%s", diff) } if diff := cmp.Diff(tt.expectedThinking, allThinking); diff != "" { t.Errorf("Thinking mismatch (-want +got):\n%s", diff) } if diff := cmp.Diff(tt.expectedCalls, allCalls, argsComparer); diff != "" { t.Errorf("Tool calls mismatch (-want +got):\n%s", diff) } }) } } func TestDeepSeekParser_HasThinkingSupport(t *testing.T) { tests := []struct { name string hasThinking bool expectedSupport bool }{ { name: "thinking_enabled", hasThinking: true, expectedSupport: true, }, { name: "thinking_disabled", hasThinking: false, expectedSupport: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &DeepSeek3Parser{hasThinkingSupport: tt.hasThinking} if got := parser.HasThinkingSupport(); got != tt.expectedSupport { t.Errorf("HasThinkingSupport() = %v, want %v", got, tt.expectedSupport) } }) } } func TestDeepSeekParser_HasToolSupport(t *testing.T) { parser := &DeepSeek3Parser{} if !parser.HasToolSupport() { t.Error("HasToolSupport() should return true") } } func TestDeepSeekParser_Init(t *testing.T) { parser := &DeepSeek3Parser{hasThinkingSupport: true} tools := []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "test_tool", }, }, } returnedTools := parser.Init(tools, nil, &api.ThinkValue{Value: true}) if diff := cmp.Diff(tools, returnedTools, toolsComparer); diff != "" { t.Errorf("Init() returned tools mismatch (-want +got):\n%s", diff) } // Test initial state is set to thinking when enabled if parser.state != DeepSeekCollectingThinking { t.Errorf("Expected initial state to be DeepSeekCollectingThinking, got %v", parser.state) } } func TestDeepSeek3Parser_parseToolCallContent(t *testing.T) { tests := []struct { name string content string expected api.ToolCall expectError bool }{ { name: "valid_tool_call", content: "get_weather<|tool▁sep|>{\"location\":\"Paris\"}", expected: api.ToolCall{ Function: 
api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, { name: "complex_arguments", content: "process_data<|tool▁sep|>{\"items\":[\"a\",\"b\"],\"config\":{\"enabled\":true}}", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "process_data", Arguments: testArgs(map[string]any{ "items": []interface{}{"a", "b"}, "config": map[string]interface{}{"enabled": true}, }), }, }, }, { name: "empty_arguments", content: "ping<|tool▁sep|>{}", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "ping", Arguments: api.NewToolCallFunctionArguments(), }, }, }, { name: "unicode_in_tool_name", content: "获取天气<|tool▁sep|>{\"城市\":\"北京\"}", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "获取天气", Arguments: testArgs(map[string]any{ "城市": "北京", }), }, }, }, { name: "special_chars_in_arguments", content: "execute<|tool▁sep|>{\"command\":\"ls && echo \\\"done\\\"\",\"path\":\"/home/user\"}", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "execute", Arguments: testArgs(map[string]any{ "command": "ls && echo \"done\"", "path": "/home/user", }), }, }, }, { name: "numeric_arguments", content: "calculate<|tool▁sep|>{\"x\":3.14,\"y\":42,\"enabled\":true}", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "calculate", Arguments: testArgs(map[string]any{ "x": 3.14, "y": float64(42), "enabled": true, }), }, }, }, { name: "invalid_format_no_separator", content: "get_weather{\"location\":\"Paris\"}", expectError: true, }, { name: "invalid_json", content: "get_weather<|tool▁sep|>{invalid json}", expectError: true, }, { name: "empty_tool_name", content: "<|tool▁sep|>{\"arg\":\"value\"}", expectError: false, // This should work, just empty name expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "", Arguments: testArgs(map[string]any{ "arg": "value", }), }, }, }, { name: "missing_json_part", content: "tool_name<|tool▁sep|>", expectError: true, }, } parser := 
&DeepSeek3Parser{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result, err := parser.parseToolCallContent(tt.content) if tt.expectError { if err == nil { t.Error("Expected error but got none") } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if diff := cmp.Diff(tt.expected, result, argsComparer); diff != "" { t.Errorf("parseToolCallContent() mismatch (-want +got):\n%s", diff) } }) } } func TestDeepSeekParser_EdgeCases(t *testing.T) { tests := []struct { name string input string expectedContent string expectedThinking string hasThinking bool }{ { name: "nested_think_tags_in_thinking", input: "Outer thinking <think>inner</think> content</think>Final content", expectedThinking: "Outer thinking <think>inner", expectedContent: "content</think>Final content", hasThinking: true, }, { name: "multiple_think_close_tags", input: "First thought</think>Second thought</think>Final content", expectedThinking: "First thought", expectedContent: "Second thought</think>Final content", hasThinking: true, }, { name: "empty_thinking_content", input: "</think>Just content", expectedThinking: "", expectedContent: "Just content", hasThinking: true, }, { name: "thinking_disabled_with_think_tags", input: "Some content</think>More content", expectedContent: "Some content</think>More content", hasThinking: false, }, { name: "malformed_tool_call_missing_sep", input: "Testing.<|tool▁calls▁begin|><|tool▁call▁begin|>bad_tool{\"arg\":\"value\"}<|tool▁call▁end|><|tool▁calls▁end|>", expectedContent: "Testing.", hasThinking: false, }, { name: "malformed_tool_call_invalid_json", input: "Testing.<|tool▁calls▁begin|><|tool▁call▁begin|>bad_tool<|tool▁sep|>{invalid json}<|tool▁call▁end|><|tool▁calls▁end|>", expectedContent: "Testing.", hasThinking: false, }, { name: "partial_tool_tag_at_end", input: "Content with partial <|tool▁calls▁", expectedContent: "Content with partial <|tool▁calls▁", hasThinking: false, }, { name: "partial_think_tag_at_end", input: "Thinking 
content</th", expectedContent: "Thinking content</th", hasThinking: false, }, { name: "partial_think_tag_at_end_with_thinking", input: "Thinking content</th", expectedThinking: "Thinking content", expectedContent: "", hasThinking: true, }, { name: "whitespace_only_content", input: " \n\t ", expectedContent: " \n\t ", hasThinking: false, }, { name: "tool_output_with_newlines", input: "Output:\n<|tool▁output▁begin|>Line 1\nLine 2\nLine 3<|tool▁output▁end|>\nDone.", expectedContent: "Output:\nLine 1\nLine 2\nLine 3\nDone.", hasThinking: false, }, { name: "consecutive_tool_calls", input: "First.<|tool▁calls▁begin|><|tool▁call▁begin|>tool1<|tool▁sep|>{}<|tool▁call▁end|><|tool▁calls▁end|>Second.<|tool▁calls▁begin|><|tool▁call▁begin|>tool2<|tool▁sep|>{}<|tool▁call▁end|><|tool▁calls▁end|>", expectedContent: "First.", hasThinking: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &DeepSeek3Parser{hasThinkingSupport: tt.hasThinking} parser.Init([]api.Tool{}, nil, &api.ThinkValue{Value: tt.hasThinking}) content, thinking, _, err := parser.Add(tt.input, true) if err != nil { t.Fatalf("Add() error = %v", err) } if diff := cmp.Diff(tt.expectedContent, content); diff != "" { t.Errorf("Content mismatch (-want +got):\n%s", diff) } if diff := cmp.Diff(tt.expectedThinking, thinking); diff != "" { t.Errorf("Thinking mismatch (-want +got):\n%s", diff) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/qwen3coder_test.go
model/parsers/qwen3coder_test.go
package parsers import ( "reflect" "testing" "github.com/ollama/ollama/api" ) // tool creates a test tool with the given name and properties func tool(name string, props map[string]api.ToolProperty) api.Tool { t := api.Tool{Type: "function", Function: api.ToolFunction{Name: name}} t.Function.Parameters.Type = "object" t.Function.Parameters.Properties = testPropsMap(props) return t } func TestQwenParserStreaming(t *testing.T) { type step struct { input string wantEvents []qwenEvent } cases := []struct { desc string steps []step only bool }{ { desc: "simple message streamed word by word", steps: []step{ { input: "hi", wantEvents: []qwenEvent{qwenEventContent{content: "hi"}}, }, { input: " there", wantEvents: []qwenEvent{qwenEventContent{content: " there"}}, }, }, }, { desc: "content before tool call", steps: []step{ { input: "hi there<tool_call>", wantEvents: []qwenEvent{qwenEventContent{content: "hi there"}}, }, }, }, { desc: "multiple tool calls in one message", steps: []step{ { input: "before1<tool_call>in tool call</tool_call>after1<tool_call>in tool call 2</tool_call>after2", wantEvents: []qwenEvent{ qwenEventContent{content: "before1"}, qwenEventRawToolCall{raw: "in tool call"}, qwenEventContent{content: "after1"}, qwenEventRawToolCall{raw: "in tool call 2"}, qwenEventContent{content: "after2"}, }, }, }, }, { desc: "tool calls with split tags", steps: []step{ { input: "before<tool", wantEvents: []qwenEvent{ qwenEventContent{content: "before"}, }, }, { input: "_call>in tool call</tool", wantEvents: []qwenEvent{}, }, { input: "_call>af", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "in tool call"}, qwenEventContent{content: "af"}, }, }, { input: "ter", wantEvents: []qwenEvent{ qwenEventContent{content: "ter"}, }, }, }, }, { desc: "trailing whitespace between content and tool call", steps: []step{ { input: "abc\n<tool_call>def</tool_call>", wantEvents: []qwenEvent{ qwenEventContent{content: "abc"}, qwenEventRawToolCall{raw: "def"}, }, }, }, }, { desc: 
"unambiguous empty: partial tool open at buffer start", steps: []step{ { input: "<tool_ca", wantEvents: []qwenEvent{}, }, { input: "ll>abc</tool_call>", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "abc"}, }, }, }, }, { desc: "trailing whitespace between tool call and content", steps: []step{ { input: "<tool_call>abc</tool_call>\ndef", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "abc"}, qwenEventContent{content: "def"}, }, }, }, }, { desc: "empty content before tool call", steps: []step{ { input: "\n<tool_call>abc</tool_call>", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "abc"}, }, }, }, }, { desc: "partial tool open tag fakeout", steps: []step{ { input: "abc\n<tool_call", wantEvents: []qwenEvent{ // \n should not be emitted yet because `<tool_call` might be a tool // open tag, in which case the whitespace should be trimmed qwenEventContent{content: "abc"}, }, }, { input: " fakeout", wantEvents: []qwenEvent{ qwenEventContent{content: "\n<tool_call fakeout"}, }, }, }, }, { desc: "token-by-token whitespace handling", steps: []step{ { input: "a", wantEvents: []qwenEvent{ qwenEventContent{content: "a"}, }, }, { input: "\n", wantEvents: []qwenEvent{}, }, { input: "b", wantEvents: []qwenEvent{ qwenEventContent{content: "\nb"}, }, }, }, }, { desc: "unicode content", steps: []step{ { input: "你好 🌍<tool_call>test</tool_call>مرحبا", wantEvents: []qwenEvent{ qwenEventContent{content: "你好 🌍"}, qwenEventRawToolCall{raw: "test"}, qwenEventContent{content: "مرحبا"}, }, }, }, }, { desc: "arabic text handling", steps: []step{ { input: "مرحبا بالعالم", wantEvents: []qwenEvent{qwenEventContent{content: "مرحبا بالعالم"}}, }, }, }, { desc: "emoji passthrough", steps: []step{ { input: "✅", wantEvents: []qwenEvent{qwenEventContent{content: "✅"}}, }, }, }, { desc: "emoji after tool call", steps: []step{ { input: "<tool_call>test</tool_call>完成 ✅", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "test"}, qwenEventContent{content: "完成 ✅"}, }, }, }, }, { desc: "unicode 
streaming with whitespace handling", steps: []step{ { input: "مرحبا", wantEvents: []qwenEvent{ qwenEventContent{content: "مرحبا"}, }, }, { input: " \n", wantEvents: []qwenEvent{}, }, { input: "世界", wantEvents: []qwenEvent{ qwenEventContent{content: " \n世界"}, }, }, }, }, { desc: "non-breaking space withheld across chunks", steps: []step{ { input: "Hello\u00a0", wantEvents: []qwenEvent{ qwenEventContent{content: "Hello"}, }, }, { input: "world", wantEvents: []qwenEvent{ qwenEventContent{content: "\u00a0world"}, }, }, }, }, { desc: "ideographic space before partial tool", steps: []step{ { input: "Hello\u3000<tool", wantEvents: []qwenEvent{ qwenEventContent{content: "Hello"}, }, }, { input: "_call>abc", wantEvents: []qwenEvent{}, }, { input: "</tool_call>def", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "abc"}, qwenEventContent{content: "def"}, }, }, }, }, { desc: "ideographic space before partial tool fakeout", steps: []step{ { input: "Hello\u3000<tool", wantEvents: []qwenEvent{ qwenEventContent{content: "Hello"}, }, }, { input: "fakeout>abc", wantEvents: []qwenEvent{ qwenEventContent{content: "\u3000<toolfakeout>abc"}, }, }, }, }, { desc: "unicode with partial tool tag", steps: []step{ { input: "测试🎯 <to", wantEvents: []qwenEvent{ qwenEventContent{content: "测试🎯"}, }, }, }, }, } anyOnlies := false for _, tc := range cases { if tc.only { anyOnlies = true } } for _, tc := range cases { if anyOnlies && !tc.only { continue } t.Run(tc.desc, func(t *testing.T) { parser := Qwen3CoderParser{} for i, step := range tc.steps { parser.acc.WriteString(step.input) gotEvents := parser.parseEvents() if len(gotEvents) == 0 && len(step.wantEvents) == 0 { // avoid deep equal on empty vs. 
nil slices continue } if !reflect.DeepEqual(gotEvents, step.wantEvents) { t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, step.wantEvents) } } }) } } func TestQwenToolParser(t *testing.T) { type step struct { name string rawToolCall string tools []api.Tool wantToolCall api.ToolCall } steps := []step{ { name: "simple tool call", tools: []api.Tool{}, rawToolCall: `<function=get_current_temperature> <parameter=location> San Francisco </parameter> <parameter=unit> celsius </parameter> </function>`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "get_current_temperature", Arguments: testArgs(map[string]any{ "location": "San Francisco", "unit": "celsius", }), }, }, }, { name: "names with spaces", tools: []api.Tool{}, rawToolCall: `<function=get current temperature> <parameter=location with spaces> San Francisco </parameter> <parameter=unit with spaces> celsius </parameter> </function>`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "get current temperature", Arguments: testArgs(map[string]any{ "location with spaces": "San Francisco", "unit with spaces": "celsius", }), }, }, }, // this mirrors the reference implementation's behavior, but unclear if it // ever happens. 
If so, then we should probably remove them instead, this // test is to just document the current behavior and test that we don't get // xml errors { name: "names with quotes", tools: []api.Tool{}, rawToolCall: `<function="get current temperature"> <parameter="location with spaces"> San Francisco </parameter> <parameter="unit with spaces"> "celsius" </parameter> </function>`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "\"get current temperature\"", Arguments: testArgs(map[string]any{ "\"location with spaces\"": "San Francisco", "\"unit with spaces\"": "\"celsius\"", }), }, }, }, { name: "tool call with typed parameters", tools: []api.Tool{ tool("calculate", map[string]api.ToolProperty{ "x": {Type: api.PropertyType{"number"}}, "y": {Type: api.PropertyType{"integer"}}, "enabled": {Type: api.PropertyType{"boolean"}}, "items": {Type: api.PropertyType{"array"}}, }), }, rawToolCall: `<function=calculate> <parameter=x> 3.14 </parameter> <parameter=y> 42 </parameter> <parameter=enabled> true </parameter> <parameter=items> ["a", "b", "c"] </parameter> </function>`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "calculate", Arguments: testArgs(map[string]any{ "x": 3.14, "y": 42, "enabled": true, "items": []any{"a", "b", "c"}, }), }, }, }, // regression test for <https://github.com/ollama/ollama/issues/12357> { name: "ampersands in parameter values", tools: []api.Tool{}, rawToolCall: `<function=exec> <parameter=command> ls && echo "done" </parameter> </function>`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "exec", Arguments: testArgs(map[string]any{ "command": "ls && echo \"done\"", }), }, }, }, { name: "angle brackets in parameter values", tools: []api.Tool{}, rawToolCall: `<function=exec> <parameter=command> ls && echo "a > b and a < b" </parameter> </function>`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "exec", Arguments: testArgs(map[string]any{ "command": "ls && echo \"a > b and a < 
b\"", }), }, }, }, { name: "unicode in function names and parameters", tools: []api.Tool{}, rawToolCall: `<function=获取天气> <parameter=城市> 北京 </parameter> <parameter=message> Hello! 你好! 🌟 مرحبا </parameter> </function>`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "获取天气", Arguments: testArgs(map[string]any{ "城市": "北京", "message": "Hello! 你好! 🌟 مرحبا", }), }, }, }, } for i, step := range steps { gotToolCall, err := parseToolCall(qwenEventRawToolCall{raw: step.rawToolCall}, step.tools) if err != nil { t.Errorf("step %d (%s): %v", i, step.name, err) } if !toolCallEqual(gotToolCall, step.wantToolCall) { t.Errorf("step %d (%s): got tool call %#v, want %#v", i, step.name, gotToolCall, step.wantToolCall) } } } func TestTrailingWhitespaceLenUnicode(t *testing.T) { cases := []struct { name string input string want int }{ { name: "ascii space", input: "Hello ", want: 1, }, { name: "non-breaking space", input: "Hello\u00a0", want: 2, }, { name: "ideographic space", input: "Hello\u3000", want: 3, }, { name: "multiple runes of whitespace", input: "Hi\u00a0\u3000", want: 5, }, } for _, tc := range cases { got := trailingWhitespaceLen(tc.input) if got != tc.want { t.Errorf("%s: trailingWhitespaceLen(%q) = %d, want %d", tc.name, tc.input, got, tc.want) } } } func TestQwenToolCallValueParsing(t *testing.T) { cases := []struct { desc string raw string paramType api.PropertyType want any }{ { desc: "default string value (no type specified)", paramType: api.PropertyType{}, raw: "some-string", want: "some-string", }, { desc: "trim a single leading and trailing newline", paramType: api.PropertyType{}, raw: "\nsome-string\n", want: "some-string", }, { desc: "trim at most one leading and trailing newline", paramType: api.PropertyType{}, raw: "\n\nsome-string\n\n", want: "\nsome-string\n", }, { desc: "newline really has to be the first character to be trimmed", paramType: api.PropertyType{}, raw: " \nsome-string\n ", want: " \nsome-string\n ", }, { desc: "numeric type", 
paramType: api.PropertyType{"number"}, raw: "123", want: 123, }, // Integer parsing tests { desc: "integer type", paramType: api.PropertyType{"integer"}, raw: "42", want: 42, }, { desc: "negative integer", paramType: api.PropertyType{"integer"}, raw: "-100", want: -100, }, { desc: "zero integer", paramType: api.PropertyType{"integer"}, raw: "0", want: 0, }, { desc: "integer with leading zeros", paramType: api.PropertyType{"integer"}, raw: "007", want: 7, }, { desc: "large integer", paramType: api.PropertyType{"integer"}, raw: "2147483648", // Just beyond int32 max want: int64(2147483648), }, // Float/number parsing tests { desc: "float type", paramType: api.PropertyType{"number"}, raw: "3.14", want: 3.14, }, { desc: "negative float", paramType: api.PropertyType{"number"}, raw: "-273.15", want: -273.15, }, { desc: "float without decimal part", paramType: api.PropertyType{"number"}, raw: "100.0", want: 100, }, { desc: "scientific notation positive", paramType: api.PropertyType{"number"}, raw: "1.23e5", want: 123000, // Will be int since it has no decimal part }, { desc: "scientific notation negative", paramType: api.PropertyType{"number"}, raw: "1.5e-3", want: 0.0015, }, { desc: "very small float", paramType: api.PropertyType{"number"}, raw: "0.00000001", want: 0.00000001, }, // String parsing tests { desc: "explicit string type", paramType: api.PropertyType{"string"}, raw: "hello world", want: "hello world", }, { desc: "string with special characters", paramType: api.PropertyType{"string"}, raw: "/usr/local/bin/test-file_v2.0.sh", want: "/usr/local/bin/test-file_v2.0.sh", }, { desc: "string with quotes", paramType: api.PropertyType{"string"}, raw: `He said "hello" to me`, want: `He said "hello" to me`, }, { desc: "multiline string", paramType: api.PropertyType{"string"}, raw: "line one\nline two\nline three", want: "line one\nline two\nline three", }, { desc: "empty string", paramType: api.PropertyType{"string"}, raw: "", want: "", }, { desc: "string that looks like 
a number", paramType: api.PropertyType{"string"}, raw: "12345", want: "12345", }, // Boolean parsing tests { desc: "boolean true", paramType: api.PropertyType{"boolean"}, raw: "true", want: true, }, { desc: "boolean false", paramType: api.PropertyType{"boolean"}, raw: "false", want: false, }, { desc: "boolean case insensitive true", paramType: api.PropertyType{"boolean"}, raw: "True", want: true, }, { desc: "boolean case insensitive false", paramType: api.PropertyType{"boolean"}, raw: "FALSE", want: false, }, // Null parsing tests { desc: "null value lowercase", paramType: api.PropertyType{"string"}, raw: "null", want: nil, }, { desc: "null value case insensitive", paramType: api.PropertyType{"integer"}, raw: "NULL", want: nil, }, // Array parsing tests { desc: "array of strings", paramType: api.PropertyType{"array"}, raw: `["foo", "bar", "baz"]`, want: []any{"foo", "bar", "baz"}, }, { desc: "array of numbers", paramType: api.PropertyType{"array"}, raw: `[1, 2.5, 3]`, want: []any{float64(1), 2.5, float64(3)}, }, { desc: "array of mixed types", paramType: api.PropertyType{"array"}, raw: `["string", 123, true, null]`, want: []any{"string", float64(123), true, nil}, }, { desc: "empty array", paramType: api.PropertyType{"array"}, raw: `[]`, want: []any{}, }, // Object parsing tests { desc: "simple object", paramType: api.PropertyType{"object"}, raw: `{"key": "value", "number": 42}`, want: map[string]any{"key": "value", "number": float64(42)}, }, { desc: "nested object", paramType: api.PropertyType{"object"}, raw: `{"outer": {"inner": "value"}}`, want: map[string]any{"outer": map[string]any{"inner": "value"}}, }, { desc: "empty object", paramType: api.PropertyType{"object"}, raw: `{}`, want: map[string]any{}, }, // Error cases and fallback behavior { desc: "invalid integer falls back to string", paramType: api.PropertyType{"integer"}, raw: "not-a-number", want: "not-a-number", }, { desc: "invalid float falls back to string", paramType: api.PropertyType{"number"}, raw: 
"3.14.159", want: "3.14.159", }, { desc: "invalid boolean falls back to false", paramType: api.PropertyType{"boolean"}, raw: "yes", want: false, }, { desc: "invalid JSON array falls back to string", paramType: api.PropertyType{"array"}, raw: "[1, 2, unclosed", want: "[1, 2, unclosed", }, { desc: "invalid JSON object falls back to string", paramType: api.PropertyType{"object"}, raw: `{"key": unclosed`, want: `{"key": unclosed`, }, // Edge cases { desc: "integer overflow should use int64", paramType: api.PropertyType{"integer"}, raw: "2147483648", // Beyond int32 max want: int64(2147483648), }, { desc: "float with many decimal places", paramType: api.PropertyType{"number"}, raw: "3.141592653589793", want: 3.141592653589793, }, { desc: "string with JSON-like content", paramType: api.PropertyType{"string"}, raw: `{"this": "is", "just": "a string"}`, want: `{"this": "is", "just": "a string"}`, }, { desc: "whitespace-only string", paramType: api.PropertyType{"string"}, raw: " ", want: " ", }, // Unknown parameter (no type specified in tools) { desc: "parameter not in tool definition defaults to string", paramType: api.PropertyType{}, raw: "some value", want: "some value", }, // Union type tests { desc: "string or number union - valid number", paramType: api.PropertyType{"string", "number"}, raw: "42.5", want: 42.5, }, { desc: "string or number union - non-numeric string", paramType: api.PropertyType{"string", "number"}, raw: "hello", want: "hello", }, { desc: "number or string union - valid number (order shouldn't matter)", paramType: api.PropertyType{"number", "string"}, raw: "42.5", want: 42.5, }, { desc: "integer or null union - valid integer", paramType: api.PropertyType{"integer", "null"}, raw: "123", want: 123, }, { desc: "integer or null union - null value", paramType: api.PropertyType{"integer", "null"}, raw: "null", want: nil, }, { desc: "null or integer union - null value (order shouldn't matter)", paramType: api.PropertyType{"null", "integer"}, raw: "null", 
want: nil, }, { desc: "boolean or string union - valid boolean", paramType: api.PropertyType{"boolean", "string"}, raw: "true", want: true, }, { desc: "boolean or string union - non-boolean becomes string", paramType: api.PropertyType{"boolean", "string"}, raw: "yes", want: "yes", }, { desc: "string or boolean union - valid boolean (precedence test)", paramType: api.PropertyType{"string", "boolean"}, raw: "false", want: false, // Should be boolean, not string "false" }, { desc: "integer or number union - integer value", paramType: api.PropertyType{"integer", "number"}, raw: "42", want: 42, }, { desc: "integer or number union - float value", paramType: api.PropertyType{"integer", "number"}, raw: "42.5", want: 42.5, }, { desc: "number or integer union - integer value (precedence test)", paramType: api.PropertyType{"number", "integer"}, raw: "42", want: 42, // Should try integer first due to precedence }, { desc: "array or object union - valid array", paramType: api.PropertyType{"array", "object"}, raw: `[1, 2, 3]`, want: []any{float64(1), float64(2), float64(3)}, }, { desc: "array or object union - valid object", paramType: api.PropertyType{"array", "object"}, raw: `{"key": "value"}`, want: map[string]any{"key": "value"}, }, { desc: "object or array union - valid array (precedence test)", paramType: api.PropertyType{"object", "array"}, raw: `[1, 2, 3]`, want: []any{float64(1), float64(2), float64(3)}, }, { desc: "complex multi-type union - null", paramType: api.PropertyType{"string", "number", "boolean", "null"}, raw: "null", want: nil, }, { desc: "complex multi-type union - boolean", paramType: api.PropertyType{"string", "number", "boolean", "null"}, raw: "true", want: true, }, { desc: "complex multi-type union - number", paramType: api.PropertyType{"string", "number", "boolean", "null"}, raw: "3.14", want: 3.14, }, { desc: "complex multi-type union - string", paramType: api.PropertyType{"string", "number", "boolean", "null"}, raw: "hello", want: "hello", }, { desc: 
"integer string union - integer string becomes integer", paramType: api.PropertyType{"integer", "string"}, raw: "123", want: 123, }, { desc: "string integer union - integer string becomes integer (precedence)", paramType: api.PropertyType{"string", "integer"}, raw: "123", want: 123, // Integer has higher precedence than string }, { desc: "anyOf array or string - with array of objects", paramType: api.PropertyType{"array", "string"}, raw: `[{"content": "task 1", "status": "pending", "priority": "high", "id": "1"}, {"content": "task 2", "status": "completed", "priority": "low", "id": "2"}]`, want: []any{ map[string]any{"content": "task 1", "status": "pending", "priority": "high", "id": "1"}, map[string]any{"content": "task 2", "status": "completed", "priority": "low", "id": "2"}, }, }, { desc: "anyOf array or string - with plain string", paramType: api.PropertyType{"array", "string"}, raw: "Error: could not load data", want: "Error: could not load data", }, } for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { got := parseValue(tc.raw, tc.paramType) if !reflect.DeepEqual(got, tc.want) { t.Errorf("got %v (type %T), want %v (type %T)", got, got, tc.want, tc.want) } }) } } func TestQwenXMLTransform(t *testing.T) { cases := []struct { desc string raw string want string }{ { desc: "simple example", raw: `<function=get_current_temperature> <parameter=location> San Francisco </parameter> <parameter=unit> celsius </parameter> </function>`, want: `<function name="get_current_temperature"> <parameter name="location"> San Francisco </parameter> <parameter name="unit"> celsius </parameter> </function>`, }, // even though quotes aren't expected in these tags, we have these tests to // make sure they're escaped so they don't blow up the xml parser in case // they happen { desc: "names with quotes", raw: `<function="get current temperature"> <parameter="location with spaces"> San Francisco </parameter> <parameter="unit with spaces"> celsius </parameter> </function>`, 
want: `<function name="&#34;get current temperature&#34;"> <parameter name="&#34;location with spaces&#34;"> San Francisco </parameter> <parameter name="&#34;unit with spaces&#34;"> celsius </parameter> </function>`, }, { desc: "ampersands in parameter values", raw: `<function=get_current_temperature> <parameter=location> San Francisco & San Jose </parameter> </function>`, want: `<function name="get_current_temperature"> <parameter name="location"> San Francisco &amp; San Jose </parameter> </function>`, }, } for _, tc := range cases { got := transformToXML(tc.raw) if got != tc.want { t.Errorf("got %q, want %q", got, tc.want) } } } func TestTrailingWhitespaceLen(t *testing.T) { cases := []struct { desc string s string want int }{ {desc: "no whitespace", s: "abc", want: 0}, {desc: "trailing whitespace", s: "abc ", want: 1}, {desc: "trailing whitespace with newlines", s: "abc \n", want: 2}, {desc: "only whitespace", s: " \n ", want: 4}, {desc: "leading whitespace doesn't count", s: " \n abc", want: 0}, {desc: "unicode with trailing space", s: "测试🎯 ", want: 1}, {desc: "unicode with trailing tab and newline", s: "مرحبا\t\n", want: 2}, } for _, tc := range cases { got := trailingWhitespaceLen(tc.s) if got != tc.want { t.Errorf("got %d, want %d", got, tc.want) } } } func TestOverlapFunction(t *testing.T) { cases := []struct { desc string s string delim string want int }{ {desc: "no overlap", s: "hello", delim: "<tool", want: 0}, {desc: "full overlap", s: "hello<tool", delim: "<tool>", want: 5}, {desc: "partial overlap", s: "hello<to", delim: "<tool>", want: 3}, {desc: "unicode with partial overlap", s: "测试🎯<to", delim: "<tool>", want: 3}, {desc: "unicode string with no overlap", s: "مرحبا", delim: "<tool>", want: 0}, {desc: "unicode at boundary", s: "世界<", delim: "<tool>", want: 1}, {desc: "unicode delimiter single rune", s: "hello🔧", delim: "🔧工具", want: len("🔧")}, {desc: "unicode delimiter multiple runes", s: "hello🔧工", delim: "🔧工具", want: len("🔧工")}, } for _, tc := 
range cases { t.Run(tc.desc, func(t *testing.T) { got := overlap(tc.s, tc.delim) if got != tc.want { t.Errorf("overlap(%q, %q) = %d, want %d", tc.s, tc.delim, got, tc.want) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/parsers_test.go
model/parsers/parsers_test.go
package parsers import ( "strings" "testing" "github.com/ollama/ollama/api" ) type mockParser struct { name string } func (m *mockParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool { return tools } func (m *mockParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) { return "mock:" + s, "", nil, nil } func (m *mockParser) HasToolSupport() bool { return false } func (m *mockParser) HasThinkingSupport() bool { return false } func TestRegisterCustomParser(t *testing.T) { // Register a custom parser Register("custom-parser", func() Parser { return &mockParser{name: "custom"} }) // Retrieve it parser := ParserForName("custom-parser") if parser == nil { t.Fatal("expected parser to be registered") } // Test it works content, _, _, err := parser.Add("test", false) if err != nil { t.Fatalf("unexpected error: %v", err) } if content != "mock:test" { t.Errorf("expected 'mock:test', got %q", content) } } func TestBuiltInParsersStillWork(t *testing.T) { tests := []struct { name string }{ {"passthrough"}, {"qwen3-coder"}, {"harmony"}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := ParserForName(tt.name) if parser == nil { t.Fatalf("expected built-in parser %q to exist", tt.name) } }) } } func TestOverrideBuiltInParser(t *testing.T) { // Override a built-in parser Register("passthrough", func() Parser { return &mockParser{name: "override"} }) // Should get the override parser := ParserForName("passthrough") if parser == nil { t.Fatal("expected parser to exist") } // Test it's the override content, _, _, err := parser.Add("test", false) if err != nil { t.Fatalf("unexpected error: %v", err) } if content != "mock:test" { t.Errorf("expected 'mock:test' from override, got %q", content) } } func TestUnknownParserReturnsNil(t *testing.T) { parser := ParserForName("nonexistent-parser") if parser != nil { t.Error("expected nil for unknown parser") } } func TestSplitAtTag(t 
*testing.T) { tests := []struct { name string input string tag string trimAfter bool wantBefore string wantAfter string wantSB string // expected content of strings.Builder after operation }{ { name: "basic split with trimAfter true", input: "hello <!-- split --> world", tag: "<!-- split -->", trimAfter: true, wantBefore: "hello", wantAfter: "world", wantSB: "world", }, { name: "basic split with trimAfter false", input: "hello <!-- split --> world", tag: "<!-- split -->", trimAfter: false, wantBefore: "hello", wantAfter: " world", wantSB: " world", }, { name: "tag at beginning with trimAfter true", input: "<!-- split -->world", tag: "<!-- split -->", trimAfter: true, wantBefore: "", wantAfter: "world", wantSB: "world", }, { name: "tag at beginning with trimAfter false", input: "<!-- split --> world", tag: "<!-- split -->", trimAfter: false, wantBefore: "", wantAfter: " world", wantSB: " world", }, { name: "tag at end with trimAfter true", input: "hello <!-- split -->", tag: "<!-- split -->", trimAfter: true, wantBefore: "hello", wantAfter: "", wantSB: "", }, { name: "tag at end with trimAfter false", input: "hello <!-- split -->", tag: "<!-- split -->", trimAfter: false, wantBefore: "hello", wantAfter: "", wantSB: "", }, { name: "multiple tags splits at first occurrence", input: "hello <!-- split --> world <!-- split --> end", tag: "<!-- split -->", trimAfter: true, wantBefore: "hello", wantAfter: "world <!-- split --> end", wantSB: "world <!-- split --> end", }, { name: "tag not present", input: "hello world", tag: "<!-- split -->", trimAfter: true, wantBefore: "hello world", wantAfter: "", wantSB: "", }, { name: "empty input", input: "", tag: "<!-- split -->", trimAfter: true, wantBefore: "", wantAfter: "", wantSB: "", }, { name: "only whitespace before tag", input: " \t\n<!-- split -->world", tag: "<!-- split -->", trimAfter: true, wantBefore: "", wantAfter: "world", wantSB: "world", }, { name: "only whitespace after tag with trimAfter true", input: "hello<!-- 
split --> \t\n", tag: "<!-- split -->", trimAfter: true, wantBefore: "hello", wantAfter: "", wantSB: "", }, { name: "only whitespace after tag with trimAfter false", input: "hello<!-- split --> \t\n", tag: "<!-- split -->", trimAfter: false, wantBefore: "hello", wantAfter: " \t\n", wantSB: " \t\n", }, { name: "complex whitespace trimming", input: " hello \t\n <!-- split --> \n\t world ", tag: "<!-- split -->", trimAfter: true, wantBefore: " hello", wantAfter: "world ", wantSB: "world ", }, { name: "tag with special characters", input: "text <tag attr=\"value\"> more text", tag: "<tag attr=\"value\">", trimAfter: true, wantBefore: "text", wantAfter: "more text", wantSB: "more text", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { sb := &strings.Builder{} sb.WriteString(tt.input) before, after := splitAtTag(sb, tt.tag, tt.trimAfter) // Check return values if before != tt.wantBefore { t.Errorf("splitAtTag() before = %q, want %q", before, tt.wantBefore) } if after != tt.wantAfter { t.Errorf("splitAtTag() after = %q, want %q", after, tt.wantAfter) } // Check strings.Builder state if sb.String() != tt.wantSB { t.Errorf("strings.Builder after split = %q, want %q", sb.String(), tt.wantSB) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/cogito_test.go
model/parsers/cogito_test.go
package parsers import ( "strings" "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestCogitoParser(t *testing.T) { tests := []struct { name string input string expectedContent string expectedThinking string expectedToolCalls []api.ToolCall tools []api.Tool lastMessage *api.Message }{ { name: "simple_content", input: "This is a simple response.", expectedContent: "This is a simple response.", expectedThinking: "", }, { name: "thinking_only", input: "This is thinking content.</think>This is response content.", expectedContent: "This is response content.", expectedThinking: "This is thinking content.", }, { name: "tool_call_simple", input: `<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather ` + "```json\n" + `{"location":"Paris"} ` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`, expectedToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Parameters: api.ToolFunctionParameters{ Properties: testPropsMap(map[string]api.ToolProperty{ "location": {Type: api.PropertyType{"string"}}, }), }, }, }, }, }, { name: "thinking_with_tool_call", input: `I need to check the weather.</think><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather ` + "```json\n" + `{"location":"Paris"} ` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`, expectedContent: "I need to check the weather.</think>", expectedThinking: "", // No thinking when tools are present (Cogito-specific behavior) expectedToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Parameters: api.ToolFunctionParameters{ Properties: testPropsMap(map[string]api.ToolProperty{ "location": {Type: 
api.PropertyType{"string"}}, }), }, }, }, }, }, { name: "multiple_tool_calls", input: `<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather ` + "```json\n" + `{"location":"Paris"} ` + "```" + `<|tool▁call▁end|> <|tool▁call▁begin|>function<|tool▁sep|>get_weather ` + "```json\n" + `{"location":"London"} ` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`, expectedToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "London", }), }, }, }, tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Parameters: api.ToolFunctionParameters{ Properties: testPropsMap(map[string]api.ToolProperty{ "location": {Type: api.PropertyType{"string"}}, }), }, }, }, }, }, { name: "complex_tool_arguments", input: `<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>process_data ` + "```json\n" + `{"items":["item1","item2"],"config":{"enabled":true,"threshold":0.95},"count":42} ` + "```" + `<|tool▁call▁end|><|tool▁calls▁end|>`, expectedToolCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "process_data", Arguments: testArgs(map[string]any{ "items": []any{"item1", "item2"}, "config": map[string]any{"enabled": true, "threshold": 0.95}, "count": 42.0, }), }, }, }, }, { name: "tool_output_parsing", input: `<|tool▁outputs▁begin|><|tool▁output▁begin|>{"temperature": 22, "condition": "sunny"}<|tool▁output▁end|><|tool▁outputs▁end|>`, expectedContent: "", expectedThinking: "", }, { name: "thinking_with_multiline_content", input: `This is line 1 This is line 2 This is line 3</think>Final response here.`, expectedContent: "Final response here.", expectedThinking: "This is line 1\nThis is line 2\nThis is line 3", }, { name: "no_thinking_simple", input: "This is content.", expectedContent: "This is content.", expectedThinking: "", }, { name: 
"prefill_content_only", input: "Continuing from previous content.", expectedContent: "Continuing from previous content.", lastMessage: &api.Message{ Role: "assistant", Content: "Previous content", }, }, { name: "prefill_with_thinking", input: "Continuing thinking</think>Continuing content.", expectedContent: "Continuing content.", expectedThinking: "Continuing thinking", lastMessage: &api.Message{ Role: "assistant", }, }, // Edge cases { name: "nested_think_tags_in_thinking", input: "I'm thinking <think>nested</think> more thinking</think>Final content.", expectedContent: "more thinking</think>Final content.", expectedThinking: "I'm thinking <think>nested", }, { name: "multiple_think_close_tags", input: "First thinking</think>Content</think>More content.", expectedContent: "Content</think>More content.", expectedThinking: "First thinking", }, { name: "empty_thinking_content", input: "</think>Just content here.", expectedContent: "</think>Just content here.", expectedThinking: "", }, { name: "thinking_disabled_with_think_tags", input: "Content with </think> tags should be treated as content.", expectedContent: "Content with </think> tags should be treated as content.", expectedThinking: "", lastMessage: &api.Message{ Role: "assistant", Content: "existing", // Forces non-thinking mode }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Use thinking-enabled parser for tests that expect thinking hasThinking := tt.expectedThinking != "" parser := &CogitoParser{} // it has thinking support parser.Init(tt.tools, tt.lastMessage, &api.ThinkValue{Value: hasThinking}) // but we should set it with the request that the user wants content, thinking, toolCalls, err := parser.Add(tt.input, true) if err != nil { t.Fatalf("Add() error = %v", err) } if diff := cmp.Diff(tt.expectedContent, content); diff != "" { t.Errorf("content mismatch (-want +got):\n%s", diff) } if diff := cmp.Diff(tt.expectedThinking, thinking); diff != "" { t.Errorf("thinking mismatch 
(-want +got):\n%s", diff) } if diff := cmp.Diff(tt.expectedToolCalls, toolCalls, argsComparer); diff != "" { t.Errorf("tool calls mismatch (-want +got):\n%s", diff) } }) } } func TestCogitoParser_Streaming(t *testing.T) { parser := &CogitoParser{} parser.Init(nil, nil, &api.ThinkValue{Value: true}) chunks := []string{ "This is ", "thinking content", ".</think>This is ", "content.<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>test_tool\n```json\n{\"arg\":\"value\"}\n```<|tool▁call▁end|><|tool▁calls▁end|>", } var finalContent, finalThinking strings.Builder var finalToolCalls []api.ToolCall for i, chunk := range chunks { done := i == len(chunks)-1 content, thinking, toolCalls, err := parser.Add(chunk, done) if err != nil { t.Fatalf("Add() error on chunk %d: %v", i, err) } finalContent.WriteString(content) finalThinking.WriteString(thinking) finalToolCalls = append(finalToolCalls, toolCalls...) } expectedContent := "This is content." expectedThinking := "This is thinking content." 
expectedToolCalls := []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "test_tool", Arguments: testArgs(map[string]any{ "arg": "value", }), }, }, } if finalContent.String() != expectedContent { t.Errorf("expected content %q, got %q", expectedContent, finalContent.String()) } if finalThinking.String() != expectedThinking { t.Errorf("expected thinking %q, got %q", expectedThinking, finalThinking.String()) } if diff := cmp.Diff(expectedToolCalls, finalToolCalls, argsComparer); diff != "" { t.Errorf("tool calls mismatch (-want +got):\n%s", diff) } } func TestCogitoParser_StreamingEdgeCases(t *testing.T) { tests := []struct { name string chunks []string expectedContent string expectedThinking string expectedToolCalls []api.ToolCall hasThinkingSupport bool }{ { name: "split_thinking_tag", chunks: []string{ "This is thinking content</thi", "nk>This is content.", }, expectedContent: "This is content.", expectedThinking: "This is thinking content", hasThinkingSupport: true, }, { name: "split_tool_calls_begin_tag_conservative_parsing", chunks: []string{ "Content before<|tool▁calls▁beg", "in|><|tool▁call▁begin|>function<|tool▁sep|>test\n```json\n{}\n```<|tool▁call▁end|><|tool▁calls▁end|>", }, // Parser is conservative - treats incomplete tags as content expectedContent: "Content before<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>test\n```json\n{}\n```<|tool▁call▁end|><|tool▁calls▁end|>", expectedToolCalls: nil, hasThinkingSupport: false, }, { name: "thinking_disabled_with_split_tags", chunks: []string{ "Content with </thi", "nk> should be treated as content.", }, expectedContent: "Content with </think> should be treated as content.", expectedThinking: "", hasThinkingSupport: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &CogitoParser{} parser.Init(nil, nil, &api.ThinkValue{Value: tt.hasThinkingSupport}) var finalContent, finalThinking strings.Builder var finalToolCalls []api.ToolCall for i, chunk := range tt.chunks { 
done := i == len(tt.chunks)-1 content, thinking, toolCalls, err := parser.Add(chunk, done) if err != nil { t.Fatalf("Add() error on chunk %d: %v", i, err) } finalContent.WriteString(content) finalThinking.WriteString(thinking) finalToolCalls = append(finalToolCalls, toolCalls...) } if finalContent.String() != tt.expectedContent { t.Errorf("expected content %q, got %q", tt.expectedContent, finalContent.String()) } if finalThinking.String() != tt.expectedThinking { t.Errorf("expected thinking %q, got %q", tt.expectedThinking, finalThinking.String()) } if diff := cmp.Diff(tt.expectedToolCalls, finalToolCalls, argsComparer); diff != "" { t.Errorf("tool calls mismatch (-want +got):\n%s", diff) } }) } } func TestCogitoParser_HasToolSupport(t *testing.T) { parser := &CogitoParser{} if !parser.HasToolSupport() { t.Error("CogitoParser should support tools") } } func TestCogitoParser_Init(t *testing.T) { parser := &CogitoParser{} tools := []api.Tool{ {Function: api.ToolFunction{Name: "test_tool"}}, } lastMessage := &api.Message{Role: "assistant", Content: "previous"} returnedTools := parser.Init(tools, lastMessage, nil) if len(returnedTools) != len(tools) { t.Errorf("expected %d tools returned, got %d", len(tools), len(returnedTools)) } } func TestCogitoParser_parseToolCallContent(t *testing.T) { tests := []struct { name string content string expected api.ToolCall expectError bool }{ { name: "valid_tool_call_standard_format", content: `function<|tool▁sep|>get_weather ` + "```json\n" + `{"location":"Paris"} ` + "```", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, expectError: false, }, { name: "valid_tool_call_complex_args", content: `function<|tool▁sep|>process_data ` + "```json\n" + `{"items":["item1","item2"],"config":{"enabled":true},"count":42} ` + "```", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "process_data", Arguments: testArgs(map[string]any{ 
"items": []any{"item1", "item2"}, "config": map[string]any{"enabled": true}, "count": 42.0, }), }, }, expectError: false, }, { name: "valid_tool_call_empty_args", content: `function<|tool▁sep|>no_args_tool ` + "```json\n" + `{} ` + "```", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "no_args_tool", Arguments: api.NewToolCallFunctionArguments(), }, }, expectError: false, }, { name: "missing_separator", content: `functionget_weather` + "```json\n" + `{"location":"Paris"}` + "\n```", expected: api.ToolCall{}, expectError: true, }, { name: "invalid_function_type", content: `not_function<|tool▁sep|>get_weather` + "```json\n" + `{"location":"Paris"}` + "\n```", expected: api.ToolCall{}, expectError: true, }, { name: "missing_json_block_start", content: `function<|tool▁sep|>get_weather{"location":"Paris"}` + "```", expected: api.ToolCall{}, expectError: true, }, { name: "missing_json_block_end", content: `function<|tool▁sep|>get_weather` + "```json\n" + `{"location":"Paris"}`, expected: api.ToolCall{}, expectError: true, }, { name: "invalid_json", content: `function<|tool▁sep|>get_weather` + "```json\n" + `{location:Paris}` + "\n```", expected: api.ToolCall{}, expectError: true, }, { name: "empty_function_type", content: `<|tool▁sep|>get_weather` + "```json\n" + `{"location":"Paris"}` + "\n```", expected: api.ToolCall{}, expectError: true, }, { name: "tool_with_spaces_in_name", content: `function<|tool▁sep|> get_weather ` + "```json\n" + `{"location":"Paris"} ` + "```", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", }), }, }, expectError: false, }, { name: "tool_with_multiline_json", content: `function<|tool▁sep|>get_weather ` + "```json\n" + `{ "location": "Paris", "units": "metric" } ` + "```", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{ "location": "Paris", "units": "metric", }), }, }, expectError: 
false, }, { name: "tool_with_nested_objects", content: `function<|tool▁sep|>complex_tool ` + "```json\n" + `{"nested":{"deep":{"value":123}}} ` + "```", expected: api.ToolCall{ Function: api.ToolCallFunction{ Name: "complex_tool", Arguments: testArgs(map[string]any{ "nested": map[string]any{ "deep": map[string]any{ "value": 123.0, }, }, }), }, }, expectError: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &CogitoParser{} result, err := parser.parseToolCallContent(tt.content) if tt.expectError { if err == nil { t.Errorf("expected error but got none") } return } if err != nil { t.Fatalf("unexpected error: %v", err) } if diff := cmp.Diff(tt.expected, result, argsComparer); diff != "" { t.Errorf("tool call mismatch (-want +got):\n%s", diff) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/qwen3vl_nonthinking_test.go
model/parsers/qwen3vl_nonthinking_test.go
package parsers import ( "reflect" "testing" "github.com/ollama/ollama/api" ) func TestQwen3VLNonThinkingParserStreaming(t *testing.T) { type step struct { input string wantEvents []qwenEvent } cases := []struct { desc string steps []step only bool }{ { desc: "simple thinking", steps: []step{ {input: "abc</think>", wantEvents: []qwenEvent{qwenEventContent{content: "abc</think>"}}}, }, }, { desc: "simple trip thinking", steps: []step{ {input: "<think>abc</think>", wantEvents: []qwenEvent{qwenEventContent{content: "<think>abc</think>"}}}, }, }, { desc: "thinking with split tags", steps: []step{ {input: "abc", wantEvents: []qwenEvent{qwenEventContent{content: "abc"}}}, {input: "</think>", wantEvents: []qwenEvent{qwenEventContent{content: "</think>"}}}, }, }, { desc: "multiple think tags", steps: []step{ {input: "abc<think>actually, is not thinking</think>", wantEvents: []qwenEvent{qwenEventContent{content: "abc<think>actually, is not thinking</think>"}}}, }, }, { desc: "thinking and tool call", steps: []step{ { input: "I'm thinking</think><tool_call>I'm tool calling</tool_call>", wantEvents: []qwenEvent{ qwenEventContent{content: "I'm thinking</think>"}, qwenEventRawToolCall{raw: "I'm tool calling"}, }, }, }, }, { desc: "nested thinking (outside thinking, inside thinking)", steps: []step{ { input: "I'm thinking<think>I'm nested thinking</think></think>", wantEvents: []qwenEvent{ qwenEventContent{content: "I'm thinking<think>I'm nested thinking</think></think>"}, }, }, }, }, { desc: "interleaved thinking", steps: []step{ { input: "<think>I'm thinking</think>I'm actually content</think>", wantEvents: []qwenEvent{ qwenEventContent{content: "<think>I'm thinking</think>I'm actually content</think>"}, }, }, }, }, { desc: "nested thinking and tool call (outside thinking, inside tool call)", steps: []step{ { input: "I'm thinking<tool_call>I'm nested tool call</tool_call></think>", wantEvents: []qwenEvent{ qwenEventContent{content: "I'm thinking"}, qwenEventRawToolCall{raw: 
"I'm nested tool call"}, qwenEventContent{content: "</think>"}, }, }, }, }, { desc: "nested thinking and tool call (outside tool call, inside thinking)", steps: []step{ { input: "<tool_call>I'm nested tool call<think>I'm thinking</think></tool_call>", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "I'm nested tool call<think>I'm thinking</think>"}, }, }, }, }, { desc: "interleaved thinking and tool call", steps: []step{ { input: "I'm thinking<tool_call>I'm NOT a nested tool call</think></tool_call><tool_call>I'm nested tool call 2<think></tool_call></think>", wantEvents: []qwenEvent{ qwenEventContent{content: "I'm thinking"}, qwenEventRawToolCall{raw: "I'm NOT a nested tool call</think>"}, qwenEventRawToolCall{raw: "I'm nested tool call 2<think>"}, qwenEventContent{content: "</think>"}, }, }, }, }, { desc: "emit unambiguous before partial tool open (trailing ws)", steps: []step{ { input: "abc\u00a0\n<tool_call", wantEvents: []qwenEvent{qwenEventContent{content: "abc"}}, }, { input: " fakeout", wantEvents: []qwenEvent{qwenEventContent{content: "\u00a0\n<tool_call fakeout"}}, }, }, }, { desc: "unambiguous empty: partial tool open at buffer start", steps: []step{ { input: "<tool_ca", wantEvents: []qwenEvent{}, }, { input: "ll>abc</tool_call>", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "abc"}, }, }, }, }, { desc: "partial thinking tag fakeout", steps: []step{ { input: "abc</think", wantEvents: []qwenEvent{qwenEventContent{content: "abc</think"}}, }, { input: " fakeout", wantEvents: []qwenEvent{qwenEventContent{content: " fakeout"}}, }, }, }, { desc: "partial thinking incomplete", steps: []step{ { input: "abc<think>unfinished<", // when something is ambiguious, we dont emit anything wantEvents: []qwenEvent{qwenEventContent{content: "abc<think>unfinished"}}, }, }, }, { desc: "test with split tool and content", steps: []step{ { input: "abc<tool_call>unfinished</", // when something is ambiguious, we dont emit anything wantEvents: []qwenEvent{ 
qwenEventContent{content: "abc"}, }, }, { input: "tool_call> def", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "unfinished"}, qwenEventContent{content: "def"}, }, }, }, }, } anyOnlies := false for _, tc := range cases { if tc.only { anyOnlies = true } } for _, tc := range cases { if anyOnlies && !tc.only { continue } t.Run(tc.desc, func(t *testing.T) { parser := Qwen3VLParser{hasThinkingSupport: false} parser.Init([]api.Tool{}, nil, nil) for i, step := range tc.steps { parser.buffer.WriteString(step.input) gotEvents := parser.parseEvents() if len(gotEvents) == 0 && len(step.wantEvents) == 0 { // avoid deep equal on empty vs. nil slices continue } if !reflect.DeepEqual(gotEvents, step.wantEvents) { t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, step.wantEvents) } } }) } } func TestQwenOldParserStreaming(t *testing.T) { type step struct { input string wantEvents []qwenEvent } cases := []struct { desc string steps []step only bool }{ { desc: "simple message streamed word by word", steps: []step{ { input: "hi", wantEvents: []qwenEvent{qwenEventContent{content: "hi"}}, }, { input: " there", wantEvents: []qwenEvent{qwenEventContent{content: " there"}}, }, }, }, { desc: "content before tool call", steps: []step{ { input: "hi there<tool_call>", wantEvents: []qwenEvent{qwenEventContent{content: "hi there"}}, }, }, }, { desc: "multiple tool calls in one message", steps: []step{ { input: "before1<tool_call>in tool call</tool_call>after1<tool_call>in tool call 2</tool_call>after2", wantEvents: []qwenEvent{ qwenEventContent{content: "before1"}, qwenEventRawToolCall{raw: "in tool call"}, qwenEventContent{content: "after1"}, qwenEventRawToolCall{raw: "in tool call 2"}, qwenEventContent{content: "after2"}, }, }, }, }, { desc: "tool calls with split tags", steps: []step{ { input: "before<tool", wantEvents: []qwenEvent{ qwenEventContent{content: "before"}, }, }, { input: "_call>in tool call</tool", wantEvents: []qwenEvent{}, }, { input: 
"_call>af", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "in tool call"}, qwenEventContent{content: "af"}, }, }, { input: "ter", wantEvents: []qwenEvent{ qwenEventContent{content: "ter"}, }, }, }, }, { desc: "trailing whitespace between content and tool call", steps: []step{ { input: "abc\n<tool_call>def</tool_call>", wantEvents: []qwenEvent{ qwenEventContent{content: "abc"}, qwenEventRawToolCall{raw: "def"}, }, }, }, }, { desc: "trailing whitespace between tool call and content", steps: []step{ { input: "<tool_call>abc</tool_call>\ndef", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "abc"}, qwenEventContent{content: "def"}, }, }, }, }, { desc: "empty content before tool call", steps: []step{ { input: "\n<tool_call>abc</tool_call>", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "abc"}, }, }, }, }, { desc: "partial tool open tag fakeout", steps: []step{ { input: "abc\n<tool_call", wantEvents: []qwenEvent{ // \n should not be emitted yet because `<tool_call` might be a tool // open tag, in which case the whitespace should be trimmed qwenEventContent{content: "abc"}, }, }, { input: " fakeout", wantEvents: []qwenEvent{ qwenEventContent{content: "\n<tool_call fakeout"}, }, }, }, }, { desc: "token-by-token whitespace handling", steps: []step{ { input: "a", wantEvents: []qwenEvent{ qwenEventContent{content: "a"}, }, }, { input: "\n", wantEvents: []qwenEvent{}, }, { input: "b", wantEvents: []qwenEvent{ qwenEventContent{content: "\nb"}, }, }, }, }, { desc: "unicode content", steps: []step{ { input: "你好 🌍<tool_call>test</tool_call>مرحبا", wantEvents: []qwenEvent{ qwenEventContent{content: "你好 🌍"}, qwenEventRawToolCall{raw: "test"}, qwenEventContent{content: "مرحبا"}, }, }, }, }, { desc: "arabic text handling", steps: []step{ { input: "مرحبا بالعالم", wantEvents: []qwenEvent{qwenEventContent{content: "مرحبا بالعالم"}}, }, }, }, { desc: "emoji passthrough", steps: []step{ { input: "✅", wantEvents: []qwenEvent{qwenEventContent{content: "✅"}}, }, }, }, { desc: 
"emoji after tool call", steps: []step{ { input: "<tool_call>test</tool_call>完成 ✅", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "test"}, qwenEventContent{content: "完成 ✅"}, }, }, }, }, { desc: "unicode streaming with whitespace handling", steps: []step{ { input: "مرحبا", wantEvents: []qwenEvent{ qwenEventContent{content: "مرحبا"}, }, }, { input: " \n", wantEvents: []qwenEvent{}, }, { input: "世界", wantEvents: []qwenEvent{ qwenEventContent{content: " \n世界"}, }, }, }, }, { desc: "non-breaking space withheld across chunks", steps: []step{ { input: "Hello\u00a0", wantEvents: []qwenEvent{ qwenEventContent{content: "Hello"}, }, }, { input: "world", wantEvents: []qwenEvent{ qwenEventContent{content: "\u00a0world"}, }, }, }, }, { desc: "ideographic space before partial tool", steps: []step{ { input: "Hello\u3000<tool", wantEvents: []qwenEvent{ qwenEventContent{content: "Hello"}, }, }, { input: "_call>abc", wantEvents: []qwenEvent{}, }, { input: "</tool_call>def", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "abc"}, qwenEventContent{content: "def"}, }, }, }, }, { desc: "ideographic space before partial tool fakeout", steps: []step{ { input: "Hello\u3000<tool", wantEvents: []qwenEvent{ qwenEventContent{content: "Hello"}, }, }, { input: "fakeout>abc", wantEvents: []qwenEvent{ qwenEventContent{content: "\u3000<toolfakeout>abc"}, }, }, }, }, { desc: "unicode with partial tool tag", steps: []step{ { input: "测试🎯 <to", wantEvents: []qwenEvent{ qwenEventContent{content: "测试🎯"}, }, }, }, }, } anyOnlies := false for _, tc := range cases { if tc.only { anyOnlies = true } } for _, tc := range cases { if anyOnlies && !tc.only { continue } t.Run(tc.desc, func(t *testing.T) { parser := Qwen3VLParser{hasThinkingSupport: false} parser.Init([]api.Tool{}, nil, nil) for i, step := range tc.steps { parser.buffer.WriteString(step.input) gotEvents := parser.parseEvents() if len(gotEvents) == 0 && len(step.wantEvents) == 0 { // avoid deep equal on empty vs. 
nil slices continue } if !reflect.DeepEqual(gotEvents, step.wantEvents) { t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, step.wantEvents) } } }) } } func TestQwen3VLNonThinkingToolParser(t *testing.T) { type step struct { name string rawToolCall string tools []api.Tool wantToolCall api.ToolCall } steps := []step{ { name: "simple tool call", tools: []api.Tool{}, rawToolCall: `{"name": "get-current-weather", "arguments": {"location": "San Francisco, CA", "unit": "fahrenheit"}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "get-current-weather", Arguments: testArgs(map[string]any{ "location": "San Francisco, CA", "unit": "fahrenheit", }), }, }, }, { name: "names with spaces", tools: []api.Tool{}, rawToolCall: `{"name": "get current temperature", "arguments": {"location with spaces": "San Francisco", "unit with spaces": "celsius"}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "get current temperature", Arguments: testArgs(map[string]any{ "location with spaces": "San Francisco", "unit with spaces": "celsius", }), }, }, }, { name: "names with quotes", tools: []api.Tool{}, rawToolCall: `{"name": "\"get current temperature\"", "arguments": {"\"location with spaces\"": "San Francisco", "\"unit with spaces\"": "\"celsius\""}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "\"get current temperature\"", Arguments: testArgs(map[string]any{ "\"location with spaces\"": "San Francisco", "\"unit with spaces\"": "\"celsius\"", }), }, }, }, { name: "tool call with typed parameters (json types)", tools: []api.Tool{}, rawToolCall: `{"name": "calculate", "arguments": {"x": 3.14, "y": 42, "enabled": true, "items": ["a", "b", "c"]}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "calculate", Arguments: testArgs(map[string]any{ "x": 3.14, "y": float64(42), "enabled": true, "items": []any{"a", "b", "c"}, }), }, }, }, { name: "ampersands in parameter values", tools: 
[]api.Tool{}, rawToolCall: `{"name": "exec", "arguments": {"command": "ls && echo \"done\""}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "exec", Arguments: testArgs(map[string]any{ "command": "ls && echo \"done\"", }), }, }, }, { name: "angle brackets in parameter values", tools: []api.Tool{}, rawToolCall: `{"name": "exec", "arguments": {"command": "ls && echo \"a > b and a < b\""}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "exec", Arguments: testArgs(map[string]any{ "command": "ls && echo \"a > b and a < b\"", }), }, }, }, { name: "unicode in function names and parameters", tools: []api.Tool{}, rawToolCall: `{"name": "获取天气", "arguments": {"城市": "北京", "message": "Hello! 你好! 🌟 مرحبا"}}`, wantToolCall: api.ToolCall{ Function: api.ToolCallFunction{ Name: "获取天气", Arguments: testArgs(map[string]any{ "城市": "北京", "message": "Hello! 你好! 🌟 مرحبا", }), }, }, }, } for i, step := range steps { gotToolCall, err := parseJSONToolCall(qwenEventRawToolCall{raw: step.rawToolCall}, step.tools) if err != nil { t.Errorf("step %d (%s): %v", i, step.name, err) } if !toolCallEqual(gotToolCall, step.wantToolCall) { t.Errorf("step %d (%s): got tool call %#v, want %#v", i, step.name, gotToolCall, step.wantToolCall) } } } func TestQwen3VLNonThinkingToolCallWhitespaceHandling(t *testing.T) { type step struct { input string wantEvents []qwenEvent } cases := []struct { desc string steps []step only bool }{ { desc: "whitespace inside tool call preserves trailing space", steps: []step{ { input: "before<tool_call> tool content </tool_call>after", wantEvents: []qwenEvent{ qwenEventContent{content: "before"}, qwenEventRawToolCall{raw: " tool content "}, qwenEventContent{content: "after"}, }, }, }, }, { desc: "whitespace inside tool call preserves trailing space", steps: []step{ { input: "\n \n \n \n \n \n blahhhhhhhhhh blahhhh blahhhh \n\n\n\t\t <tool_call> tool content </tool_call> \n\n\n\n\n\n\n after", wantEvents: []qwenEvent{ 
qwenEventContent{content: "\n \n \n \n \n \n blahhhhhhhhhh blahhhh blahhhh"}, qwenEventRawToolCall{raw: " tool content "}, qwenEventContent{content: "after"}, }, }, }, }, { desc: "whitespace inside tool call preserves trailing space", steps: []step{ { input: "<tool_call> tool content </tool_call> ", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: " tool content "}, }, }, { input: "\n \n \n \n \n \n blahhhhhhhhhh blahhhh blahhhh \n\n\n\t\t <tool_call> anotha one </tool_call> \n\n\n\n\n\n\n after \n\n\n\n\n\n blep", wantEvents: []qwenEvent{ qwenEventContent{content: "blahhhhhhhhhh blahhhh blahhhh"}, qwenEventRawToolCall{raw: " anotha one "}, qwenEventContent{content: "after \n\n\n\n\n\n blep"}, }, }, }, }, { desc: "whitespace between content and tool call", steps: []step{ { input: "content \n <tool_call>tool</tool_call> \n more content", wantEvents: []qwenEvent{ qwenEventContent{content: "content"}, qwenEventRawToolCall{raw: "tool"}, qwenEventContent{content: "more content"}, }, }, }, }, { desc: "consecutive tool calls with whitespace", steps: []step{ { input: "<tool_call>first</tool_call> \n <tool_call>second</tool_call> \n <tool_call>third</tool_call>", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "first"}, qwenEventRawToolCall{raw: "second"}, qwenEventRawToolCall{raw: "third"}, }, }, }, }, { desc: "whitespace before and after tool open tag", steps: []step{ { input: "text \n <tool_call>content</tool_call>", wantEvents: []qwenEvent{ qwenEventContent{content: "text"}, qwenEventRawToolCall{raw: "content"}, }, }, }, }, { desc: "unicode whitespace around tool calls", steps: []step{ { input: "text\u00a0\u3000<tool_call>content</tool_call>\u00a0\u3000text", wantEvents: []qwenEvent{ qwenEventContent{content: "text"}, qwenEventRawToolCall{raw: "content"}, qwenEventContent{content: "text"}, }, }, }, }, { desc: "empty tool call with surrounding whitespace", steps: []step{ { input: "before <tool_call></tool_call> after", wantEvents: []qwenEvent{ 
qwenEventContent{content: "before"}, qwenEventRawToolCall{raw: ""}, qwenEventContent{content: "after"}, }, }, }, }, { desc: "whitespace in tool call split across chunks", steps: []step{ { input: "before<tool_call> ", wantEvents: []qwenEvent{qwenEventContent{content: "before"}}, }, { input: "tool", wantEvents: []qwenEvent{}, }, { input: " </tool_call>after", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: " tool "}, qwenEventContent{content: "after"}, }, }, }, }, { desc: "mixed whitespace types between tool calls", steps: []step{ { input: "<tool_call>first</tool_call> \t\n\r <tool_call>second</tool_call>", wantEvents: []qwenEvent{ qwenEventRawToolCall{raw: "first"}, qwenEventRawToolCall{raw: "second"}, }, }, }, }, } anyOnlies := false for _, tc := range cases { if tc.only { anyOnlies = true } } for _, tc := range cases { if anyOnlies && !tc.only { continue } t.Run(tc.desc, func(t *testing.T) { parser := Qwen3VLParser{hasThinkingSupport: false} parser.Init([]api.Tool{}, nil, nil) for i, step := range tc.steps { parser.buffer.WriteString(step.input) gotEvents := parser.parseEvents() if len(gotEvents) == 0 && len(step.wantEvents) == 0 { continue } if !reflect.DeepEqual(gotEvents, step.wantEvents) { t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, step.wantEvents) } } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/cogito.go
model/parsers/cogito.go
package parsers import ( "encoding/json" "errors" "log/slog" "strings" "unicode" "github.com/ollama/ollama/api" ) type CogitoParserState int const ( CogitoCollectingThinking CogitoParserState = iota CogitoCollectingContent CogitoCollectingToolCalls CogitoCollectingToolOutput ) const ( cogitoThinkingCloseTag = "</think>" cogitoToolCallsBeginTag = "<|tool▁calls▁begin|>" cogitoToolCallsEndTag = "<|tool▁calls▁end|>" cogitoToolCallBeginTag = "<|tool▁call▁begin|>" cogitoToolCallEndTag = "<|tool▁call▁end|>" cogitoToolSepTag = "<|tool▁sep|>" cogitoToolOutputBeginTag = "<|tool▁output▁begin|>" cogitoToolOutputEndTag = "<|tool▁output▁end|>" cogitoToolOutputsBeginTag = "<|tool▁outputs▁begin|>" cogitoToolOutputsEndTag = "<|tool▁outputs▁end|>" ) type CogitoParser struct { state CogitoParserState buffer strings.Builder } func (p *CogitoParser) HasToolSupport() bool { return true } func (p *CogitoParser) HasThinkingSupport() bool { return true } func (p *CogitoParser) setInitialState(lastMessage *api.Message, tools []api.Tool, thinkValue *api.ThinkValue) { prefill := lastMessage != nil && lastMessage.Role == "assistant" // Check both model capability AND request preference thinkingEnabled := thinkValue != nil && thinkValue.Bool() // thinkingEnabled should be set to false for tools if !thinkingEnabled { p.state = CogitoCollectingContent return } if prefill && lastMessage.Content != "" { p.state = CogitoCollectingContent return } // Note: for cogito, if there are tools, then we don't want to be thinking if len(tools) > 0 { p.state = CogitoCollectingContent return } p.state = CogitoCollectingThinking } func (p *CogitoParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool { p.setInitialState(lastMessage, tools, thinkValue) return tools } type cogitoEvent interface { isCogitoEvent() } type cogitoEventThinkingContent struct { content string } type cogitoEventContent struct { content string } type cogitoEventToolCall struct { toolCall api.ToolCall 
} func (cogitoEventThinkingContent) isCogitoEvent() {} func (cogitoEventContent) isCogitoEvent() {} func (cogitoEventToolCall) isCogitoEvent() {} func (p *CogitoParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) { p.buffer.WriteString(s) events := p.parseEvents() var toolCalls []api.ToolCall var contentSb strings.Builder var thinkingSb strings.Builder for _, event := range events { switch event := event.(type) { case cogitoEventToolCall: toolCalls = append(toolCalls, event.toolCall) case cogitoEventThinkingContent: thinkingSb.WriteString(event.content) case cogitoEventContent: contentSb.WriteString(event.content) } } return contentSb.String(), thinkingSb.String(), toolCalls, nil } func (p *CogitoParser) parseEvents() []cogitoEvent { var all []cogitoEvent keepLooping := true for keepLooping { var events []cogitoEvent events, keepLooping = p.eat() if len(events) > 0 { all = append(all, events...) } } return all } func (p *CogitoParser) eat() ([]cogitoEvent, bool) { var events []cogitoEvent bufStr := p.buffer.String() if bufStr == "" { return events, false } switch p.state { case CogitoCollectingThinking: if strings.Contains(bufStr, cogitoThinkingCloseTag) { // thinking[</think>] -> content split := strings.SplitN(bufStr, cogitoThinkingCloseTag, 2) thinking := split[0] thinking = strings.TrimRightFunc(thinking, unicode.IsSpace) remaining := split[1] remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(remaining) p.state = CogitoCollectingContent if len(thinking) > 0 { events = append(events, cogitoEventThinkingContent{content: thinking}) } return events, true } else if overlapLen := overlap(bufStr, cogitoThinkingCloseTag); overlapLen > 0 { // partial </think> beforePartialTag := bufStr[:len(bufStr)-overlapLen] trailingLen := trailingWhitespaceLen(beforePartialTag) ambiguousStart := len(beforePartialTag) - trailingLen unambiguous := bufStr[:ambiguousStart] ambiguous := 
bufStr[ambiguousStart:] p.buffer.Reset() p.buffer.WriteString(ambiguous) if len(unambiguous) > 0 { events = append(events, cogitoEventThinkingContent{content: unambiguous}) } return events, false } else { // otherwise its thinking content whitespaceLen := trailingWhitespaceLen(bufStr) ambiguousStart := len(bufStr) - whitespaceLen unambiguous := bufStr[:ambiguousStart] ambiguous := bufStr[ambiguousStart:] p.buffer.Reset() p.buffer.WriteString(ambiguous) if len(unambiguous) > 0 { events = append(events, cogitoEventThinkingContent{content: unambiguous}) } return events, false } case CogitoCollectingContent: switch { case strings.Contains(bufStr, cogitoToolCallsBeginTag): // content[<|tool▁calls▁begin|>] -> tool calls split := strings.SplitN(bufStr, cogitoToolCallsBeginTag, 2) contentBefore := strings.TrimRightFunc(split[0], unicode.IsSpace) remaining := split[1] p.buffer.Reset() p.buffer.WriteString(remaining) p.state = CogitoCollectingToolCalls if len(contentBefore) > 0 { events = append(events, cogitoEventContent{content: contentBefore}) } return events, true case strings.Contains(bufStr, cogitoToolOutputsBeginTag): // content[<|tool▁outputs▁begin|>] -> tool outputs split := strings.SplitN(bufStr, cogitoToolOutputsBeginTag, 2) contentBefore := strings.TrimRightFunc(split[0], unicode.IsSpace) remaining := split[1] p.buffer.Reset() p.buffer.WriteString(remaining) p.state = CogitoCollectingToolOutput if len(contentBefore) > 0 { events = append(events, cogitoEventContent{content: contentBefore}) } return events, true default: // otherwise its content p.buffer.Reset() if len(bufStr) > 0 { events = append(events, cogitoEventContent{content: bufStr}) } return events, false } case CogitoCollectingToolCalls: if idx := strings.Index(bufStr, cogitoToolCallBeginTag); idx != -1 { startIdx := idx + len(cogitoToolCallBeginTag) if endIdx := strings.Index(bufStr[startIdx:], cogitoToolCallEndTag); endIdx != -1 { toolCallContent := bufStr[startIdx : startIdx+endIdx] if toolCall, err 
:= p.parseToolCallContent(toolCallContent); err == nil { remaining := bufStr[startIdx+endIdx+len(cogitoToolCallEndTag):] remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(remaining) events = append(events, cogitoEventToolCall{toolCall: toolCall}) return events, true } else { slog.Warn("cogito tool call parsing failed", "error", err) } } } if idx := strings.Index(bufStr, cogitoToolCallsEndTag); idx != -1 { remaining := bufStr[idx+len(cogitoToolCallsEndTag):] remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(remaining) p.state = CogitoCollectingContent return events, true } return events, false case CogitoCollectingToolOutput: if idx := strings.Index(bufStr, cogitoToolOutputBeginTag); idx != -1 { startIdx := idx + len(cogitoToolOutputBeginTag) if endIdx := strings.Index(bufStr[startIdx:], cogitoToolOutputEndTag); endIdx != -1 { remaining := bufStr[startIdx+endIdx+len(cogitoToolOutputEndTag):] remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(remaining) return events, true } } if idx := strings.Index(bufStr, cogitoToolOutputsEndTag); idx != -1 { remaining := bufStr[idx+len(cogitoToolOutputsEndTag):] remaining = strings.TrimLeftFunc(remaining, unicode.IsSpace) p.buffer.Reset() p.buffer.WriteString(remaining) p.state = CogitoCollectingContent return events, true } return events, false } return events, false } func (p *CogitoParser) parseToolCallContent(content string) (api.ToolCall, error) { // Expected format: function<|tool▁sep|>tool_name\n```json\n{args}\n``` parts := strings.SplitN(content, cogitoToolSepTag, 2) if len(parts) < 2 { return api.ToolCall{}, errors.New("invalid format") } nameAndArgs := parts[1] jsonStart := strings.Index(nameAndArgs, "\n```json\n") if jsonStart == -1 { return api.ToolCall{}, errors.New("invalid format") } toolName := strings.TrimSpace(nameAndArgs[:jsonStart]) jsonContent := 
nameAndArgs[jsonStart+len("\n```json\n"):] jsonEnd := strings.Index(jsonContent, "\n```") if jsonEnd == -1 { return api.ToolCall{}, errors.New("invalid format") } argsJSON := jsonContent[:jsonEnd] var args api.ToolCallFunctionArguments if err := json.Unmarshal([]byte(argsJSON), &args); err != nil { return api.ToolCall{}, err } return api.ToolCall{ Function: api.ToolCallFunction{ Name: toolName, Arguments: args, }, }, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/olmo3_test.go
model/parsers/olmo3_test.go
package parsers import ( "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" ) func TestOlmo3Parser(t *testing.T) { tests := []struct { name string input string expectedContent string expectedThinking string expectedCalls []api.ToolCall }{ { name: "simple content", input: "Hello, how can I help you?", expectedContent: "Hello, how can I help you?", }, { name: "simple tool call", input: `<function_calls>get_weather(location="San Francisco")</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "San Francisco"}), }, }, }, }, { name: "content then tool call", input: `Let me check the weather.<function_calls>get_weather(location="NYC")</function_calls>`, expectedContent: "Let me check the weather.", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "NYC"}), }, }, }, }, { name: "tool call with multiple arguments", input: `<function_calls>book_flight(from="SFO", to="NYC", date="2024-01-15")</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "book_flight", Arguments: testArgs(map[string]any{ "from": "SFO", "to": "NYC", "date": "2024-01-15", }), }, }, }, }, { name: "multiple tool calls", input: `<function_calls>get_weather(location="San Francisco") get_weather(location="New York")</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "San Francisco"}), }, }, { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "New York"}), }, }, }, }, { name: "tool call with numeric argument", input: `<function_calls>set_temperature(value=72)</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "set_temperature", Arguments: testArgs(map[string]any{"value": int64(72)}), }, 
}, }, }, { name: "tool call with float argument", input: `<function_calls>set_price(amount=19.99)</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "set_price", Arguments: testArgs(map[string]any{"amount": 19.99}), }, }, }, }, { name: "tool call with boolean argument", input: `<function_calls>toggle_setting(enabled=true)</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "toggle_setting", Arguments: testArgs(map[string]any{"enabled": true}), }, }, }, }, { name: "tool call with null argument", input: `<function_calls>clear_value(field=null)</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "clear_value", Arguments: testArgs(map[string]any{"field": nil}), }, }, }, }, { name: "tool call with array argument", input: `<function_calls>process_items(items=["apple", "banana", "cherry"])</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "process_items", Arguments: testArgs(map[string]any{"items": []any{"apple", "banana", "cherry"}}), }, }, }, }, { name: "tool call with dict argument", input: `<function_calls>update_config(settings={"theme": "dark", "fontSize": 14})</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "update_config", Arguments: testArgs(map[string]any{ "settings": map[string]any{ "theme": "dark", "fontSize": int64(14), }, }), }, }, }, }, { name: "tool call with nested dict", input: `<function_calls>create_request(data={"user": {"name": "John", "age": 30}, "active": true})</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "create_request", Arguments: testArgs(map[string]any{ "data": map[string]any{ "user": map[string]any{ "name": "John", "age": int64(30), }, "active": true, }, }), }, }, }, }, { name: "tool call with no arguments", input: `<function_calls>get_current_time()</function_calls>`, expectedCalls: []api.ToolCall{ { 
Function: api.ToolCallFunction{ Name: "get_current_time", Arguments: testArgs(map[string]any{}), }, }, }, }, { name: "tool call with single quotes", input: `<function_calls>search(query='hello world')</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "search", Arguments: testArgs(map[string]any{"query": "hello world"}), }, }, }, }, { name: "tool call with escaped quotes", input: `<function_calls>search(query="say \"hello\"")</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "search", Arguments: testArgs(map[string]any{"query": `say "hello"`}), }, }, }, }, { name: "tool call with mixed argument types", input: `<function_calls>create_user(name="John", age=30, active=true)</function_calls>`, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "create_user", Arguments: testArgs(map[string]any{ "name": "John", "age": int64(30), "active": true, }), }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := &Olmo3Parser{} p.Init(nil, nil, nil) content, thinking, calls, err := p.Add(tt.input, false) if err != nil { t.Fatalf("unexpected error: %v", err) } // Drain remaining content finalContent, finalThinking, finalCalls, err := p.Add("", true) if err != nil { t.Fatalf("unexpected error on done: %v", err) } content += finalContent thinking += finalThinking calls = append(calls, finalCalls...) 
if diff := cmp.Diff(content, tt.expectedContent); diff != "" { t.Errorf("content mismatch (-got +want):\n%s", diff) } if diff := cmp.Diff(thinking, tt.expectedThinking); diff != "" { t.Errorf("thinking mismatch (-got +want):\n%s", diff) } if diff := cmp.Diff(calls, tt.expectedCalls, argsComparer); diff != "" { t.Errorf("calls mismatch (-got +want):\n%s", diff) } }) } } func TestOlmo3Parser_Streaming(t *testing.T) { tests := []struct { name string chunks []string expectedContent string expectedCalls []api.ToolCall }{ { name: "streaming content", chunks: []string{"Hello, ", "how ", "can I help?"}, expectedContent: "Hello, how can I help?", }, { name: "streaming tool call", chunks: []string{"<function_", "calls>get_weather", "(location=\"SF\")", "</function_calls>"}, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "SF"}), }, }, }, }, { name: "streaming content then tool call", chunks: []string{"Let me check.", "<function_calls>", "get_weather(location=\"NYC\")", "</function_calls>"}, expectedContent: "Let me check.", expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "NYC"}), }, }, }, }, { name: "tool call tag split across chunks", chunks: []string{"<func", "tion_calls>test()</function_calls>"}, expectedCalls: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "test", Arguments: testArgs(map[string]any{}), }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := &Olmo3Parser{} p.Init(nil, nil, nil) var allContent string var allCalls []api.ToolCall for _, chunk := range tt.chunks { content, _, calls, err := p.Add(chunk, false) if err != nil { t.Fatalf("unexpected error: %v", err) } allContent += content allCalls = append(allCalls, calls...) 
} // Drain content, _, calls, err := p.Add("", true) if err != nil { t.Fatalf("unexpected error on done: %v", err) } allContent += content allCalls = append(allCalls, calls...) if diff := cmp.Diff(allContent, tt.expectedContent); diff != "" { t.Errorf("content mismatch (-got +want):\n%s", diff) } if diff := cmp.Diff(allCalls, tt.expectedCalls, argsComparer); diff != "" { t.Errorf("calls mismatch (-got +want):\n%s", diff) } }) } } func TestOlmo3Parser_HasToolSupport(t *testing.T) { p := &Olmo3Parser{} if !p.HasToolSupport() { t.Error("expected HasToolSupport to return true") } } func TestOlmo3Parser_HasThinkingSupport(t *testing.T) { p := &Olmo3Parser{} if p.HasThinkingSupport() { t.Error("expected HasThinkingSupport to return false") } } func TestParseOlmo3FunctionCalls(t *testing.T) { tests := []struct { name string input string expected []api.ToolCall wantErr bool }{ { name: "simple call", input: `get_weather(location="SF")`, expected: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "SF"}), }, }, }, }, { name: "multiple args", input: `send_email(to="user@example.com", subject="Hello", body="Test message")`, expected: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "send_email", Arguments: testArgs(map[string]any{ "to": "user@example.com", "subject": "Hello", "body": "Test message", }), }, }, }, }, { name: "multiple calls with newlines", input: `get_weather(location="SF") get_time(timezone="PST")`, expected: []api.ToolCall{ { Function: api.ToolCallFunction{ Name: "get_weather", Arguments: testArgs(map[string]any{"location": "SF"}), }, }, { Function: api.ToolCallFunction{ Name: "get_time", Arguments: testArgs(map[string]any{"timezone": "PST"}), }, }, }, }, { name: "empty input", input: "", expected: nil, }, { name: "whitespace only", input: " \n ", expected: nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { calls, err := parseOlmo3FunctionCalls(tt.input) if (err != 
nil) != tt.wantErr { t.Errorf("parseOlmo3FunctionCalls() error = %v, wantErr %v", err, tt.wantErr) return } if diff := cmp.Diff(calls, tt.expected, argsComparer); diff != "" { t.Errorf("calls mismatch (-got +want):\n%s", diff) } }) } } func TestParseOlmo3Value(t *testing.T) { tests := []struct { name string input string expected any }{ {"string double quotes", `"hello"`, "hello"}, {"string single quotes", `'hello'`, "hello"}, {"integer", "42", int64(42)}, {"negative integer", "-10", int64(-10)}, {"float", "3.14", 3.14}, {"boolean true", "true", true}, {"boolean True", "True", true}, {"boolean false", "false", false}, {"null", "null", nil}, {"None", "None", nil}, {"empty array", "[]", []any{}}, {"array with strings", `["a", "b"]`, []any{"a", "b"}}, {"array with numbers", "[1, 2, 3]", []any{int64(1), int64(2), int64(3)}}, {"empty object", "{}", map[string]any{}}, {"simple object", `{"name": "John"}`, map[string]any{"name": "John"}}, {"object with number", `{"age": 30}`, map[string]any{"age": int64(30)}}, {"object with multiple keys", `{"a": 1, "b": 2}`, map[string]any{"a": int64(1), "b": int64(2)}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result, err := parseOlmo3Value(tt.input) if err != nil { t.Fatalf("unexpected error: %v", err) } if diff := cmp.Diff(result, tt.expected); diff != "" { t.Errorf("value mismatch (-got +want):\n%s", diff) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/model/parsers/functiongemma.go
model/parsers/functiongemma.go
package parsers import ( "fmt" "regexp" "strings" "github.com/ollama/ollama/api" ) type FunctionGemmaParserState int const ( FunctionGemmaCollectingContent FunctionGemmaParserState = iota FunctionGemmaCollectingToolCalls ) const ( functionGemmaFunctionCallOpen = "<start_function_call>" functionGemmaFunctionCallClose = "<end_function_call>" ) // This format uses <start_function_call>call:name{args}<end_function_call> for tool calls. type FunctionGemmaParser struct { state FunctionGemmaParserState buffer strings.Builder tools []api.Tool } func (p *FunctionGemmaParser) HasToolSupport() bool { return true } func (p *FunctionGemmaParser) HasThinkingSupport() bool { return false } func (p *FunctionGemmaParser) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool { p.tools = tools p.state = FunctionGemmaCollectingContent return tools } type functionGemmaEvent interface { isFunctionGemmaEvent() } type FunctionGemmaEventContent struct { content string } type functionGemmaEventToolCall struct { toolCall api.ToolCall } func (FunctionGemmaEventContent) isFunctionGemmaEvent() {} func (functionGemmaEventToolCall) isFunctionGemmaEvent() {} func (p *FunctionGemmaParser) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) { p.buffer.WriteString(s) events := p.parseEvents() var toolCalls []api.ToolCall var contentSb strings.Builder for _, event := range events { switch event := event.(type) { case functionGemmaEventToolCall: toolCalls = append(toolCalls, event.toolCall) case FunctionGemmaEventContent: contentSb.WriteString(event.content) } } return contentSb.String(), "", toolCalls, nil } func (p *FunctionGemmaParser) parseEvents() []functionGemmaEvent { var all []functionGemmaEvent keepLooping := true for keepLooping { var events []functionGemmaEvent events, keepLooping = p.eat() if len(events) > 0 { all = append(all, events...) 
} } return all } // emitWithPartialCheck extracts unambiguous content before a potential partial tag func (p *FunctionGemmaParser) emitWithPartialCheck(bufStr, tag string) (unambiguous, ambiguous string) { if overlapLen := overlap(bufStr, tag); overlapLen > 0 { beforePartialTag := bufStr[:len(bufStr)-overlapLen] return beforePartialTag, bufStr[len(beforePartialTag):] } return bufStr, "" } func (p *FunctionGemmaParser) eat() ([]functionGemmaEvent, bool) { bufStr := p.buffer.String() if bufStr == "" { return nil, false } switch p.state { case FunctionGemmaCollectingContent: if strings.Contains(bufStr, functionGemmaFunctionCallOpen) { split := strings.SplitN(bufStr, functionGemmaFunctionCallOpen, 2) content := split[0] p.buffer.Reset() p.buffer.WriteString(split[1]) p.state = FunctionGemmaCollectingToolCalls if content != "" { return []functionGemmaEvent{FunctionGemmaEventContent{content: content}}, true } return nil, true } unambig, ambig := p.emitWithPartialCheck(bufStr, functionGemmaFunctionCallOpen) p.buffer.Reset() p.buffer.WriteString(ambig) if unambig != "" { return []functionGemmaEvent{FunctionGemmaEventContent{content: unambig}}, false } return nil, false case FunctionGemmaCollectingToolCalls: if strings.Contains(bufStr, functionGemmaFunctionCallClose) { split := strings.SplitN(bufStr, functionGemmaFunctionCallClose, 2) remaining := split[1] p.buffer.Reset() p.buffer.WriteString(remaining) var events []functionGemmaEvent if tc, err := p.parseToolCall(split[0]); err == nil { events = append(events, functionGemmaEventToolCall{toolCall: tc}) } if !strings.Contains(remaining, functionGemmaFunctionCallOpen) { p.state = FunctionGemmaCollectingContent } return events, true } return nil, false } return nil, false } // Matches call:function_name{args} var functionGemmaCallRegex = regexp.MustCompile(`call:([^{]+)\{(.*)\}`) func (p *FunctionGemmaParser) parseToolCall(content string) (api.ToolCall, error) { toolCall := api.ToolCall{} // Extract function name and 
arguments match := functionGemmaCallRegex.FindStringSubmatch(content) if len(match) < 3 { return toolCall, nil } toolCall.Function.Name = match[1] argsStr := match[2] // Parse arguments toolCall.Function.Arguments = p.parseArguments(argsStr) return toolCall, nil } // parseArguments parses the key:value,key:value format func (p *FunctionGemmaParser) parseArguments(argsStr string) api.ToolCallFunctionArguments { args := api.NewToolCallFunctionArguments() if argsStr == "" { return args } // Split by comma, but handle nested structures parts := p.splitArguments(argsStr) for _, part := range parts { // Find the first colon to split key:value colonIdx := strings.Index(part, ":") if colonIdx == -1 { continue } key := part[:colonIdx] value := part[colonIdx+1:] // Parse the value args.Set(key, p.parseValue(value)) } return args } // splitArguments splits arguments by comma, respecting nested structures func (p *FunctionGemmaParser) splitArguments(argsStr string) []string { var parts []string var current strings.Builder depth := 0 inEscape := false for i := 0; i < len(argsStr); i++ { ch := argsStr[i] // Check for <escape> tags if i+8 <= len(argsStr) && argsStr[i:i+8] == "<escape>" { inEscape = !inEscape current.WriteString("<escape>") i += 7 // Skip the rest of <escape> continue } if !inEscape { switch ch { case '{', '[': depth++ current.WriteByte(ch) case '}', ']': depth-- current.WriteByte(ch) case ',': if depth == 0 { if current.Len() > 0 { parts = append(parts, current.String()) current.Reset() } continue } current.WriteByte(ch) default: current.WriteByte(ch) } } else { current.WriteByte(ch) } } if current.Len() > 0 { parts = append(parts, current.String()) } return parts } // parseValue parses a single value from the FunctionGemma format func (p *FunctionGemmaParser) parseValue(value string) any { // Check for escaped string if strings.HasPrefix(value, "<escape>") && strings.HasSuffix(value, "<escape>") { // Remove the escape tags return value[8 : len(value)-8] } // 
Check for boolean if value == "true" { return true } if value == "false" { return false } // Check for number if num, ok := parseNumber(value); ok { return num } // Check for array if strings.HasPrefix(value, "[") && strings.HasSuffix(value, "]") { return p.parseArray(value[1 : len(value)-1]) } // Check for object if strings.HasPrefix(value, "{") && strings.HasSuffix(value, "}") { return p.parseObject(value[1 : len(value)-1]) } // Default to string return value } // parseArray parses an array value func (p *FunctionGemmaParser) parseArray(content string) []any { var result []any parts := p.splitArguments(content) for _, part := range parts { result = append(result, p.parseValue(part)) } return result } // parseObject parses an object value func (p *FunctionGemmaParser) parseObject(content string) map[string]any { result := make(map[string]any) parts := p.splitArguments(content) for _, part := range parts { colonIdx := strings.Index(part, ":") if colonIdx == -1 { continue } key := part[:colonIdx] value := part[colonIdx+1:] result[key] = p.parseValue(value) } return result } // parseNumber tries to parse a string as a number func parseNumber(s string) (any, bool) { // Try integer first var intVal int64 if _, err := fmt.Sscanf(s, "%d", &intVal); err == nil { // Check if the entire string was consumed if fmt.Sprintf("%d", intVal) == s { return intVal, true } } // Try float var floatVal float64 if _, err := fmt.Sscanf(s, "%f", &floatVal); err == nil { return floatVal, true } return nil, false }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/version/version.go
version/version.go
package version var Version string = "0.0.0"
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/middleware/openai_test.go
middleware/openai_test.go
package middleware import ( "bytes" "encoding/base64" "encoding/json" "io" "net/http" "net/http/httptest" "reflect" "strings" "testing" "time" "github.com/gin-gonic/gin" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" "github.com/ollama/ollama/openai" ) // testPropsMap creates a ToolPropertiesMap from a map (convenience function for tests) func testPropsMap(m map[string]api.ToolProperty) *api.ToolPropertiesMap { props := api.NewToolPropertiesMap() for k, v := range m { props.Set(k, v) } return props } // testArgs creates ToolCallFunctionArguments from a map (convenience function for tests) func testArgs(m map[string]any) api.ToolCallFunctionArguments { args := api.NewToolCallFunctionArguments() for k, v := range m { args.Set(k, v) } return args } // argsComparer provides cmp options for comparing ToolCallFunctionArguments by value var argsComparer = cmp.Comparer(func(a, b api.ToolCallFunctionArguments) bool { return cmp.Equal(a.ToMap(), b.ToMap()) }) // propsComparer provides cmp options for comparing ToolPropertiesMap by value var propsComparer = cmp.Comparer(func(a, b *api.ToolPropertiesMap) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } return cmp.Equal(a.ToMap(), b.ToMap()) }) const ( prefix = `data:image/jpeg;base64,` image = `iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=` ) var ( False = false True = true ) func captureRequestMiddleware(capturedRequest any) gin.HandlerFunc { return func(c *gin.Context) { bodyBytes, _ := io.ReadAll(c.Request.Body) c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes)) err := json.Unmarshal(bodyBytes, capturedRequest) if err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, "failed to unmarshal request") } c.Next() } } func TestChatMiddleware(t *testing.T) { type testCase struct { name string body string req api.ChatRequest err openai.ErrorResponse } var capturedRequest *api.ChatRequest testCases := []testCase{ 
{ name: "chat handler", body: `{ "model": "test-model", "messages": [ {"role": "user", "content": "Hello"} ] }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "Hello", }, }, Options: map[string]any{ "temperature": 1.0, "top_p": 1.0, }, Stream: &False, }, }, { name: "chat handler with options", body: `{ "model": "test-model", "messages": [ {"role": "user", "content": "Hello"} ], "stream": true, "max_tokens": 999, "seed": 123, "stop": ["\n", "stop"], "temperature": 3.0, "frequency_penalty": 4.0, "presence_penalty": 5.0, "top_p": 6.0, "response_format": {"type": "json_object"} }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "Hello", }, }, Options: map[string]any{ "num_predict": 999.0, // float because JSON doesn't distinguish between float and int "seed": 123.0, "stop": []any{"\n", "stop"}, "temperature": 3.0, "frequency_penalty": 4.0, "presence_penalty": 5.0, "top_p": 6.0, }, Format: json.RawMessage(`"json"`), Stream: &True, }, }, { name: "chat handler with streaming usage", body: `{ "model": "test-model", "messages": [ {"role": "user", "content": "Hello"} ], "stream": true, "stream_options": {"include_usage": true}, "max_tokens": 999, "seed": 123, "stop": ["\n", "stop"], "temperature": 3.0, "frequency_penalty": 4.0, "presence_penalty": 5.0, "top_p": 6.0, "response_format": {"type": "json_object"} }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "Hello", }, }, Options: map[string]any{ "num_predict": 999.0, // float because JSON doesn't distinguish between float and int "seed": 123.0, "stop": []any{"\n", "stop"}, "temperature": 3.0, "frequency_penalty": 4.0, "presence_penalty": 5.0, "top_p": 6.0, }, Format: json.RawMessage(`"json"`), Stream: &True, }, }, { name: "chat handler with image content", body: `{ "model": "test-model", "messages": [ { "role": "user", "content": [ { "type": "text", "text": "Hello" }, { "type": 
"image_url", "image_url": { "url": "` + prefix + image + `" } } ] } ] }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "Hello", }, { Role: "user", Images: []api.ImageData{ func() []byte { img, _ := base64.StdEncoding.DecodeString(image) return img }(), }, }, }, Options: map[string]any{ "temperature": 1.0, "top_p": 1.0, }, Stream: &False, }, }, { name: "chat handler with tools", body: `{ "model": "test-model", "messages": [ {"role": "user", "content": "What's the weather like in Paris Today?"}, {"role": "assistant", "tool_calls": [{"id": "id", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\"location\": \"Paris, France\", \"format\": \"celsius\"}"}}]} ] }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "What's the weather like in Paris Today?", }, { Role: "assistant", ToolCalls: []api.ToolCall{ { ID: "id", Function: api.ToolCallFunction{ Name: "get_current_weather", Arguments: testArgs(map[string]any{ "location": "Paris, France", "format": "celsius", }), }, }, }, }, }, Options: map[string]any{ "temperature": 1.0, "top_p": 1.0, }, Stream: &False, }, }, { name: "chat handler with tools and content", body: `{ "model": "test-model", "messages": [ {"role": "user", "content": "What's the weather like in Paris Today?"}, {"role": "assistant", "content": "Let's see what the weather is like in Paris", "tool_calls": [{"id": "id", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\"location\": \"Paris, France\", \"format\": \"celsius\"}"}}]} ] }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "What's the weather like in Paris Today?", }, { Role: "assistant", Content: "Let's see what the weather is like in Paris", ToolCalls: []api.ToolCall{ { ID: "id", Function: api.ToolCallFunction{ Name: "get_current_weather", Arguments: testArgs(map[string]any{ "location": "Paris, France", 
"format": "celsius", }), }, }, }, }, }, Options: map[string]any{ "temperature": 1.0, "top_p": 1.0, }, Stream: &False, }, }, { name: "chat handler with tools and empty content", body: `{ "model": "test-model", "messages": [ {"role": "user", "content": "What's the weather like in Paris Today?"}, {"role": "assistant", "content": "", "tool_calls": [{"id": "id", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\"location\": \"Paris, France\", \"format\": \"celsius\"}"}}]} ] }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "What's the weather like in Paris Today?", }, { Role: "assistant", ToolCalls: []api.ToolCall{ { ID: "id", Function: api.ToolCallFunction{ Name: "get_current_weather", Arguments: testArgs(map[string]any{ "location": "Paris, France", "format": "celsius", }), }, }, }, }, }, Options: map[string]any{ "temperature": 1.0, "top_p": 1.0, }, Stream: &False, }, }, { name: "chat handler with tools and thinking content", body: `{ "model": "test-model", "messages": [ {"role": "user", "content": "What's the weather like in Paris Today?"}, {"role": "assistant", "reasoning": "Let's see what the weather is like in Paris", "tool_calls": [{"id": "id", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\"location\": \"Paris, France\", \"format\": \"celsius\"}"}}]} ] }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "What's the weather like in Paris Today?", }, { Role: "assistant", Thinking: "Let's see what the weather is like in Paris", ToolCalls: []api.ToolCall{ { ID: "id", Function: api.ToolCallFunction{ Name: "get_current_weather", Arguments: testArgs(map[string]any{ "location": "Paris, France", "format": "celsius", }), }, }, }, }, }, Options: map[string]any{ "temperature": 1.0, "top_p": 1.0, }, Stream: &False, }, }, { name: "tool response with call ID", body: `{ "model": "test-model", "messages": [ {"role": "user", 
"content": "What's the weather like in Paris Today?"}, {"role": "assistant", "tool_calls": [{"id": "id_abc", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\"location\": \"Paris, France\", \"format\": \"celsius\"}"}}]}, {"role": "tool", "tool_call_id": "id_abc", "content": "The weather in Paris is 20 degrees Celsius"} ] }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "What's the weather like in Paris Today?", }, { Role: "assistant", ToolCalls: []api.ToolCall{ { ID: "id_abc", Function: api.ToolCallFunction{ Name: "get_current_weather", Arguments: testArgs(map[string]any{ "location": "Paris, France", "format": "celsius", }), }, }, }, }, { Role: "tool", Content: "The weather in Paris is 20 degrees Celsius", ToolName: "get_current_weather", ToolCallID: "id_abc", }, }, Options: map[string]any{ "temperature": 1.0, "top_p": 1.0, }, Stream: &False, }, }, { name: "tool response with name", body: `{ "model": "test-model", "messages": [ {"role": "user", "content": "What's the weather like in Paris Today?"}, {"role": "assistant", "tool_calls": [{"id": "id", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\"location\": \"Paris, France\", \"format\": \"celsius\"}"}}]}, {"role": "tool", "name": "get_current_weather", "content": "The weather in Paris is 20 degrees Celsius"} ] }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "What's the weather like in Paris Today?", }, { Role: "assistant", ToolCalls: []api.ToolCall{ { ID: "id", Function: api.ToolCallFunction{ Name: "get_current_weather", Arguments: testArgs(map[string]any{ "location": "Paris, France", "format": "celsius", }), }, }, }, }, { Role: "tool", Content: "The weather in Paris is 20 degrees Celsius", ToolName: "get_current_weather", }, }, Options: map[string]any{ "temperature": 1.0, "top_p": 1.0, }, Stream: &False, }, }, { name: "chat handler with streaming tools", 
body: `{ "model": "test-model", "messages": [ {"role": "user", "content": "What's the weather like in Paris?"} ], "stream": true, "tools": [{ "type": "function", "function": { "name": "get_weather", "description": "Get the current weather", "parameters": { "type": "object", "required": ["location"], "properties": { "location": { "type": "string", "description": "The city and state" }, "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] } } } } }] }`, req: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "What's the weather like in Paris?", }, }, Tools: []api.Tool{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get the current weather", Parameters: api.ToolFunctionParameters{ Type: "object", Required: []string{"location"}, Properties: testPropsMap(map[string]api.ToolProperty{ "location": { Type: api.PropertyType{"string"}, Description: "The city and state", }, "unit": { Type: api.PropertyType{"string"}, Enum: []any{"celsius", "fahrenheit"}, }, }), }, }, }, }, Options: map[string]any{ "temperature": 1.0, "top_p": 1.0, }, Stream: &True, }, }, { name: "chat handler error forwarding", body: `{ "model": "test-model", "messages": [ {"role": "user", "content": 2} ] }`, err: openai.ErrorResponse{ Error: openai.Error{ Message: "invalid message content type: float64", Type: "invalid_request_error", }, }, }, } endpoint := func(c *gin.Context) { c.Status(http.StatusOK) } gin.SetMode(gin.TestMode) router := gin.New() router.Use(ChatMiddleware(), captureRequestMiddleware(&capturedRequest)) router.Handle(http.MethodPost, "/api/chat", endpoint) for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { req, _ := http.NewRequest(http.MethodPost, "/api/chat", strings.NewReader(tc.body)) req.Header.Set("Content-Type", "application/json") defer func() { capturedRequest = nil }() resp := httptest.NewRecorder() router.ServeHTTP(resp, req) var errResp openai.ErrorResponse if resp.Code != 
http.StatusOK { if err := json.Unmarshal(resp.Body.Bytes(), &errResp); err != nil { t.Fatal(err) } return } if diff := cmp.Diff(&tc.req, capturedRequest, argsComparer, propsComparer); diff != "" { t.Fatalf("requests did not match: %+v", diff) } if diff := cmp.Diff(tc.err, errResp); diff != "" { t.Fatalf("errors did not match for %s:\n%s", tc.name, diff) } }) } } func TestCompletionsMiddleware(t *testing.T) { type testCase struct { name string body string req api.GenerateRequest err openai.ErrorResponse } var capturedRequest *api.GenerateRequest testCases := []testCase{ { name: "completions handler", body: `{ "model": "test-model", "prompt": "Hello", "temperature": 0.8, "stop": ["\n", "stop"], "suffix": "suffix" }`, req: api.GenerateRequest{ Model: "test-model", Prompt: "Hello", Options: map[string]any{ "frequency_penalty": 0.0, "presence_penalty": 0.0, "temperature": 0.8, "top_p": 1.0, "stop": []any{"\n", "stop"}, }, Suffix: "suffix", Stream: &False, }, }, { name: "completions handler stream", body: `{ "model": "test-model", "prompt": "Hello", "stream": true, "temperature": 0.8, "stop": ["\n", "stop"], "suffix": "suffix" }`, req: api.GenerateRequest{ Model: "test-model", Prompt: "Hello", Options: map[string]any{ "frequency_penalty": 0.0, "presence_penalty": 0.0, "temperature": 0.8, "top_p": 1.0, "stop": []any{"\n", "stop"}, }, Suffix: "suffix", Stream: &True, }, }, { name: "completions handler stream with usage", body: `{ "model": "test-model", "prompt": "Hello", "stream": true, "stream_options": {"include_usage": true}, "temperature": 0.8, "stop": ["\n", "stop"], "suffix": "suffix" }`, req: api.GenerateRequest{ Model: "test-model", Prompt: "Hello", Options: map[string]any{ "frequency_penalty": 0.0, "presence_penalty": 0.0, "temperature": 0.8, "top_p": 1.0, "stop": []any{"\n", "stop"}, }, Suffix: "suffix", Stream: &True, }, }, { name: "completions handler error forwarding", body: `{ "model": "test-model", "prompt": "Hello", "temperature": null, "stop": [1, 2], 
"suffix": "suffix" }`, err: openai.ErrorResponse{ Error: openai.Error{ Message: "invalid type for 'stop' field: float64", Type: "invalid_request_error", }, }, }, } endpoint := func(c *gin.Context) { c.Status(http.StatusOK) } gin.SetMode(gin.TestMode) router := gin.New() router.Use(CompletionsMiddleware(), captureRequestMiddleware(&capturedRequest)) router.Handle(http.MethodPost, "/api/generate", endpoint) for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { req, _ := http.NewRequest(http.MethodPost, "/api/generate", strings.NewReader(tc.body)) req.Header.Set("Content-Type", "application/json") resp := httptest.NewRecorder() router.ServeHTTP(resp, req) var errResp openai.ErrorResponse if resp.Code != http.StatusOK { if err := json.Unmarshal(resp.Body.Bytes(), &errResp); err != nil { t.Fatal(err) } } if capturedRequest != nil && !reflect.DeepEqual(tc.req, *capturedRequest) { t.Fatal("requests did not match") } if !reflect.DeepEqual(tc.err, errResp) { t.Fatal("errors did not match") } capturedRequest = nil }) } } func TestEmbeddingsMiddleware(t *testing.T) { type testCase struct { name string body string req api.EmbedRequest err openai.ErrorResponse } var capturedRequest *api.EmbedRequest testCases := []testCase{ { name: "embed handler single input", body: `{ "input": "Hello", "model": "test-model" }`, req: api.EmbedRequest{ Input: "Hello", Model: "test-model", }, }, { name: "embed handler batch input", body: `{ "input": ["Hello", "World"], "model": "test-model" }`, req: api.EmbedRequest{ Input: []any{"Hello", "World"}, Model: "test-model", }, }, { name: "embed handler error forwarding", body: `{ "model": "test-model" }`, err: openai.ErrorResponse{ Error: openai.Error{ Message: "invalid input", Type: "invalid_request_error", }, }, }, } endpoint := func(c *gin.Context) { c.Status(http.StatusOK) } gin.SetMode(gin.TestMode) router := gin.New() router.Use(EmbeddingsMiddleware(), captureRequestMiddleware(&capturedRequest)) router.Handle(http.MethodPost, 
"/api/embed", endpoint) for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { req, _ := http.NewRequest(http.MethodPost, "/api/embed", strings.NewReader(tc.body)) req.Header.Set("Content-Type", "application/json") resp := httptest.NewRecorder() router.ServeHTTP(resp, req) var errResp openai.ErrorResponse if resp.Code != http.StatusOK { if err := json.Unmarshal(resp.Body.Bytes(), &errResp); err != nil { t.Fatal(err) } } if capturedRequest != nil && !reflect.DeepEqual(tc.req, *capturedRequest) { t.Fatal("requests did not match") } if !reflect.DeepEqual(tc.err, errResp) { t.Fatal("errors did not match") } capturedRequest = nil }) } } func TestListMiddleware(t *testing.T) { type testCase struct { name string endpoint func(c *gin.Context) resp string } testCases := []testCase{ { name: "list handler", endpoint: func(c *gin.Context) { c.JSON(http.StatusOK, api.ListResponse{ Models: []api.ListModelResponse{ { Name: "test-model", ModifiedAt: time.Unix(int64(1686935002), 0).UTC(), }, }, }) }, resp: `{ "object": "list", "data": [ { "id": "test-model", "object": "model", "created": 1686935002, "owned_by": "library" } ] }`, }, { name: "list handler empty output", endpoint: func(c *gin.Context) { c.JSON(http.StatusOK, api.ListResponse{}) }, resp: `{ "object": "list", "data": null }`, }, } gin.SetMode(gin.TestMode) for _, tc := range testCases { router := gin.New() router.Use(ListMiddleware()) router.Handle(http.MethodGet, "/api/tags", tc.endpoint) req, _ := http.NewRequest(http.MethodGet, "/api/tags", nil) resp := httptest.NewRecorder() router.ServeHTTP(resp, req) var expected, actual map[string]any err := json.Unmarshal([]byte(tc.resp), &expected) if err != nil { t.Fatalf("failed to unmarshal expected response: %v", err) } err = json.Unmarshal(resp.Body.Bytes(), &actual) if err != nil { t.Fatalf("failed to unmarshal actual response: %v", err) } if !reflect.DeepEqual(expected, actual) { t.Errorf("responses did not match\nExpected: %+v\nActual: %+v", expected, 
actual) } } } func TestRetrieveMiddleware(t *testing.T) { type testCase struct { name string endpoint func(c *gin.Context) resp string } testCases := []testCase{ { name: "retrieve handler", endpoint: func(c *gin.Context) { c.JSON(http.StatusOK, api.ShowResponse{ ModifiedAt: time.Unix(int64(1686935002), 0).UTC(), }) }, resp: `{ "id":"test-model", "object":"model", "created":1686935002, "owned_by":"library"} `, }, { name: "retrieve handler error forwarding", endpoint: func(c *gin.Context) { c.JSON(http.StatusBadRequest, gin.H{"error": "model not found"}) }, resp: `{ "error": { "code": null, "message": "model not found", "param": null, "type": "api_error" } }`, }, } gin.SetMode(gin.TestMode) for _, tc := range testCases { router := gin.New() router.Use(RetrieveMiddleware()) router.Handle(http.MethodGet, "/api/show/:model", tc.endpoint) req, _ := http.NewRequest(http.MethodGet, "/api/show/test-model", nil) resp := httptest.NewRecorder() router.ServeHTTP(resp, req) var expected, actual map[string]any err := json.Unmarshal([]byte(tc.resp), &expected) if err != nil { t.Fatalf("failed to unmarshal expected response: %v", err) } err = json.Unmarshal(resp.Body.Bytes(), &actual) if err != nil { t.Fatalf("failed to unmarshal actual response: %v", err) } if !reflect.DeepEqual(expected, actual) { t.Errorf("responses did not match\nExpected: %+v\nActual: %+v", expected, actual) } } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/middleware/openai_encoding_format_test.go
middleware/openai_encoding_format_test.go
package middleware import ( "encoding/base64" "encoding/json" "net/http" "net/http/httptest" "strings" "testing" "github.com/gin-gonic/gin" "github.com/ollama/ollama/api" "github.com/ollama/ollama/openai" ) func TestEmbeddingsMiddleware_EncodingFormats(t *testing.T) { testCases := []struct { name string encodingFormat string expectType string // "array" or "string" verifyBase64 bool }{ {"float format", "float", "array", false}, {"base64 format", "base64", "string", true}, {"default format", "", "array", false}, } gin.SetMode(gin.TestMode) endpoint := func(c *gin.Context) { resp := api.EmbedResponse{ Embeddings: [][]float32{{0.1, -0.2, 0.3}}, PromptEvalCount: 5, } c.JSON(http.StatusOK, resp) } router := gin.New() router.Use(EmbeddingsMiddleware()) router.Handle(http.MethodPost, "/api/embed", endpoint) for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { body := `{"input": "test", "model": "test-model"` if tc.encodingFormat != "" { body += `, "encoding_format": "` + tc.encodingFormat + `"` } body += `}` req, _ := http.NewRequest(http.MethodPost, "/api/embed", strings.NewReader(body)) req.Header.Set("Content-Type", "application/json") resp := httptest.NewRecorder() router.ServeHTTP(resp, req) if resp.Code != http.StatusOK { t.Fatalf("expected status 200, got %d", resp.Code) } var result openai.EmbeddingList if err := json.Unmarshal(resp.Body.Bytes(), &result); err != nil { t.Fatalf("failed to unmarshal response: %v", err) } if len(result.Data) != 1 { t.Fatalf("expected 1 embedding, got %d", len(result.Data)) } switch tc.expectType { case "array": if _, ok := result.Data[0].Embedding.([]interface{}); !ok { t.Errorf("expected array, got %T", result.Data[0].Embedding) } case "string": embStr, ok := result.Data[0].Embedding.(string) if !ok { t.Errorf("expected string, got %T", result.Data[0].Embedding) } else if tc.verifyBase64 { decoded, err := base64.StdEncoding.DecodeString(embStr) if err != nil { t.Errorf("invalid base64: %v", err) } else if len(decoded) 
!= 12 { t.Errorf("expected 12 bytes, got %d", len(decoded)) } } } }) } } func TestEmbeddingsMiddleware_BatchWithBase64(t *testing.T) { gin.SetMode(gin.TestMode) endpoint := func(c *gin.Context) { resp := api.EmbedResponse{ Embeddings: [][]float32{ {0.1, 0.2}, {0.3, 0.4}, {0.5, 0.6}, }, PromptEvalCount: 10, } c.JSON(http.StatusOK, resp) } router := gin.New() router.Use(EmbeddingsMiddleware()) router.Handle(http.MethodPost, "/api/embed", endpoint) body := `{ "input": ["hello", "world", "test"], "model": "test-model", "encoding_format": "base64" }` req, _ := http.NewRequest(http.MethodPost, "/api/embed", strings.NewReader(body)) req.Header.Set("Content-Type", "application/json") resp := httptest.NewRecorder() router.ServeHTTP(resp, req) if resp.Code != http.StatusOK { t.Fatalf("expected status 200, got %d", resp.Code) } var result openai.EmbeddingList if err := json.Unmarshal(resp.Body.Bytes(), &result); err != nil { t.Fatalf("failed to unmarshal response: %v", err) } if len(result.Data) != 3 { t.Fatalf("expected 3 embeddings, got %d", len(result.Data)) } // All should be base64 strings for i := range 3 { embeddingStr, ok := result.Data[i].Embedding.(string) if !ok { t.Errorf("embedding %d: expected string, got %T", i, result.Data[i].Embedding) continue } // Verify it's valid base64 if _, err := base64.StdEncoding.DecodeString(embeddingStr); err != nil { t.Errorf("embedding %d: invalid base64: %v", i, err) } // Check index if result.Data[i].Index != i { t.Errorf("embedding %d: expected index %d, got %d", i, i, result.Data[i].Index) } } } func TestEmbeddingsMiddleware_InvalidEncodingFormat(t *testing.T) { gin.SetMode(gin.TestMode) endpoint := func(c *gin.Context) { c.Status(http.StatusOK) } router := gin.New() router.Use(EmbeddingsMiddleware()) router.Handle(http.MethodPost, "/api/embed", endpoint) testCases := []struct { name string encodingFormat string shouldFail bool }{ {"valid: float", "float", false}, {"valid: base64", "base64", false}, {"valid: FLOAT 
(uppercase)", "FLOAT", false}, {"valid: BASE64 (uppercase)", "BASE64", false}, {"valid: Float (mixed)", "Float", false}, {"valid: Base64 (mixed)", "Base64", false}, {"invalid: json", "json", true}, {"invalid: hex", "hex", true}, {"invalid: invalid_format", "invalid_format", true}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { body := `{ "input": "test", "model": "test-model", "encoding_format": "` + tc.encodingFormat + `" }` req, _ := http.NewRequest(http.MethodPost, "/api/embed", strings.NewReader(body)) req.Header.Set("Content-Type", "application/json") resp := httptest.NewRecorder() router.ServeHTTP(resp, req) if tc.shouldFail { if resp.Code != http.StatusBadRequest { t.Errorf("expected status 400, got %d", resp.Code) } var errResp openai.ErrorResponse if err := json.Unmarshal(resp.Body.Bytes(), &errResp); err != nil { t.Fatalf("failed to unmarshal error response: %v", err) } if errResp.Error.Type != "invalid_request_error" { t.Errorf("expected error type 'invalid_request_error', got %q", errResp.Error.Type) } if !strings.Contains(errResp.Error.Message, "encoding_format") { t.Errorf("expected error message to mention encoding_format, got %q", errResp.Error.Message) } } else { if resp.Code != http.StatusOK { t.Errorf("expected status 200, got %d: %s", resp.Code, resp.Body.String()) } } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/middleware/openai.go
middleware/openai.go
package middleware import ( "bytes" "encoding/json" "fmt" "io" "math/rand" "net/http" "strings" "github.com/gin-gonic/gin" "github.com/ollama/ollama/api" "github.com/ollama/ollama/openai" ) type BaseWriter struct { gin.ResponseWriter } type ChatWriter struct { stream bool streamOptions *openai.StreamOptions id string toolCallSent bool BaseWriter } type CompleteWriter struct { stream bool streamOptions *openai.StreamOptions id string BaseWriter } type ListWriter struct { BaseWriter } type RetrieveWriter struct { BaseWriter model string } type EmbedWriter struct { BaseWriter model string encodingFormat string } func (w *BaseWriter) writeError(data []byte) (int, error) { var serr api.StatusError err := json.Unmarshal(data, &serr) if err != nil { return 0, err } w.ResponseWriter.Header().Set("Content-Type", "application/json") err = json.NewEncoder(w.ResponseWriter).Encode(openai.NewError(http.StatusInternalServerError, serr.Error())) if err != nil { return 0, err } return len(data), nil } func (w *ChatWriter) writeResponse(data []byte) (int, error) { var chatResponse api.ChatResponse err := json.Unmarshal(data, &chatResponse) if err != nil { return 0, err } // chat chunk if w.stream { c := openai.ToChunk(w.id, chatResponse, w.toolCallSent) d, err := json.Marshal(c) if err != nil { return 0, err } if !w.toolCallSent && len(c.Choices) > 0 && len(c.Choices[0].Delta.ToolCalls) > 0 { w.toolCallSent = true } w.ResponseWriter.Header().Set("Content-Type", "text/event-stream") _, err = w.ResponseWriter.Write([]byte(fmt.Sprintf("data: %s\n\n", d))) if err != nil { return 0, err } if chatResponse.Done { if w.streamOptions != nil && w.streamOptions.IncludeUsage { u := openai.ToUsage(chatResponse) c.Usage = &u c.Choices = []openai.ChunkChoice{} d, err := json.Marshal(c) if err != nil { return 0, err } _, err = w.ResponseWriter.Write([]byte(fmt.Sprintf("data: %s\n\n", d))) if err != nil { return 0, err } } _, err = w.ResponseWriter.Write([]byte("data: [DONE]\n\n")) if err != nil { 
return 0, err } } return len(data), nil } // chat completion w.ResponseWriter.Header().Set("Content-Type", "application/json") err = json.NewEncoder(w.ResponseWriter).Encode(openai.ToChatCompletion(w.id, chatResponse)) if err != nil { return 0, err } return len(data), nil } func (w *ChatWriter) Write(data []byte) (int, error) { code := w.ResponseWriter.Status() if code != http.StatusOK { return w.writeError(data) } return w.writeResponse(data) } func (w *CompleteWriter) writeResponse(data []byte) (int, error) { var generateResponse api.GenerateResponse err := json.Unmarshal(data, &generateResponse) if err != nil { return 0, err } // completion chunk if w.stream { c := openai.ToCompleteChunk(w.id, generateResponse) if w.streamOptions != nil && w.streamOptions.IncludeUsage { c.Usage = &openai.Usage{} } d, err := json.Marshal(c) if err != nil { return 0, err } w.ResponseWriter.Header().Set("Content-Type", "text/event-stream") _, err = w.ResponseWriter.Write([]byte(fmt.Sprintf("data: %s\n\n", d))) if err != nil { return 0, err } if generateResponse.Done { if w.streamOptions != nil && w.streamOptions.IncludeUsage { u := openai.ToUsageGenerate(generateResponse) c.Usage = &u c.Choices = []openai.CompleteChunkChoice{} d, err := json.Marshal(c) if err != nil { return 0, err } _, err = w.ResponseWriter.Write([]byte(fmt.Sprintf("data: %s\n\n", d))) if err != nil { return 0, err } } _, err = w.ResponseWriter.Write([]byte("data: [DONE]\n\n")) if err != nil { return 0, err } } return len(data), nil } // completion w.ResponseWriter.Header().Set("Content-Type", "application/json") err = json.NewEncoder(w.ResponseWriter).Encode(openai.ToCompletion(w.id, generateResponse)) if err != nil { return 0, err } return len(data), nil } func (w *CompleteWriter) Write(data []byte) (int, error) { code := w.ResponseWriter.Status() if code != http.StatusOK { return w.writeError(data) } return w.writeResponse(data) } func (w *ListWriter) writeResponse(data []byte) (int, error) { var listResponse 
api.ListResponse err := json.Unmarshal(data, &listResponse) if err != nil { return 0, err } w.ResponseWriter.Header().Set("Content-Type", "application/json") err = json.NewEncoder(w.ResponseWriter).Encode(openai.ToListCompletion(listResponse)) if err != nil { return 0, err } return len(data), nil } func (w *ListWriter) Write(data []byte) (int, error) { code := w.ResponseWriter.Status() if code != http.StatusOK { return w.writeError(data) } return w.writeResponse(data) } func (w *RetrieveWriter) writeResponse(data []byte) (int, error) { var showResponse api.ShowResponse err := json.Unmarshal(data, &showResponse) if err != nil { return 0, err } // retrieve completion w.ResponseWriter.Header().Set("Content-Type", "application/json") err = json.NewEncoder(w.ResponseWriter).Encode(openai.ToModel(showResponse, w.model)) if err != nil { return 0, err } return len(data), nil } func (w *RetrieveWriter) Write(data []byte) (int, error) { code := w.ResponseWriter.Status() if code != http.StatusOK { return w.writeError(data) } return w.writeResponse(data) } func (w *EmbedWriter) writeResponse(data []byte) (int, error) { var embedResponse api.EmbedResponse err := json.Unmarshal(data, &embedResponse) if err != nil { return 0, err } w.ResponseWriter.Header().Set("Content-Type", "application/json") err = json.NewEncoder(w.ResponseWriter).Encode(openai.ToEmbeddingList(w.model, embedResponse, w.encodingFormat)) if err != nil { return 0, err } return len(data), nil } func (w *EmbedWriter) Write(data []byte) (int, error) { code := w.ResponseWriter.Status() if code != http.StatusOK { return w.writeError(data) } return w.writeResponse(data) } func ListMiddleware() gin.HandlerFunc { return func(c *gin.Context) { w := &ListWriter{ BaseWriter: BaseWriter{ResponseWriter: c.Writer}, } c.Writer = w c.Next() } } func RetrieveMiddleware() gin.HandlerFunc { return func(c *gin.Context) { var b bytes.Buffer if err := json.NewEncoder(&b).Encode(api.ShowRequest{Name: c.Param("model")}); err != nil { 
c.AbortWithStatusJSON(http.StatusInternalServerError, openai.NewError(http.StatusInternalServerError, err.Error())) return } c.Request.Body = io.NopCloser(&b) w := &RetrieveWriter{ BaseWriter: BaseWriter{ResponseWriter: c.Writer}, model: c.Param("model"), } c.Writer = w c.Next() } } func CompletionsMiddleware() gin.HandlerFunc { return func(c *gin.Context) { var req openai.CompletionRequest err := c.ShouldBindJSON(&req) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, err.Error())) return } var b bytes.Buffer genReq, err := openai.FromCompleteRequest(req) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, err.Error())) return } if err := json.NewEncoder(&b).Encode(genReq); err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, openai.NewError(http.StatusInternalServerError, err.Error())) return } c.Request.Body = io.NopCloser(&b) w := &CompleteWriter{ BaseWriter: BaseWriter{ResponseWriter: c.Writer}, stream: req.Stream, id: fmt.Sprintf("cmpl-%d", rand.Intn(999)), streamOptions: req.StreamOptions, } c.Writer = w c.Next() } } func EmbeddingsMiddleware() gin.HandlerFunc { return func(c *gin.Context) { var req openai.EmbedRequest err := c.ShouldBindJSON(&req) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, err.Error())) return } // Validate encoding_format parameter if req.EncodingFormat != "" { if !strings.EqualFold(req.EncodingFormat, "float") && !strings.EqualFold(req.EncodingFormat, "base64") { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, fmt.Sprintf("Invalid value for 'encoding_format' = %s. 
Supported values: ['float', 'base64'].", req.EncodingFormat))) return } } if req.Input == "" { req.Input = []string{""} } if req.Input == nil { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, "invalid input")) return } if v, ok := req.Input.([]any); ok && len(v) == 0 { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, "invalid input")) return } var b bytes.Buffer if err := json.NewEncoder(&b).Encode(api.EmbedRequest{Model: req.Model, Input: req.Input, Dimensions: req.Dimensions}); err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, openai.NewError(http.StatusInternalServerError, err.Error())) return } c.Request.Body = io.NopCloser(&b) w := &EmbedWriter{ BaseWriter: BaseWriter{ResponseWriter: c.Writer}, model: req.Model, encodingFormat: req.EncodingFormat, } c.Writer = w c.Next() } } func ChatMiddleware() gin.HandlerFunc { return func(c *gin.Context) { var req openai.ChatCompletionRequest err := c.ShouldBindJSON(&req) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, err.Error())) return } if len(req.Messages) == 0 { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, "[] is too short - 'messages'")) return } var b bytes.Buffer chatReq, err := openai.FromChatRequest(req) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, err.Error())) return } if err := json.NewEncoder(&b).Encode(chatReq); err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, openai.NewError(http.StatusInternalServerError, err.Error())) return } c.Request.Body = io.NopCloser(&b) w := &ChatWriter{ BaseWriter: BaseWriter{ResponseWriter: c.Writer}, stream: req.Stream, id: fmt.Sprintf("chatcmpl-%d", rand.Intn(999)), streamOptions: req.StreamOptions, } c.Writer = w c.Next() } } type ResponsesWriter struct { BaseWriter converter *openai.ResponsesStreamConverter model string stream 
bool responseID string itemID string } func (w *ResponsesWriter) writeEvent(eventType string, data any) error { d, err := json.Marshal(data) if err != nil { return err } _, err = w.ResponseWriter.Write([]byte(fmt.Sprintf("event: %s\ndata: %s\n\n", eventType, d))) if err != nil { return err } if f, ok := w.ResponseWriter.(http.Flusher); ok { f.Flush() } return nil } func (w *ResponsesWriter) writeResponse(data []byte) (int, error) { var chatResponse api.ChatResponse if err := json.Unmarshal(data, &chatResponse); err != nil { return 0, err } if w.stream { w.ResponseWriter.Header().Set("Content-Type", "text/event-stream") events := w.converter.Process(chatResponse) for _, event := range events { if err := w.writeEvent(event.Event, event.Data); err != nil { return 0, err } } return len(data), nil } // Non-streaming response w.ResponseWriter.Header().Set("Content-Type", "application/json") response := openai.ToResponse(w.model, w.responseID, w.itemID, chatResponse) return len(data), json.NewEncoder(w.ResponseWriter).Encode(response) } func (w *ResponsesWriter) Write(data []byte) (int, error) { code := w.ResponseWriter.Status() if code != http.StatusOK { return w.writeError(data) } return w.writeResponse(data) } func ResponsesMiddleware() gin.HandlerFunc { return func(c *gin.Context) { var req openai.ResponsesRequest if err := c.ShouldBindJSON(&req); err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, err.Error())) return } chatReq, err := openai.FromResponsesRequest(req) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, openai.NewError(http.StatusBadRequest, err.Error())) return } // Check if client requested streaming (defaults to false) streamRequested := req.Stream != nil && *req.Stream // Pass streaming preference to the underlying chat request chatReq.Stream = &streamRequested var b bytes.Buffer if err := json.NewEncoder(&b).Encode(chatReq); err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, 
openai.NewError(http.StatusInternalServerError, err.Error())) return } c.Request.Body = io.NopCloser(&b) responseID := fmt.Sprintf("resp_%d", rand.Intn(999999)) itemID := fmt.Sprintf("msg_%d", rand.Intn(999999)) w := &ResponsesWriter{ BaseWriter: BaseWriter{ResponseWriter: c.Writer}, converter: openai.NewResponsesStreamConverter(responseID, itemID, req.Model), model: req.Model, stream: streamRequested, responseID: responseID, itemID: itemID, } // Set headers based on streaming mode if streamRequested { c.Writer.Header().Set("Content-Type", "text/event-stream") c.Writer.Header().Set("Cache-Control", "no-cache") c.Writer.Header().Set("Connection", "keep-alive") } c.Writer = w c.Next() } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/kvcache/wrapper.go
kvcache/wrapper.go
package kvcache import ( "math" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model/input" ) // Wrapper cache is a container for multiple types of caches, // such as for the encoding and decoding portions of a model. type WrapperCache struct { // caches we are wrapping caches []Cache // cache to be used for this layer curType int } func NewWrapperCache(caches ...Cache) *WrapperCache { return &WrapperCache{ caches: caches, } } func (c *WrapperCache) Init(backend ml.Backend, dtype ml.DType, maxSequences, capacity, maxBatch int) { for _, cache := range c.caches { cache.Init(backend, dtype, maxSequences, capacity, maxBatch) } } func (c *WrapperCache) SetConfig(config ml.CacheConfig) { for _, cache := range c.caches { cache.SetConfig(config) } } func (c *WrapperCache) Close() { for _, cache := range c.caches { cache.Close() } } func (c *WrapperCache) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error { for i, cache := range c.caches { err := cache.StartForward(ctx, batch, reserve) if err != nil { // unwind on error - Remove with endIndex set to math.MaxInt32 does not fail for j := i - 1; j >= 0; j-- { for k := range batch.Positions { _ = c.caches[j].Remove(batch.Sequences[k], batch.Positions[k], math.MaxInt32) } } return err } } c.curType = 0 return nil } func (c *WrapperCache) SetLayer(layer int) { for _, cache := range c.caches { cache.SetLayer(layer) } } func (c *WrapperCache) SetLayerType(layerType int) { c.curType = layerType } func (c *WrapperCache) UnderlyingCache() Cache { return c.caches[c.curType] } func (c *WrapperCache) Get(ctx ml.Context) (ml.Tensor, ml.Tensor, ml.Tensor) { return c.caches[c.curType].Get(ctx) } func (c *WrapperCache) Put(ctx ml.Context, key, value ml.Tensor) { c.caches[c.curType].Put(ctx, key, value) } func (c *WrapperCache) CopyPrefix(srcSeq, dstSeq int, len int32) { for _, cache := range c.caches { cache.CopyPrefix(srcSeq, dstSeq, len) } } func (c *WrapperCache) CanResume(seq int, pos int32) bool { for _, cache 
:= range c.caches { if !cache.CanResume(seq, pos) { return false } } return true } func (c *WrapperCache) Remove(seq int, beginIndex, endIndex int32) error { // If the one of these fails, the caller is supposed to retry with endIndex set to math.MaxInt32, which should not fail for _, cache := range c.caches { err := cache.Remove(seq, beginIndex, endIndex) if err != nil { return err } } return nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/kvcache/cache.go
kvcache/cache.go
package kvcache import ( "errors" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model/input" ) var ( ErrKvCacheFull = errors.New("could not find a kv cache slot") ErrNotSupported = errors.New("model does not support operation") ) type Cache interface { // ** used by model implementations ** // SetLayer sets the active layer of the cache SetLayer(layer int) // Get returns the history of key and value tensors plus a mask // // The shape of the tensors is documented in the specific // cache implementation used. Get(ctx ml.Context) (ml.Tensor, ml.Tensor, ml.Tensor) // Put stores a batch of key and value in the cache // // The shape of the tensors is documented in the specific // cache implementation used. Put(ctx ml.Context, key, value ml.Tensor) // SetConfig controls optimizations (mostly backend-specific) that may transform // the output of the cache to work better with specific kernels. If not called, // the backend settings will be used. This works well when calling Attention. // // The config can be overridden by models, especially if they require vanilla // output when implementing their own version of attention. To do this, pass // an empty ml.CacheConfig. // // Most models will not need to use this. SetConfig(ml.CacheConfig) // ** cache management ** // Init sets up runtime parameters. // backend: Used to allocate cache data storage and execute management operations (such as defrag) // dtype: The data type for storing cache entries // maxSequences: The maximum number of sequences stored in the cache - across all batches // capacity: The number of cache entries to store, per sequence // maxBatch: The maximum number of tokens that can occur in a single batch Init(backend ml.Backend, dtype ml.DType, maxSequences, capacity, maxBatch int) // Close closes the cache and frees resources associated with it Close() // StartForward is called before the start of the model's forward pass. 
// For each token in the coming batch, there must be a corresponding // entry in positions and seqs. reserve is to preallocate memory // without actually storing data in the cache. StartForward(ctx ml.Context, batch input.Batch, reserve bool) error // CopyPrefix copies tokens in the range [0, len) from srcSeq to dstSeq CopyPrefix(srcSeq, dstSeq int, len int32) // CanResume returns true if the cache can continue with the next token at // the given position and sequence. Assumes that the caller has already // verified the contents of the cache. CanResume(seq int, pos int32) bool // Remove deletes tokens in the range [beginIndex, endIndex) from seq. Set // endIndex to math.MaxInt32 to remove everything starting at beginIndex. // // If an error occurs, the entire context for the sequence should be // removed by calling Remove(seq, 0, math.MaxInt32) Remove(seq int, beginIndex, endIndex int32) error }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/kvcache/causal.go
kvcache/causal.go
package kvcache import ( "errors" "fmt" "math" "slices" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model/input" ) type shiftFn func(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) // Causal cache stores K and V tensors according to their position in the // sequence. Returns the history and a mask for attending to past tokens // // The tensors are of shape embed dim, kv heads, batch size // The mask is of shape history size, batch size type Causal struct { DType ml.DType // swaWindowSize is the number of tokens that will be included in the mask // during attention operations. swaMemorySize is the number of tokens that // will be retained in memory for partial prefix caching. Set to math.MaxInt32 // for unlimited or if sliding window attention is not being used. swaWindowSize int32 swaMemorySize int32 chunkSize int32 opts CausalOptions // maxBatch is the largest batch that we might receive maxBatch int // config controls mostly backend-specific optimizations config *ml.CacheConfig // ** current forward pass ** // size of the current batch curBatchSize int // locations for data storage for this batch curLoc ml.Tensor // mask of the cache as used by this batch curMask ml.Tensor // the active layer for Get and Put curLayer int // locations in the cache that are needed for this batch curCellRange cellRange // curSequences is the sequences corresponding to this pass's entries in the cache curSequences []int // curPositions is the positions corresponding to this pass's entries in the cache curPositions []int32 // ** cache metadata ** // for each possible location in the cache, stores the position and set of sequences // that reference the data there cells []cacheCell // maps from sequence to the range of locations where it is stored in the cache cellRanges map[int]cellRange // ** cache data storage ** shiftFn shiftFn backend ml.Backend ctxs map[int]ml.Context keys, values map[int]ml.Tensor } type cacheCell struct { pos int32 sequences []int 
} type cellRange struct { min int max int } func NewCausalCache(shift shiftFn) *Causal { return &Causal{ shiftFn: shift, ctxs: make(map[int]ml.Context), keys: make(map[int]ml.Tensor), values: make(map[int]ml.Tensor), } } func NewSWACache(windowSize int32, shift shiftFn) *Causal { return &Causal{ swaWindowSize: windowSize, shiftFn: shift, ctxs: make(map[int]ml.Context), keys: make(map[int]ml.Tensor), values: make(map[int]ml.Tensor), } } func NewSWAMemCache(windowSize int32, memorySize int32, shift shiftFn) *Causal { return &Causal{ swaWindowSize: windowSize, swaMemorySize: memorySize, shiftFn: shift, ctxs: make(map[int]ml.Context), keys: make(map[int]ml.Tensor), values: make(map[int]ml.Tensor), } } func NewChunkedAttentionCache(chunkSize int32, shift shiftFn) *Causal { return &Causal{ chunkSize: chunkSize, shiftFn: shift, ctxs: make(map[int]ml.Context), keys: make(map[int]ml.Tensor), values: make(map[int]ml.Tensor), } } func (c *Causal) Init(backend ml.Backend, dtype ml.DType, maxSequences, capacity, maxBatch int) { if c.config == nil { var config ml.CacheConfig if cc, ok := backend.(ml.BackendCacheConfig); ok { config = cc.CacheConfig() } c.config = &config } if c.config.CachePadding == 0 { c.config.CachePadding = 1 } if c.config.MaskDType == ml.DTypeOther { c.config.MaskDType = ml.DTypeF32 } if c.swaWindowSize == 0 { c.swaWindowSize = math.MaxInt32 } if c.swaMemorySize == 0 { c.swaMemorySize = c.swaWindowSize } // We will allocate space in the cache for the stop token, which won't be part of a follow on // sequence, so allocate an extra token of storage to ensure that we can jump back without // causing a cache break. As an optimization, only do this when we have parallel sequences // because the extra token will live in the batch buffer and won't get overwritten if we // only have a single sequence. 
if c.swaMemorySize != math.MaxInt32 && maxSequences > 1 { c.swaMemorySize = max(c.swaMemorySize, c.swaWindowSize+1) } if int(c.swaMemorySize) >= capacity { c.swaMemorySize = math.MaxInt32 } if c.swaMemorySize < c.swaWindowSize { panic(fmt.Errorf("sliding window memory (%v) must be at least as large as the window (%v)", c.swaMemorySize, c.swaWindowSize)) } var cacheSize int if c.swaMemorySize == math.MaxInt32 { cacheSize = maxSequences * capacity } else { cacheSize = (maxSequences * int(c.swaMemorySize)) + maxBatch } cacheSize = roundUp(cacheSize, c.config.CachePadding) c.cells = make([]cacheCell, cacheSize) c.DType = dtype c.cellRanges = make(map[int]cellRange) c.backend = backend c.maxBatch = maxBatch } func (c *Causal) SetConfig(config ml.CacheConfig) { if c.config != nil { panic("config cannot be changed after being previously set, either by the model or backend") } c.config = &config } func (c *Causal) Close() { for _, ctx := range c.ctxs { ctx.Close() } } func (c *Causal) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error { c.curBatchSize = len(batch.Positions) c.curSequences = batch.Sequences c.curPositions = batch.Positions c.opts.Except = nil var locs []int32 if !reserve { c.updateSlidingWindow() var err error locs, err = c.findLocs() if err != nil { return err } for i, pos := range batch.Positions { seq := batch.Sequences[i] loc := int(locs[i]) c.cells[loc] = cacheCell{pos: pos, sequences: []int{seq}} seqRange, ok := c.cellRanges[seq] if !ok { seqRange = newRange() } seqRange.min = min(seqRange.min, loc) c.curCellRange.min = min(c.curCellRange.min, loc) seqRange.max = max(seqRange.max, loc) c.curCellRange.max = max(c.curCellRange.max, loc) c.cellRanges[seq] = seqRange } } else { // If we are reserving memory, don't update any of the cache metadata but set the size // to the worst case. 
locs = make([]int32, c.curBatchSize) for i := range locs { locs[i] = int32(i) } c.curCellRange.min = 0 c.curCellRange.max = len(c.cells) - 1 } c.curLoc = ctx.Input().FromInts(locs, len(locs)) c.curMask = c.buildMask(ctx) return nil } func newRange() cellRange { return cellRange{ min: math.MaxInt, max: 0, } } // Returns a slice of locations where each token in the batch should be stored func (c *Causal) findLocs() ([]int32, error) { loc := make([]int32, 0, c.curBatchSize) for i := range c.cells { if len(c.cells[i].sequences) == 0 { loc = append(loc, int32(i)) if len(loc) >= c.curBatchSize { return loc, nil } } } return nil, fmt.Errorf("%w (cache: %v batch: %v)", ErrKvCacheFull, len(c.cells), c.curBatchSize) } func (c *Causal) updateSlidingWindow() { c.curCellRange = newRange() if c.swaMemorySize == math.MaxInt32 { for _, seq := range c.curSequences { if seqRange, ok := c.cellRanges[seq]; ok { c.curCellRange.min = min(c.curCellRange.min, seqRange.min) c.curCellRange.max = max(c.curCellRange.max, seqRange.max) } } return } type lowestPosition struct { pos int32 curBatch bool } // create a map of unique sequences to the lowest position in that sequence lowestPos := make(map[int]lowestPosition) for i := range c.curPositions { seq := c.curSequences[i] lowest, ok := lowestPos[seq] if !ok { lowest = lowestPosition{pos: c.curPositions[i], curBatch: true} } else if c.curPositions[i] < lowest.pos { lowest.pos = c.curPositions[i] } lowestPos[seq] = lowest } // for any sequences are not part of this batch, clean up any tokens // that are no longer needed after the processing of the previous // batch for seq, seqRange := range c.cellRanges { if _, ok := lowestPos[seq]; !ok { var last int32 for i := seqRange.min; i <= seqRange.max; i++ { if slices.Contains(c.cells[i].sequences, seq) { last = max(last, c.cells[i].pos) } } lowestPos[seq] = lowestPosition{pos: last + 1, curBatch: false} } } // delete any entries that are beyond the window of the oldest position in the sequence for 
seq, lowest := range lowestPos { oldRange, ok := c.cellRanges[seq] if !ok { continue } newRange := newRange() for i := oldRange.min; i <= oldRange.max; i++ { if slices.Contains(c.cells[i].sequences, seq) { if c.cells[i].pos < lowest.pos-c.swaMemorySize { c.cells[i].sequences = slices.DeleteFunc(c.cells[i].sequences, func(s int) bool { return s == seq }) } else { newRange.min = min(newRange.min, i) newRange.max = max(newRange.max, i) } if lowest.curBatch && c.cells[i].pos >= lowest.pos-c.swaWindowSize { c.curCellRange.min = min(c.curCellRange.min, i) c.curCellRange.max = max(c.curCellRange.max, i) } } } c.cellRanges[seq] = newRange } } func roundDown(length, pad int) int { return (length / pad) * pad } func roundUp(length, pad int) int { return ((length + pad - 1) / pad) * pad } // Builds a mask of history x batch indicating whether for each token in the batch the // token in the history should apply. This is based on both the sequence and causality (the // position of the history is not ahead of the token in the batch). 
func (c *Causal) buildMask(ctx ml.Context) ml.Tensor { c.curCellRange.min = roundDown(c.curCellRange.min, c.config.CachePadding) c.curCellRange.max = roundUp(c.curCellRange.max+1, c.config.CachePadding) - 1 length := c.curCellRange.max - c.curCellRange.min + 1 mask := make([]float32, c.curBatchSize*length) for i := range c.curBatchSize { enabled := !slices.Contains(c.opts.Except, i) for j := c.curCellRange.min; j <= c.curCellRange.max; j++ { if !slices.Contains(c.cells[j].sequences, c.curSequences[i]) || (enabled && c.cells[j].pos > c.curPositions[i]) || c.chunkSize > 0 && c.cells[j].pos < c.curPositions[i]-c.curPositions[i]%c.chunkSize || c.cells[j].pos < c.curPositions[i]-c.swaWindowSize { mask[i*length+(j-c.curCellRange.min)] = float32(math.Inf(-1)) } } } maskTensor := ctx.Input().FromFloats(mask, length, c.curBatchSize) if c.config.MaskDType != ml.DTypeF32 { maskTensor = maskTensor.Cast(ctx, c.config.MaskDType) } return maskTensor } func (c *Causal) SetLayer(layer int) { c.curLayer = layer } type CausalOptions struct { // Enabled controls whether the causal mask is generated for a particular index in a batch Except []int } // SetCausal disables causal mask generation for a particular range of indicies in // the current batch for subsequent calls to Get. The state resets for the next forward pass. 
func (c *Causal) SetCausal(ctx ml.Context, opts CausalOptions) { if !slices.Equal(c.opts.Except, opts.Except) { c.opts = opts if ctx != nil { c.curMask = c.buildMask(ctx) } } } func (c *Causal) Get(ctx ml.Context) (ml.Tensor, ml.Tensor, ml.Tensor) { key := c.keys[c.curLayer] value := c.values[c.curLayer] kHeadDim := key.Dim(0) numKVHeads := key.Dim(1) rowSize := key.Stride(2) cachedSize := c.curMask.Dim(0) key = key.View(ctx, rowSize*c.curCellRange.min, kHeadDim, key.Stride(1), numKVHeads, key.Stride(2), cachedSize, ) if c.config.PermutedV { vHeadDim := value.Dim(1) elemSize := value.Stride(0) value = value.View(ctx, elemSize*c.curCellRange.min, cachedSize, value.Stride(1), vHeadDim, value.Stride(2), numKVHeads, ) } else { vHeadDim := value.Dim(0) rowSize := value.Stride(2) value = value.View(ctx, rowSize*c.curCellRange.min, vHeadDim, value.Stride(1), numKVHeads, value.Stride(2), cachedSize, ) } return key, value, c.curMask } func (c *Causal) Put(ctx ml.Context, key, value ml.Tensor) { kHeadDim := key.Dim(0) vHeadDim := value.Dim(0) numKVHeads := key.Dim(1) batchSize := key.Dim(2) if c.curBatchSize != batchSize { panic(fmt.Errorf("inconsistent batch sizes (layer: %v, batch size: %v layer batch size: %v)", c.curLayer, c.curBatchSize, batchSize)) } if _, ok := c.ctxs[c.curLayer]; !ok { c.ctxs[c.curLayer] = c.backend.NewContextSize(2).Layer(c.curLayer) } if _, ok := c.keys[c.curLayer]; !ok { c.keys[c.curLayer] = c.ctxs[c.curLayer].Zeros(c.DType, kHeadDim, numKVHeads, len(c.cells)) } if _, ok := c.values[c.curLayer]; !ok { if c.config.PermutedV { c.values[c.curLayer] = c.ctxs[c.curLayer].Zeros(c.DType, len(c.cells), vHeadDim, numKVHeads) } else { c.values[c.curLayer] = c.ctxs[c.curLayer].Zeros(c.DType, vHeadDim, numKVHeads, len(c.cells)) } } key = key.Reshape(ctx, kHeadDim*numKVHeads, batchSize) keyCache := c.keys[c.curLayer] keyCache = keyCache.Reshape(ctx, kHeadDim*numKVHeads, len(c.cells)) ctx.Forward(keyCache.SetRows(ctx, key, c.curLoc)) if c.config.PermutedV { 
value = value.Reshape(ctx, vHeadDim*numKVHeads, 1, batchSize) value = value.Permute(ctx, 2, 0, 1, 3) valueCache := c.values[c.curLayer] valueCache = valueCache.Reshape(ctx, 1, len(c.cells), vHeadDim*numKVHeads) ctx.Forward(valueCache.SetRows(ctx, value, c.curLoc)) } else { value = value.Reshape(ctx, vHeadDim*numKVHeads, batchSize) valueCache := c.values[c.curLayer] valueCache = valueCache.Reshape(ctx, vHeadDim*numKVHeads, len(c.cells)) ctx.Forward(valueCache.SetRows(ctx, value, c.curLoc)) } } func (c *Causal) CopyPrefix(srcSeq, dstSeq int, len int32) { seqRange := newRange() for i := range c.cells { // Remove the contents of dstSeq so that we only have the copied prefix, metadata will be reset at the end if slices.Contains(c.cells[i].sequences, dstSeq) { c.cells[i].sequences = slices.DeleteFunc(c.cells[i].sequences, func(s int) bool { return s == dstSeq }) } if slices.Contains(c.cells[i].sequences, srcSeq) && c.cells[i].pos < len { c.cells[i].sequences = append(c.cells[i].sequences, dstSeq) if i < seqRange.min { seqRange.min = i } if i > seqRange.max { seqRange.max = i } } } c.cellRanges[dstSeq] = seqRange } func (c *Causal) CanResume(seq int, pos int32) bool { if c.swaMemorySize == math.MaxInt32 { return true } seqRange, ok := c.cellRanges[seq] if !ok { return false } // for sliding window, check that the window of the new sequence is contained in // the window of what we are storing var first int32 = math.MaxInt32 var last int32 = -1 for i := seqRange.min; i <= seqRange.max; i++ { if slices.Contains(c.cells[i].sequences, seq) { first = min(first, c.cells[i].pos) last = max(last, c.cells[i].pos) } } if last == -1 { return false } posWindowStart := max(0, pos-c.swaWindowSize) return posWindowStart >= first && pos <= last+1 } func (c *Causal) shift(seq int, beginIndex, offset int32) error { if c.shiftFn == nil { return ErrNotSupported } seqRange := c.cellRanges[seq] for start := seqRange.min; start <= seqRange.max; start += c.maxBatch { size := 
min(seqRange.max-start+1, c.maxBatch) offsets := make([]int32, size) var batchFirst, batchLast int batchFirst = -1 for i := range offsets { cell := c.cells[start+i] if slices.Contains(cell.sequences, seq) && cell.pos >= beginIndex { offsets[i] = offset if batchFirst < 0 { batchFirst = i } batchLast = i } } if batchFirst < 0 { continue } offsets = offsets[batchFirst : batchLast+1] ctx := c.backend.NewContext() kShift := ctx.Input().FromInts(offsets, len(offsets)) for i, key := range c.keys { if key == nil { continue } kHeadDim := key.Dim(0) numKVHeads := key.Dim(1) rowSize := key.Stride(2) key = key.View(ctx, rowSize*(start+batchFirst), kHeadDim, key.Stride(1), numKVHeads, key.Stride(2), len(offsets), ) roped, err := c.shiftFn(ctx, i, key, kShift) if err != nil { ctx.Close() return err } ctx.Forward(roped.Copy(ctx, key)) } ctx.Compute() ctx.Close() } return nil } func (c *Causal) Remove(seq int, beginIndex, endIndex int32) error { // TODO(jessegross): We should check to see if removing the middle of the sequence will // cause the sliding window to encompass tokens that we no longer have. If so, then we // should return an error, which will trigger the runner to evaluate the full history and // rebuild the window. However, if we have multimodal inputs in our history, this reuse // results in use after free, so we don't do it for now. 
var offset int32 if endIndex != math.MaxInt32 { offset = beginIndex - endIndex } seqRange := newRange() for i := range c.cells { if slices.Contains(c.cells[i].sequences, seq) { if c.cells[i].pos >= beginIndex && c.cells[i].pos < endIndex { c.cells[i].sequences = slices.DeleteFunc(c.cells[i].sequences, func(s int) bool { return s == seq }) } else { if c.cells[i].pos >= endIndex { if slices.ContainsFunc(c.cells[i].sequences, func(s int) bool { return s != seq }) { return errors.New("shifting cells shared by multiple sequences not supported") } c.cells[i].pos += offset } if i < seqRange.min { seqRange.min = i } if i > seqRange.max { seqRange.max = i } } } } if seqRange == newRange() { delete(c.cellRanges, seq) return nil } c.cellRanges[seq] = seqRange if endIndex != math.MaxInt32 { err := c.shift(seq, endIndex+offset, offset) if err != nil { return err } } return nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/kvcache/causal_test.go
kvcache/causal_test.go
package kvcache import ( "fmt" "math" "slices" "testing" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model/input" ) type testCase struct { name string in []float32 inShape []int seqs []int pos []int32 expected []float32 expectedShape []int expectedMask []float32 } func runPermutedVariants(t *testing.T, fn func(t *testing.T, backend *testBackend)) { t.Helper() for _, permuted := range []bool{false, true} { t.Run(fmt.Sprintf("PermutedV=%t", permuted), func(t *testing.T) { fn(t, &testBackend{permutedV: permuted}) }) } } func TestStore(t *testing.T) { runPermutedVariants(t, func(t *testing.T, backend *testBackend) { cache := NewCausalCache(nil) defer cache.Close() cache.Init(backend, ml.DTypeF16, 1, 16, 16) tests := []testCase{ { name: "FirstBatch", in: []float32{111, 211, 121, 221, 131, 231, 112, 212, 122, 222, 132, 232, 113, 213, 123, 223, 133, 233, 114, 214, 124, 224, 134, 234}, inShape: []int{2, 3, 4}, seqs: []int{0, 0, 0, 0}, pos: []int32{0, 1, 2, 3}, expected: []float32{111, 211, 121, 221, 131, 231, 112, 212, 122, 222, 132, 232, 113, 213, 123, 223, 133, 233, 114, 214, 124, 224, 134, 234}, expectedShape: []int{2, 3, 4}, expectedMask: []float32{0, float32(math.Inf(-1)), float32(math.Inf(-1)), float32(math.Inf(-1)), 0, 0, float32(math.Inf(-1)), float32(math.Inf(-1)), 0, 0, 0, float32(math.Inf(-1)), 0, 0, 0, 0}, }, { name: "SecondBatch", in: []float32{115, 215, 125, 225, 135, 235}, inShape: []int{2, 3, 1}, seqs: []int{0}, pos: []int32{4}, expected: []float32{111, 211, 121, 221, 131, 231, 112, 212, 122, 222, 132, 232, 113, 213, 123, 223, 133, 233, 114, 214, 124, 224, 134, 234, 115, 215, 125, 225, 135, 235}, expectedShape: []int{2, 3, 5}, expectedMask: []float32{0, 0, 0, 0, 0}, }, } testCache(t, backend, cache, tests) }) } func TestSWA(t *testing.T) { runPermutedVariants(t, func(t *testing.T, backend *testBackend) { cache := NewSWACache(1, nil) defer cache.Close() cache.Init(backend, ml.DTypeF16, 1, 16, 16) x := float32(math.Inf(-1)) tests := []testCase{ { 
name: "FirstBatch", in: []float32{1, 2, 3, 4}, inShape: []int{1, 1, 4}, seqs: []int{0, 0, 0, 0}, pos: []int32{0, 1, 2, 3}, expected: []float32{1, 2, 3, 4}, expectedShape: []int{1, 1, 4}, expectedMask: []float32{ 0, x, x, x, 0, 0, x, x, x, 0, 0, x, x, x, 0, 0, }, }, { name: "SecondBatch", in: []float32{5, 6}, inShape: []int{1, 1, 2}, seqs: []int{0, 0}, pos: []int32{4, 5}, expected: []float32{5, 6, 3, 4}, expectedShape: []int{1, 1, 4}, expectedMask: []float32{ 0, x, x, 0, 0, 0, x, x, }, }, } testCache(t, backend, cache, tests) }) } func TestSWASeparateBatches(t *testing.T) { runPermutedVariants(t, func(t *testing.T, backend *testBackend) { cache := NewSWACache(1, nil) defer cache.Close() cache.Init(backend, ml.DTypeF16, 2, 16, 2) x := float32(math.Inf(-1)) tests := []testCase{ { name: "First seq 0", in: []float32{1, 2}, inShape: []int{1, 1, 2}, seqs: []int{0, 0}, pos: []int32{0, 1}, expected: []float32{1, 2}, expectedShape: []int{1, 1, 2}, expectedMask: []float32{ 0, x, 0, 0, }, }, { name: "Second seq 0", in: []float32{3, 4}, inShape: []int{1, 1, 2}, seqs: []int{0, 0}, pos: []int32{2, 3}, expected: []float32{2, 3, 4}, expectedShape: []int{1, 1, 3}, expectedMask: []float32{ 0, 0, x, x, 0, 0, }, }, { name: "First seq 1", in: []float32{5, 6}, inShape: []int{1, 1, 2}, seqs: []int{1, 1}, pos: []int32{0, 1}, expected: []float32{5, 6}, expectedShape: []int{1, 1, 2}, expectedMask: []float32{ 0, x, 0, 0, }, }, { name: "Second seq 1", in: []float32{7, 8}, inShape: []int{1, 1, 2}, seqs: []int{1, 1}, pos: []int32{2, 3}, expected: []float32{6, 3, 4, 7, 8}, expectedShape: []int{1, 1, 5}, expectedMask: []float32{ 0, x, x, 0, x, x, x, x, 0, 0, }, }, { name: "Third seq 0", in: []float32{9, 10}, inShape: []int{1, 1, 2}, seqs: []int{0, 0}, pos: []int32{4, 5}, expected: []float32{9, 10, 3, 4}, expectedShape: []int{1, 1, 4}, expectedMask: []float32{ 0, x, x, 0, 0, 0, x, x, }, }, } testCache(t, backend, cache, tests) }) } func TestSWAMem(t *testing.T) { runPermutedVariants(t, func(t 
*testing.T, backend *testBackend) { cache := NewSWAMemCache(1, 3, nil) defer cache.Close() cache.Init(backend, ml.DTypeF16, 1, 16, 16) x := float32(math.Inf(-1)) tests := []testCase{ { name: "FirstBatch", in: []float32{1, 2, 3, 4}, inShape: []int{1, 1, 4}, seqs: []int{0, 0, 0, 0}, pos: []int32{0, 1, 2, 3}, expected: []float32{1, 2, 3, 4}, expectedShape: []int{1, 1, 4}, expectedMask: []float32{ 0, x, x, x, 0, 0, x, x, x, 0, 0, x, x, x, 0, 0, }, }, { name: "SecondBatch", in: []float32{5, 6}, inShape: []int{1, 1, 2}, seqs: []int{0, 0}, pos: []int32{4, 5}, expected: []float32{5, 2, 3, 4, 6}, expectedShape: []int{1, 1, 5}, expectedMask: []float32{ 0, x, x, 0, x, 0, x, x, x, 0, }, }, } testCache(t, backend, cache, tests) }) } func TestChunkedAttention(t *testing.T) { runPermutedVariants(t, func(t *testing.T, backend *testBackend) { cache := NewChunkedAttentionCache(2, nil) defer cache.Close() cache.Init(backend, ml.DTypeF16, 1, 16, 16) x := float32(math.Inf(-1)) testCache( t, backend, cache, []testCase{ { name: "FirstBatch", in: []float32{1, 2, 3, 4}, inShape: []int{1, 1, 4}, seqs: []int{0, 0, 0, 0}, pos: []int32{0, 1, 2, 3}, expected: []float32{1, 2, 3, 4}, expectedShape: []int{1, 1, 4}, expectedMask: []float32{ 0, x, x, x, 0, 0, x, x, x, x, 0, x, x, x, 0, 0, }, }, { name: "SecondBatch", in: []float32{5, 6, 7}, inShape: []int{1, 1, 3}, seqs: []int{0, 0, 0}, pos: []int32{4, 5, 6}, expected: []float32{1, 2, 3, 4, 5, 6, 7}, expectedShape: []int{1, 1, 7}, expectedMask: []float32{ x, x, x, x, 0, x, x, x, x, x, x, 0, 0, x, x, x, x, x, x, x, 0, }, }, { name: "ThirdBatch", in: []float32{8, 9}, inShape: []int{1, 1, 2}, seqs: []int{0, 0}, pos: []int32{7, 8}, expected: []float32{1, 2, 3, 4, 5, 6, 7, 8, 9}, expectedShape: []int{1, 1, 9}, expectedMask: []float32{ x, x, x, x, x, x, 0, 0, x, x, x, x, x, x, x, x, x, 0, }, }, }, ) }) } func TestSequences(t *testing.T) { runPermutedVariants(t, func(t *testing.T, backend *testBackend) { cache := NewCausalCache(nil) defer cache.Close() 
cache.Init(backend, ml.DTypeF16, 1, 16, 16) tests := []testCase{ { name: "FirstBatch", in: []float32{1, 2, 3, 4}, inShape: []int{1, 1, 4}, seqs: []int{0, 0, 1, 1}, pos: []int32{0, 1, 0, 1}, expected: []float32{1, 2, 3, 4}, expectedShape: []int{1, 1, 4}, expectedMask: []float32{0, float32(math.Inf(-1)), float32(math.Inf(-1)), float32(math.Inf(-1)), 0, 0, float32(math.Inf(-1)), float32(math.Inf(-1)), float32(math.Inf(-1)), float32(math.Inf(-1)), 0, float32(math.Inf(-1)), float32(math.Inf(-1)), float32(math.Inf(-1)), 0, 0}, }, { name: "SecondBatch", in: []float32{5, 6}, inShape: []int{1, 1, 2}, seqs: []int{0, 1}, pos: []int32{2, 2}, expected: []float32{1, 2, 3, 4, 5, 6}, expectedShape: []int{1, 1, 6}, expectedMask: []float32{0, 0, float32(math.Inf(-1)), float32(math.Inf(-1)), 0, float32(math.Inf(-1)), float32(math.Inf(-1)), float32(math.Inf(-1)), 0, 0, float32(math.Inf(-1)), 0}, }, } testCache(t, backend, cache, tests) }) } func TestRemove(t *testing.T) { runPermutedVariants(t, func(t *testing.T, backend *testBackend) { cache := NewCausalCache(func(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { return key.Add(ctx, shift), nil }) defer cache.Close() cache.Init(backend, ml.DTypeF16, 1, 16, 16) x := float32(math.Inf(-1)) tests := []testCase{ { name: "FirstBatch", in: []float32{1, 2, 3, 4}, inShape: []int{1, 1, 4}, seqs: []int{0, 0, 1, 1}, pos: []int32{0, 1, 0, 1}, expected: []float32{1, 2, 3, 4}, expectedShape: []int{1, 1, 4}, expectedMask: []float32{ 0, x, x, x, 0, 0, x, x, x, x, 0, x, x, x, 0, 0, }, }, } testCache(t, backend, cache, tests) err := cache.Remove(0, 1, math.MaxInt32) if err != nil { panic(err) } tests = []testCase{ { name: "RemoveEnd", in: []float32{5, 6}, inShape: []int{1, 1, 2}, seqs: []int{0, 1}, pos: []int32{1, 2}, expected: []float32{1, 5, 3, 4, 6}, expectedShape: []int{1, 1, 5}, expectedMask: []float32{ 0, 0, x, x, x, x, x, 0, 0, 0, }, }, } testCache(t, backend, cache, tests) err = cache.Remove(0, 0, 1) if err != nil { 
panic(err) } tests = []testCase{ { name: "RemoveMiddle", in: []float32{7, 8}, inShape: []int{1, 1, 2}, seqs: []int{0, 0}, pos: []int32{1, 2}, expected: []float32{7, 4, 3, 4, 6, 8}, expectedShape: []int{1, 1, 6}, expectedMask: []float32{ 0, 0, x, x, x, x, 0, 0, x, x, x, 0, }, }, } testCache(t, backend, cache, tests) }) } func TestCopy(t *testing.T) { runPermutedVariants(t, func(t *testing.T, backend *testBackend) { cache := NewCausalCache(func(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) { return key, nil }) defer cache.Close() cache.Init(backend, ml.DTypeF16, 1, 16, 16) tests := []testCase{ { name: "FirstBatch", in: []float32{1, 2, 3, 4}, inShape: []int{1, 1, 4}, seqs: []int{0, 0, 0, 0}, pos: []int32{0, 1, 2, 3}, expected: []float32{1, 2, 3, 4}, expectedShape: []int{1, 1, 4}, expectedMask: []float32{0, float32(math.Inf(-1)), float32(math.Inf(-1)), float32(math.Inf(-1)), 0, 0, float32(math.Inf(-1)), float32(math.Inf(-1)), 0, 0, 0, float32(math.Inf(-1)), 0, 0, 0, 0}, }, } testCache(t, backend, cache, tests) cache.CopyPrefix(0, 1, 2) tests = []testCase{ { name: "Copy", in: []float32{5, 6}, inShape: []int{1, 1, 2}, seqs: []int{1, 1}, pos: []int32{3, 4}, expected: []float32{1, 2, 3, 4, 5, 6}, expectedShape: []int{1, 1, 6}, expectedMask: []float32{0, 0, float32(math.Inf(-1)), float32(math.Inf(-1)), 0, float32(math.Inf(-1)), 0, 0, float32(math.Inf(-1)), float32(math.Inf(-1)), 0, 0}, }, } testCache(t, backend, cache, tests) }) } func testCache(t *testing.T, backend ml.Backend, cache Cache, tests []testCase) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { context := backend.NewContext() defer context.Close() err := cache.StartForward(context, input.Batch{Positions: test.pos, Sequences: test.seqs}, false) if err != nil { panic(err) } cache.SetLayer(0) tensor := context.FromFloats(test.in, test.inShape...) 
cache.Put(context, tensor, tensor) out, _, mask := cache.Get(context) context.Forward(out, mask).Compute(out, mask) if !slices.Equal(out.Floats(), test.expected) { t.Errorf("TestCache: have %v; want %v", out.Floats(), test.expected) } if !slices.Equal(out.Shape(), test.expectedShape) { t.Errorf("TestCache: has shape %v; want %v", out.Shape(), test.expectedShape) } if !slices.Equal(mask.Floats(), test.expectedMask) { t.Errorf("TestCache: have mask: have %v want %v", mask.Floats(), test.expectedMask) } }) } } func TestCanResume(t *testing.T) { runPermutedVariants(t, func(t *testing.T, backend *testBackend) { windowSize := int32(4) cache := NewSWACache(windowSize, nil) defer cache.Close() cache.Init(backend, ml.DTypeF16, 1, 16, 16) context := backend.NewContext() defer context.Close() err := cache.StartForward(context, input.Batch{ Positions: []int32{0, 1, 2, 3, 4}, Sequences: []int{0, 0, 0, 0, 0}, }, false) if err != nil { t.Fatalf("StartForward failed: %v", err) } cache.SetLayer(0) tensor := context.FromFloats([]float32{1, 2, 3, 4, 5}, 1, 1, 5) cache.Put(context, tensor, tensor) // with window size 4, nothing has slid out of the window yet if !cache.CanResume(0, 0) { t.Errorf("CanResume(0, 0) = false, want true (within window)") } if !cache.CanResume(0, 1) { t.Errorf("CanResume(0, 1) = false, want true (within window)") } if !cache.CanResume(0, 2) { t.Errorf("CanResume(0, 2) = false, want true (within window)") } if !cache.CanResume(0, 3) { t.Errorf("CanResume(0, 3) = false, want true (latest position)") } if !cache.CanResume(0, 4) { t.Errorf("CanResume(0, 4) = false, want true (latest position)") } // shift window by adding position 5 err = cache.StartForward(context, input.Batch{ Positions: []int32{5}, Sequences: []int{0}, }, false) if err != nil { t.Fatalf("StartForward failed: %v", err) } cache.SetLayer(0) tensor = context.FromFloats([]float32{6}, 1, 1, 1) cache.Put(context, tensor, tensor) // only the latest position has overlapping windows if 
cache.CanResume(0, 0) { t.Errorf("after shift: CanResume(0, 0) = true, want false (outside window)") } if cache.CanResume(0, 1) { t.Errorf("after shift: CanResume(0, 1) = true, want false (outside window)") } if cache.CanResume(0, 2) { t.Errorf("after shift: CanResume(0, 2) = true, want false (outside window)") } if cache.CanResume(0, 3) { t.Errorf("after shift: CanResume(0, 3) = true, want false (outside window)") } if cache.CanResume(0, 4) { t.Errorf("after shift: CanResume(0, 4) = true, want false (outside window)") } if !cache.CanResume(0, 5) { t.Errorf("after shift: CanResume(0, 5) = false, want true (latest position)") } }) } func TestCanResumeSWAMem(t *testing.T) { runPermutedVariants(t, func(t *testing.T, backend *testBackend) { windowSize := int32(4) memSize := int32(5) cache := NewSWAMemCache(windowSize, memSize, nil) defer cache.Close() cache.Init(backend, ml.DTypeF16, 1, 16, 16) context := backend.NewContext() defer context.Close() err := cache.StartForward(context, input.Batch{ Positions: []int32{0, 1, 2, 3, 4, 5, 6}, Sequences: []int{0, 0, 0, 0, 0, 0, 0}, }, false) if err != nil { t.Fatalf("StartForward failed: %v", err) } cache.SetLayer(0) tensor := context.FromFloats([]float32{1, 2, 3, 4, 5, 6, 7}, 1, 1, 7) cache.Put(context, tensor, tensor) // shift window by adding position 7 err = cache.StartForward(context, input.Batch{ Positions: []int32{7}, Sequences: []int{0}, }, false) if err != nil { t.Fatalf("StartForward failed: %v", err) } cache.SetLayer(0) tensor = context.FromFloats([]float32{8}, 1, 1, 1) cache.Put(context, tensor, tensor) // only the latest position has overlapping windows if cache.CanResume(0, 0) { t.Errorf("after shift: CanResume(0, 0) = true, want false (outside window)") } if cache.CanResume(0, 1) { t.Errorf("after shift: CanResume(0, 1) = true, want false (outside window)") } if cache.CanResume(0, 2) { t.Errorf("after shift: CanResume(0, 2) = true, want false (outside window)") } if cache.CanResume(0, 3) { t.Errorf("after shift: 
CanResume(0, 3) = true, want false (outside window)") } if cache.CanResume(0, 4) { t.Errorf("after shift: CanResume(0, 4) = true, want false (outside window)") } if cache.CanResume(0, 5) { t.Errorf("after shift: CanResume(0, 5) = true, want false (outside window)") } if !cache.CanResume(0, 6) { t.Errorf("after shift: CanResume(0, 6) = false, want true (inside window)") } if !cache.CanResume(0, 7) { t.Errorf("after shift: CanResume(0, 7) = false, want true (latest position)") } }) } type testBackend struct { ml.Backend permutedV bool } func (b *testBackend) NewContext() ml.Context { return &testContext{} } func (b *testBackend) NewContextSize(int) ml.Context { return &testContext{} } func (b *testBackend) CacheConfig() ml.CacheConfig { return ml.CacheConfig{PermutedV: b.permutedV} } type testContext struct { ml.Context } func (c *testContext) Empty(dtype ml.DType, shape ...int) ml.Tensor { total := 0 if len(shape) > 0 { total = 1 for _, s := range shape { total *= s } } return &testTensor{dtype: dtype, elementSize: 4, data: make([]float32, total), shape: shape} } func (c *testContext) Zeros(dtype ml.DType, shape ...int) ml.Tensor { return c.Empty(dtype, shape...) } func (c *testContext) FromFloats(s []float32, shape ...int) ml.Tensor { t := c.Empty(ml.DTypeF32, shape...).(*testTensor) copy(t.data, s) return t } func (c *testContext) FromInts(s []int32, shape ...int) ml.Tensor { f := make([]float32, len(s)) for i := range f { f[i] = float32(s[i]) } out := c.FromFloats(f, shape...) 
out.(*testTensor).dtype = ml.DTypeI32 return out } func (c *testContext) Arange(start, stop, step float32, dtype ml.DType) ml.Tensor { s := make([]float32, 0, int((stop-start)/step)) for i := start; i < stop; i += step { s = append(s, i) } out := c.FromFloats(s, len(s)) out.(*testTensor).dtype = dtype return out } func (c *testContext) Input() ml.Context { return c } func (c *testContext) Layer(int) ml.Context { return c } func (c *testContext) Forward(...ml.Tensor) ml.Context { return c } func (c *testContext) Compute(...ml.Tensor) {} func (c *testContext) Reserve() {} func (c *testContext) MaxGraphNodes() int { return 10 } func (c *testContext) Close() {} type testTensor struct { ml.Tensor dtype ml.DType elementSize int data []float32 shape []int } func (t *testTensor) Dim(n int) int { return t.shape[n] } func (t *testTensor) Stride(n int) int { stride := t.elementSize for i := range n { stride *= t.shape[i] } return stride } func (t *testTensor) Shape() []int { return t.shape } func (t *testTensor) DType() ml.DType { return t.dtype } func (t *testTensor) Floats() []float32 { out := make([]float32, len(t.data)) copy(out, t.data) return out } func (t *testTensor) Neg(ctx ml.Context) ml.Tensor { out := ctx.Empty(t.DType(), t.Shape()...).(*testTensor) for i := range out.data { out.data[i] = -t.data[i] } return out } func (t *testTensor) Add(ctx ml.Context, t2 ml.Tensor) ml.Tensor { out := ctx.Empty(t.DType(), t.Shape()...).(*testTensor) for i := range out.data { out.data[i] = t.data[i] + t2.(*testTensor).data[i] } return out } func (t *testTensor) Reshape(ctx ml.Context, shape ...int) ml.Tensor { return &testTensor{ dtype: t.dtype, elementSize: t.elementSize, data: t.data, shape: shape, } } func (t *testTensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor { offset /= t.elementSize var s []int switch len(shape) { case 1: s = []int{shape[0]} case 3: s = []int{shape[0], shape[2]} case 5: s = []int{shape[0], shape[2], shape[4]} default: panic("unsupported 
number of dimensions") } context := &testContext{} view := context.Empty(t.dtype, s...).(*testTensor) view.data = t.data[offset : offset+len(view.data)] return view } func (t *testTensor) Permute(ctx ml.Context, order ...int) ml.Tensor { if len(t.shape) > 4 || len(order) > 4 { panic("permute only supports up to 4 dimensions") } if len(order) != len(t.shape) && len(order) != 4 { panic("invalid number of dimensions for permute") } // ggml_permute expects 4 axes, so fill in any missing dimensions. orderFull := append(make([]int, 0, 4), order...) for len(orderFull) < 4 { orderFull = append(orderFull, len(orderFull)) } seen := [4]bool{} shape4 := [4]int{1, 1, 1, 1} for i := 0; i < len(t.shape) && i < 4; i++ { shape4[i] = t.shape[i] } newShape4 := [4]int{1, 1, 1, 1} for axis := range 4 { dst := orderFull[axis] if dst < 0 || dst >= 4 { panic("invalid axis for permute") } if seen[dst] { panic("duplicate axis for permute") } seen[dst] = true newShape4[dst] = shape4[axis] } total := len(t.data) newData := make([]float32, total) if total > 0 { oldDims := shape4 newDims := newShape4 oldStride := [4]int{1, 1, 1, 1} newStride := [4]int{1, 1, 1, 1} for i := 1; i < 4; i++ { oldStride[i] = oldStride[i-1] * oldDims[i-1] newStride[i] = newStride[i-1] * newDims[i-1] } var coords [4]int var newCoords [4]int for idx := range total { remainder := idx for axis := range 4 { dim := oldDims[axis] if dim == 0 { coords[axis] = 0 continue } coords[axis] = remainder % dim remainder /= dim } for axis := range 4 { newCoords[orderFull[axis]] = coords[axis] } newIndex := 0 for axis := range 4 { if newDims[axis] == 0 { continue } newIndex += newCoords[axis] * newStride[axis] } newData[newIndex] = t.data[idx] } } numDims := 4 for numDims > 1 && newShape4[numDims-1] <= 1 { numDims-- } newShape := make([]int, numDims) copy(newShape, newShape4[:numDims]) return &testTensor{ dtype: t.dtype, elementSize: t.elementSize, data: newData, shape: newShape, } } func (t *testTensor) SetRows(ctx ml.Context, src 
ml.Tensor, idxs ml.Tensor) ml.Tensor { dst := t srcTensor := src.(*testTensor) idxTensor := idxs.(*testTensor) shapeTo4D := func(shape []int) [4]int { out := [4]int{1, 1, 1, 1} for i := 0; i < len(shape) && i < 4; i++ { out[i] = shape[i] } return out } computeStrides := func(shape [4]int) [4]int { out := [4]int{1, 1, 1, 1} for i := 1; i < 4; i++ { out[i] = out[i-1] * shape[i-1] } return out } dstShape4D := shapeTo4D(dst.shape) srcShape4D := shapeTo4D(srcTensor.shape) idxShape4D := shapeTo4D(idxTensor.shape) if dstShape4D[0] != srcShape4D[0] || dstShape4D[2] != srcShape4D[2] || dstShape4D[3] != srcShape4D[3] { panic("SetRows requires matching tensor shapes") } if srcShape4D[1] != idxShape4D[0] { panic("SetRows rows/index mismatch") } if srcShape4D[2]%idxShape4D[1] != 0 || srcShape4D[3]%idxShape4D[2] != 0 { panic("SetRows cannot broadcast indices") } if idxShape4D[3] != 1 { panic("SetRows expects 1D or 2D index tensors") } dstStride := computeStrides(dstShape4D) srcStride := computeStrides(srcShape4D) idxStride := computeStrides(idxShape4D) numColumns := srcShape4D[0] numRows := srcShape4D[1] for dim3Index := range dstShape4D[3] { for dim2Index := range dstShape4D[2] { idxDim2 := 0 idxDim3 := 0 if idxShape4D[1] > 0 { idxDim2 = dim2Index % idxShape4D[1] } if idxShape4D[2] > 0 { idxDim3 = dim3Index % idxShape4D[2] } idxBase := idxDim3*idxStride[2] + idxDim2*idxStride[1] srcBase := dim3Index*srcStride[3] + dim2Index*srcStride[2] dstBase := dim3Index*dstStride[3] + dim2Index*dstStride[2] for row := range numRows { idx := int(idxTensor.data[idxBase+row*idxStride[0]]) if idx < 0 || idx >= dstShape4D[1] { panic("SetRows index out of range") } srcOffset := srcBase + row*srcStride[1] dstOffset := dstBase + idx*dstStride[1] copy(dst.data[dstOffset:dstOffset+numColumns], srcTensor.data[srcOffset:srcOffset+numColumns]) } } } return dst } func (t *testTensor) Copy(ctx ml.Context, t2 ml.Tensor) ml.Tensor { copy(t2.(*testTensor).data, t.data) return nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/kvcache/encoder.go
kvcache/encoder.go
package kvcache import ( "fmt" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model/input" ) // Encoder cache stores K and V tensors that are position independent // // The tensors can be of any shape and will be returned as they were stored // The mask is currently always nil // // Not currently safe for multiple sequences type EncoderCache struct { // config controls mostly backend-specific optimizations config *ml.CacheConfig // ** current forward pass ** // the active layer for Get and Put curLayer int // if something is stored during this pass, this // will be the position (but there is no guarantee // anything will be stored) curPos int32 // curReserve indicates that this forward pass is only for // memory reservation and we should not update our metadata // based on it. curReserve bool // ** cache metadata ** // was something stored in the cache? encoderCached bool // position of the cached data encoderPos int32 // ** cache data storage ** backend ml.Backend ctxs map[int]ml.Context keys, values map[int]ml.Tensor } func NewEncoderCache() *EncoderCache { return &EncoderCache{ ctxs: make(map[int]ml.Context), keys: make(map[int]ml.Tensor), values: make(map[int]ml.Tensor), } } func (c *EncoderCache) Init(backend ml.Backend, dtype ml.DType, maxSequences, capacity, maxBatch int) { if c.config == nil { var config ml.CacheConfig if cc, ok := backend.(ml.BackendCacheConfig); ok { config = cc.CacheConfig() } c.config = &config } if maxSequences > 1 { panic(fmt.Errorf("encoder cache does not support multiple sequences; requested: %v", maxSequences)) } if c.config.CachePadding != 0 && c.config.CachePadding != 1 { panic(fmt.Errorf("encoder cache is unable to enforce requested CachePadding (%v)", c.config.CachePadding)) } c.backend = backend } func (c *EncoderCache) SetConfig(config ml.CacheConfig) { if c.config != nil { panic("config cannot be changed after being previously set, either by the model or backend") } c.config = &config } func (c *EncoderCache) 
Close() { for _, ctx := range c.ctxs { ctx.Close() } } func (c *EncoderCache) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error { // We work with the most recent image if len(batch.Multimodal) > 0 { c.curPos = batch.Positions[batch.Multimodal[len(batch.Multimodal)-1].Index] } c.curReserve = reserve return nil } func (c *EncoderCache) SetLayer(layer int) { c.curLayer = layer } func (c *EncoderCache) EncoderCached() bool { return c.encoderCached } func (c *EncoderCache) Get(ctx ml.Context) (ml.Tensor, ml.Tensor, ml.Tensor) { return c.keys[c.curLayer], c.values[c.curLayer], nil } func (c *EncoderCache) Put(ctx ml.Context, key, value ml.Tensor) { if !c.curReserve { c.encoderPos = c.curPos c.encoderCached = true } if c.config.PermutedV { value = value.Permute(ctx, 1, 2, 0, 3) } if _, ok := c.ctxs[c.curLayer]; !ok { c.ctxs[c.curLayer] = c.backend.NewContextSize(2).Layer(c.curLayer) } if _, ok := c.keys[c.curLayer]; !ok { c.keys[c.curLayer] = c.ctxs[c.curLayer].Empty(key.DType(), key.Shape()...) } if _, ok := c.values[c.curLayer]; !ok { c.values[c.curLayer] = c.ctxs[c.curLayer].Empty(value.DType(), value.Shape()...) } ctx.Forward( key.Copy(ctx, c.keys[c.curLayer]), value.Copy(ctx, c.values[c.curLayer]), ) } func (c *EncoderCache) CopyPrefix(srcSeq, dstSeq int, len int32) { panic("encoder cache does not support multiple sequences") } func (c *EncoderCache) CanResume(seq int, pos int32) bool { return true } func (c *EncoderCache) Remove(seq int, beginIndex, endIndex int32) error { if c.encoderPos >= beginIndex && c.encoderPos < endIndex { c.encoderCached = false } return nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/harmony/harmonyparser_test.go
harmony/harmonyparser_test.go
package harmony import ( "fmt" "reflect" "testing" ) func TestHeaderParsing(t *testing.T) { tests := []struct { in, wantRole, wantChannel, wantRecipient string }{ { in: "assistant<|channel|>analysis", wantRole: "assistant", wantChannel: "analysis", wantRecipient: "", }, { in: "assistant<|channel|>analysis to=functions.get_weather", wantRole: "assistant", wantChannel: "analysis", wantRecipient: "functions.get_weather", }, { in: "assistant to=functions.get_weather<|channel|>analysis", wantRole: "assistant", wantChannel: "analysis", wantRecipient: "functions.get_weather", }, // special case where the role is replaced by the recipient (matches reference code) { in: "to=functions.get_weather<|channel|>analysis", wantRole: "tool", wantChannel: "analysis", wantRecipient: "functions.get_weather", }, // extra token after the recipient is ignored { in: "assistant to=functions.get_weather abc<|channel|>analysis", wantRole: "assistant", wantChannel: "analysis", wantRecipient: "functions.get_weather", }, // with constrain tag, recipient after channel tag { in: "assistant<|channel|>commentary to=functions.get_weather <|constrain|>json", wantRole: "assistant", wantChannel: "commentary", wantRecipient: "functions.get_weather", }, // with constrain tag, recipient before channel tag { in: "assistant to=functions.get_weather<|channel|>commentary <|constrain|>json", wantRole: "assistant", wantChannel: "commentary", wantRecipient: "functions.get_weather", }, // constrain tag without space { in: "assistant<|channel|>commentary to=functions.get_weather<|constrain|>json", wantRole: "assistant", wantChannel: "commentary", wantRecipient: "functions.get_weather", }, // constrain tag without space, different order { in: "assistant to=functions.get_weather<|channel|>commentary<|constrain|>json", wantRole: "assistant", wantChannel: "commentary", wantRecipient: "functions.get_weather", }, } for i, tt := range tests { parser := HarmonyParser{ MessageStartTag: "<|start|>", MessageEndTag: 
"<|end|>", HeaderEndTag: "<|message|>", } header := parser.parseHeader(tt.in) if header.Role != tt.wantRole { t.Errorf("case %d: got role \"%s\", want \"%s\"", i, header.Role, tt.wantRole) } if header.Channel != tt.wantChannel { t.Errorf("case %d: got channel \"%s\", want \"%s\"", i, header.Channel, tt.wantChannel) } if header.Recipient != tt.wantRecipient { t.Errorf("case %d: got recipient \"%s\", want \"%s\"", i, header.Recipient, tt.wantRecipient) } } } func TestHarmonyParserHeaderEvent(t *testing.T) { tests := []struct { in, wantRole, wantChannel, wantRecipient string implicitStart bool }{ { in: "<|start|>user<|message|>What is 2 + 2?<|end|>", wantRole: "user", wantChannel: "", wantRecipient: "", }, { in: "<|start|>assistant<|channel|>analysis<|message|>What is 2 + 2?<|end|>", wantRole: "assistant", wantChannel: "analysis", wantRecipient: "", }, { in: "<|start|>assistant<|channel|>commentary to=functions.get_weather <|constrain|>json<|message|>{\"location\":\"San Francisco\"}<|call|><|start|>functions.get_weather to=assistant<|message|>{\"sunny\": true, \"temperature\": 20}<|end|>", wantRole: "assistant", wantChannel: "commentary", wantRecipient: "functions.get_weather", }, { in: "<|channel|>analysis<|message|>User asks weather in SF. We need location. 
Use get_current_weather with location \"San Francisco, CA\".<|end|><|start|>assistant<|channel|>commentary to=functions.get_current_weather <|constrain|>json<|message|>{\"location\":\"San Francisco, CA\"}<|call|>", wantRole: "assistant", wantChannel: "analysis", wantRecipient: "", implicitStart: true, }, } for i, tt := range tests { parser := HarmonyParser{ MessageStartTag: "<|start|>", MessageEndTag: "<|end|>", HeaderEndTag: "<|message|>", } if tt.implicitStart { parser.AddImplicitStart() } gotEvents := parser.AddContent(tt.in) if len(gotEvents) == 0 { t.Errorf("case %d: got no events, want at least one", i) } var firstHeaderEvent *HarmonyEventHeaderComplete // print events for _, event := range gotEvents { fmt.Printf("event: %+v\n", event) } for _, event := range gotEvents { if event, ok := event.(HarmonyEventHeaderComplete); ok { firstHeaderEvent = &event break } } if firstHeaderEvent == nil { t.Errorf("case %d: got no header complete event, want one", i) continue } gotHeader := firstHeaderEvent.Header if gotHeader.Role != tt.wantRole || gotHeader.Channel != tt.wantChannel || gotHeader.Recipient != tt.wantRecipient { t.Errorf("case %d: got header %+v, want role=%s channel=%s recipient=%s", i, gotHeader, tt.wantRole, tt.wantChannel, tt.wantRecipient) } } } func TestHarmonyParserNonStreaming(t *testing.T) { tests := []struct { in string implicitStart bool wantEvents []HarmonyEvent }{ { in: "<|start|>user<|message|>What is 2 + 2?<|end|>", wantEvents: []HarmonyEvent{ HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}}, HarmonyEventContentEmitted{Content: "What is 2 + 2?"}, HarmonyEventMessageEnd{}, }, }, { in: "<|start|>assistant<|channel|>analysis<|message|>The answer is 4<|end|>", wantEvents: []HarmonyEvent{ HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "analysis", Recipient: ""}}, HarmonyEventContentEmitted{Content: "The answer is 4"}, 
HarmonyEventMessageEnd{}, }, }, { in: "<|start|>assistant<|channel|>commentary to=functions.calc<|message|>Computing...<|end|>", wantEvents: []HarmonyEvent{ HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "commentary", Recipient: "functions.calc"}}, HarmonyEventContentEmitted{Content: "Computing..."}, HarmonyEventMessageEnd{}, }, }, { in: "<|start|>user<|message|><|end|>", wantEvents: []HarmonyEvent{ HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}}, HarmonyEventMessageEnd{}, }, }, { in: "<|start|>user<|message|>Hello<|end|><|start|>assistant<|message|>Hi!<|end|>", wantEvents: []HarmonyEvent{ HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}}, HarmonyEventContentEmitted{Content: "Hello"}, HarmonyEventMessageEnd{}, HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "", Recipient: ""}}, HarmonyEventContentEmitted{Content: "Hi!"}, HarmonyEventMessageEnd{}, }, }, { in: "<|channel|>analysis<|message|>Thinking about the request<|end|>", implicitStart: true, wantEvents: []HarmonyEvent{HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "analysis", Recipient: ""}}, HarmonyEventContentEmitted{Content: "Thinking about the request"}, HarmonyEventMessageEnd{}}, }, } for i, tt := range tests { parser := HarmonyParser{ MessageStartTag: "<|start|>", MessageEndTag: "<|end|>", HeaderEndTag: "<|message|>", } if tt.implicitStart { parser.AddImplicitStart() } gotEvents := parser.AddContent(tt.in) if !reflect.DeepEqual(gotEvents, tt.wantEvents) { t.Errorf("case %d: got events %#v, want %#v", i, gotEvents, tt.wantEvents) } } } func TestHarmonyParserStreaming(t *testing.T) { type step struct { input string wantEvents []HarmonyEvent } cases := []struct { desc string implicitStart bool steps 
[]step }{ { desc: "simple message streamed character by character", steps: []step{ { input: "<", wantEvents: nil, }, { input: "|", wantEvents: nil, }, { input: "start|>u", wantEvents: []HarmonyEvent{HarmonyEventMessageStart{}}, }, { input: "ser<|mess", wantEvents: nil, }, { input: "age|>Hi", wantEvents: []HarmonyEvent{ HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}}, HarmonyEventContentEmitted{Content: "Hi"}, }, }, { input: " there", wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: " there"}}, }, { input: "<|e", wantEvents: nil, }, { input: "nd|>", wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}}, }, }, }, { desc: "message with channel streamed", steps: []step{ { input: "<|start|>assistant", wantEvents: []HarmonyEvent{HarmonyEventMessageStart{}}, }, { input: "<|chan", wantEvents: nil, }, { input: "nel|>analysis", wantEvents: nil, }, { input: "<|message|>", wantEvents: []HarmonyEvent{HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "analysis", Recipient: ""}}}, }, { input: "Thinking", wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "Thinking"}}, }, { input: "...", wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "..."}}, }, { input: "<|end|>", wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}}, }, }, }, { desc: "message with channel and recipient", steps: []step{ { input: "<|start|>assistant<|channel|>commentary to=functions.calc<|message|>", wantEvents: []HarmonyEvent{ HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "commentary", Recipient: "functions.calc"}}, }, }, { input: "{\"x\": 5}", wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "{\"x\": 5}"}}, }, { input: "<|end|>", wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}}, }, }, }, { desc: "message with channel and recipient (receipient before channel)", steps: []step{ { input: "<|start|>assistant 
to=functions.calc<|channel|>commentary<|message|>", wantEvents: []HarmonyEvent{ HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "commentary", Recipient: "functions.calc"}}, }, }, { input: "{\"x\": 5}", wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "{\"x\": 5}"}}, }, { input: "<|end|>", wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}}, }, }, }, { desc: "implicit start with channel", implicitStart: true, steps: []step{ { input: "<|channel|>thinking", wantEvents: []HarmonyEvent{HarmonyEventMessageStart{}}, }, { input: "<|message|>", wantEvents: []HarmonyEvent{HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "thinking", Recipient: ""}}}, }, { input: "Processing request", wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "Processing request"}}, }, { input: "<|end|>", wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}}, }, }, }, { desc: "multiple messages streamed", steps: []step{ { input: "<|start|>user<|message|>Hello<|end|>", wantEvents: []HarmonyEvent{ HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}}, HarmonyEventContentEmitted{Content: "Hello"}, HarmonyEventMessageEnd{}, }, }, { input: "<|start|>", wantEvents: []HarmonyEvent{HarmonyEventMessageStart{}}, }, { input: "assistant<|message|>", wantEvents: []HarmonyEvent{HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "", Recipient: ""}}}, }, { input: "Hi!", wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "Hi!"}}, }, { input: "<|end|>", wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}}, }, }, }, { desc: "empty message", steps: []step{ { input: "<|start|>system<|message|><|end|>", wantEvents: []HarmonyEvent{ HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "system", Channel: "", Recipient: ""}}, HarmonyEventMessageEnd{}, }, }, }, }, { desc: "partial tag 
that looks like end but isn't", steps: []step{ { input: "<|start|>user<|message|>test<|e", wantEvents: []HarmonyEvent{ HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}}, HarmonyEventContentEmitted{Content: "test"}, }, }, { input: "xample|>more", wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "<|example|>more"}}, }, { input: "<|end|>", wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}}, }, }, }, } for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { parser := HarmonyParser{ MessageStartTag: "<|start|>", MessageEndTag: "<|end|>", HeaderEndTag: "<|message|>", } if tc.implicitStart { parser.AddImplicitStart() } for i, step := range tc.steps { gotEvents := parser.AddContent(step.input) if !reflect.DeepEqual(gotEvents, step.wantEvents) { t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, step.wantEvents) } } }) } } // TestFunctionConvertToValidChars tests only FunctionNameMap.convert(), which doesn't // handle any saving (and therefore no dupe handling) func TestFunctionConvertToValidChars(t *testing.T) { tests := []struct { name string in string want string }{ {name: "replace spaces with underscores", in: "get weather", want: "get_weather"}, {name: "replace hyphens with underscores", in: "get-weather", want: "get_weather"}, {name: "replace periods with underscores", in: "get.weather", want: "get_weather"}, {name: "disallow non-word characters", in: "get weather!", want: "get_weather"}, {name: "strip out invalid non-alphanumeric unicode characters", in: "a🫠bc", want: "abc"}, {name: "names that only contain invalid characters", in: "🫠", want: "unnamed"}, {name: "leading number", in: "123", want: "_123"}, {name: "$ allowed", in: "$", want: "$"}, // show that we allow weird unicode letter characters, though we might want // to convert them to their closest ASCII equivalents in the future {name: "allow weird unicode letter characters", in: 
"𝓸𝓵𝓵𝓪𝓶𝓪", want: "𝓸𝓵𝓵𝓪𝓶𝓪"}, // names that look like words but are invalid (i.e., not ID_Start/ID_Continue) {name: "disallow non-word characters that look like words", in: "ⓞⓛⓛⓐⓜⓐ123", want: "_123"}, } for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := NewFunctionNameMap() got := parser.convertToValidChars(tt.in) if got != tt.want { t.Errorf("case %d: got %q, want %q", i, got, tt.want) } }) } } func TestFunctionConvertAndAdd(t *testing.T) { // make a fresh map for each test, but within a test use the same map so we can test for dupe handling tests := []struct { name string in []string want []string }{ {name: "basic dupe handling", in: []string{"get weather", "get weather"}, want: []string{"get_weather", "get_weather_2"}}, {name: "dupes from different user-specified names", in: []string{"get weather", "get_weather", "get-weather"}, want: []string{"get_weather", "get_weather_2", "get_weather_3"}}, {name: "non dupes after dupes", in: []string{"get weather", "get_weather", "get-weather", "something-different"}, want: []string{"get_weather", "get_weather_2", "get_weather_3", "something_different"}}, {name: "multiple sets of dupes", in: []string{"a", "a", "b", "a", "a", "b", "a"}, want: []string{"a", "a_2", "b", "a_3", "a_4", "b_2", "a_5"}}, {name: "built-in functions should not be renamed", in: []string{"browser.open", "python", "not.a.built-in.function", "browser.not_a_real_built_in"}, want: []string{"browser.open", "python", "not_a_built_in_function", "browser_not_a_real_built_in"}}, } for i, tt := range tests { parser := NewFunctionNameMap() t.Run(tt.name, func(t *testing.T) { for j, in := range tt.in { got := parser.ConvertAndAdd(in) want := tt.want[j] if got != want { t.Errorf("case %d: got %q, want %q", i, got, want) } // check that the maps are correct if parser.userToHarmony[in] != want { t.Errorf("case %d: userToHarmony[%q] = %q, want %q", i, in, parser.userToHarmony[in], want) } if parser.harmonyToUser[want] != in { t.Errorf("case %d: 
harmonyToUser[%q] = %q, want %q", i, want, parser.harmonyToUser[want], in) } } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/harmony/harmonyparser.go
harmony/harmonyparser.go
package harmony import ( "encoding/json" "fmt" "log/slog" "strings" "unicode" "github.com/ollama/ollama/api" "github.com/ollama/ollama/logutil" ) type harmonyParserState int const ( harmonyParserState_LookingForMessageStart harmonyParserState = iota harmonyParserState_ParsingHeader harmonyParserState_ParsingContent ) func (s harmonyParserState) String() string { switch s { // we're looking for the message start tag case harmonyParserState_LookingForMessageStart: return "LookingForMessageStart" case harmonyParserState_ParsingHeader: return "ParsingHeader" case harmonyParserState_ParsingContent: return "ParsingContent" default: return "Unknown" } } type HarmonyParser struct { state harmonyParserState MessageStartTag string MessageEndTag string HeaderEndTag string acc strings.Builder lifetimeAcc strings.Builder } type HarmonyEvent interface { isHarmonyEvent() } type HarmonyEventMessageStart struct{} func (HarmonyEventMessageStart) isHarmonyEvent() {} type HarmonyEventHeaderComplete struct { Header HarmonyHeader } func (HarmonyEventHeaderComplete) isHarmonyEvent() {} type HarmonyEventContentEmitted struct { Content string } func (HarmonyEventContentEmitted) isHarmonyEvent() {} type HarmonyEventMessageEnd struct{} func (HarmonyEventMessageEnd) isHarmonyEvent() {} type HarmonyHeader struct { Role string Channel string Recipient string } func (s *HarmonyParser) AddImplicitStart() { s.acc.WriteString("<|start|>assistant") } func (s *HarmonyParser) AddImplicitStartOrPrefill(lastMessage *api.Message) { if lastMessage != nil && lastMessage.Role == "assistant" { // handle prefilling conditions if lastMessage.Content != "" { s.acc.WriteString("<|start|>assistant<|channel|>final<|message|>") return } else if lastMessage.Thinking != "" { s.acc.WriteString("<|start|>assistant<|channel|>analysis<|message|>") return } } s.AddImplicitStart() } func (s *HarmonyParser) AddContent(content string) []HarmonyEvent { s.lifetimeAcc.WriteString(content) s.acc.WriteString(content) var events 
[]HarmonyEvent keepLooping := true // we loop because we might pass through multiple parsing states in a single // call to addContent, and we want to make sure callers don't have to wait for // data that's already unambiguous for keepLooping { var newEvents []HarmonyEvent newEvents, keepLooping = eat(s) events = append(events, newEvents...) } return events } // the additional bool return is true iff we should continue eating func eat(s *HarmonyParser) ([]HarmonyEvent, bool) { switch s.state { case harmonyParserState_LookingForMessageStart: // does the acc contain the message start tag? if strings.Contains(s.acc.String(), s.MessageStartTag) { // split the acc into the message start tag and the rest split := strings.SplitN(s.acc.String(), s.MessageStartTag, 2) before := split[0] if before != "" { slog.Warn("harmony parser: found message start tag in the middle of the content", "content", s.acc.String()) } after := split[1] s.acc.Reset() s.acc.WriteString(after) s.state = harmonyParserState_ParsingHeader return []HarmonyEvent{HarmonyEventMessageStart{}}, true } // no match, so we keep accumulating return nil, false case harmonyParserState_ParsingHeader: if strings.Contains(s.acc.String(), s.HeaderEndTag) { split := strings.SplitN(s.acc.String(), s.HeaderEndTag, 2) header := split[0] after := split[1] s.acc.Reset() s.acc.WriteString(after) s.state = harmonyParserState_ParsingContent return []HarmonyEvent{HarmonyEventHeaderComplete{Header: s.parseHeader(header)}}, true } return nil, false case harmonyParserState_ParsingContent: if strings.Contains(s.acc.String(), s.MessageEndTag) { // if we already have the message end tag, we can emit the content up to it split := strings.SplitN(s.acc.String(), s.MessageEndTag, 2) content := split[0] after := split[1] s.acc.Reset() s.acc.WriteString(after) s.state = harmonyParserState_LookingForMessageStart events := []HarmonyEvent{} if content != "" { events = append(events, HarmonyEventContentEmitted{Content: content}) } events = 
append(events, HarmonyEventMessageEnd{}) return events, true } else if overlapLen := overlap(s.acc.String(), s.MessageEndTag); overlapLen > 0 { // if our suffix contains the start of the message end tag, we can emit // the content up to the start of the message end tag content := s.acc.String()[:len(s.acc.String())-overlapLen] remaining := s.acc.String()[len(s.acc.String())-overlapLen:] s.acc.Reset() s.acc.WriteString(remaining) // emit the content we know isn't part of the message end tag, and keep // accumulating to disambiguate the rest if content == "" { return nil, false } return []HarmonyEvent{HarmonyEventContentEmitted{Content: content}}, false } else { // no end tag, so it's still normal content that we can immediately emit content := s.acc.String() if content == "" { return nil, false } s.acc.Reset() return []HarmonyEvent{HarmonyEventContentEmitted{Content: content}}, false } } return nil, false } func (s *HarmonyParser) parseHeader(raw string) HarmonyHeader { harmonyHeader := HarmonyHeader{} // if `<|constrain|>` is present, ensure it has a space before it so it gets // parsed as a separate token, even if the model didn't include the space if strings.Contains(raw, "<|constrain|>") { raw = strings.Replace(raw, "<|constrain|>", " <|constrain|>", 1) raw = strings.TrimSpace(raw) } // look for the optional channel tag, which is `<|channel|>` followed by the // channel name, all without any whitespace channelIndex := strings.Index(raw, "<|channel|>") if channelIndex != -1 { before := raw[:channelIndex] after := raw[channelIndex+len("<|channel|>"):] // the channel name is `after` all the way up to the first (if any) whitespace character idx := strings.IndexFunc(after, func(r rune) bool { return unicode.IsSpace(r) }) if idx == -1 { idx = len(after) } harmonyHeader.Channel = after[:idx] after = after[idx:] // now we remove the channel tag from the raw string to further process raw = before + after raw = strings.TrimSpace(raw) } // split the header into 
whitespace-separated tokens tokens := strings.Fields(raw) // the first token is treated as the role if len(tokens) == 0 { slog.Error("harmony parser: missing role in header", "header", raw) return harmonyHeader } role := tokens[0] tokens = tokens[1:] // special case: if role starts with to= then it's a tool call if strings.HasPrefix(role, "to=") { harmonyHeader.Recipient = role[3:] harmonyHeader.Role = "tool" } else { harmonyHeader.Role = role } // the recipient (if any) can be specified before or after the channel tag, so // we check it at the end once we've already parsed the channel and role if harmonyHeader.Recipient == "" && len(tokens) > 0 && strings.HasPrefix(tokens[0], "to=") { harmonyHeader.Recipient = tokens[0][3:] } return harmonyHeader } // longest overlap between suffix of s and prefix of delim func overlap(s, delim string) int { max := min(len(delim), len(s)) for i := max; i > 0; i-- { if strings.HasSuffix(s, delim[:i]) { return i } } return 0 } // harmonyMessageState represents the current state of message processing type harmonyMessageState int const ( harmonyMessageState_Normal harmonyMessageState = iota harmonyMessageState_Thinking harmonyMessageState_ToolCalling ) // HarmonyMessageHandler processes harmony events and accumulates content appropriately. 
// This is a higher level interface that maps harmony concepts into ollama concepts type HarmonyMessageHandler struct { state harmonyMessageState HarmonyParser *HarmonyParser FunctionNameMap *FunctionNameMap toolAccumulator *HarmonyToolCallAccumulator convertedTools map[string]struct{} } // NewHarmonyMessageHandler creates a new message handler func NewHarmonyMessageHandler() *HarmonyMessageHandler { return &HarmonyMessageHandler{ state: harmonyMessageState_Normal, HarmonyParser: &HarmonyParser{ MessageStartTag: "<|start|>", MessageEndTag: "<|end|>", HeaderEndTag: "<|message|>", }, FunctionNameMap: NewFunctionNameMap(), convertedTools: make(map[string]struct{}), } } // AddContent processes the content and returns the content, thinking, and tool content. // content and thinking are already fully parsed, but tool content still needs to be passed to the tool parser func (h *HarmonyMessageHandler) AddContent(content string, toolParser *HarmonyToolCallAccumulator) (string, string, string) { contentSb := strings.Builder{} thinkingSb := strings.Builder{} toolContentSb := strings.Builder{} events := h.HarmonyParser.AddContent(content) for _, event := range events { switch event := event.(type) { case HarmonyEventHeaderComplete: logutil.Trace("harmony event header complete", "header", event.Header) switch event.Header.Channel { case "analysis": if event.Header.Recipient != "" { h.state = harmonyMessageState_ToolCalling // event.Header.Recipient is the tool name, something like // "browser.search" for a built-in, or "functions.calc" for a // custom one toolParser.SetToolName(event.Header.Recipient) } else { h.state = harmonyMessageState_Thinking } case "commentary": if event.Header.Recipient != "" { h.state = harmonyMessageState_ToolCalling toolParser.SetToolName(event.Header.Recipient) } else { h.state = harmonyMessageState_Normal } case "final": h.state = harmonyMessageState_Normal } case HarmonyEventContentEmitted: logutil.Trace("harmony event content", "content", 
event.Content, "state", h.state) if h.state == harmonyMessageState_Normal { contentSb.WriteString(event.Content) } else if h.state == harmonyMessageState_Thinking { thinkingSb.WriteString(event.Content) } else if h.state == harmonyMessageState_ToolCalling { toolContentSb.WriteString(event.Content) } case HarmonyEventMessageEnd: h.state = harmonyMessageState_Normal } } return contentSb.String(), thinkingSb.String(), toolContentSb.String() } func (h *HarmonyMessageHandler) CreateToolParser() *HarmonyToolCallAccumulator { return &HarmonyToolCallAccumulator{ state: harmonyToolCallState_Normal, currentToolName: nil, } } type harmonyToolCallState int const ( harmonyToolCallState_Normal harmonyToolCallState = iota harmonyToolCallState_ToolCalling ) type HarmonyToolCallAccumulator struct { state harmonyToolCallState acc strings.Builder currentToolName *string } func (a *HarmonyToolCallAccumulator) SetToolName(toolName string) { a.currentToolName = &toolName } func (a *HarmonyToolCallAccumulator) Add(content string) { a.acc.WriteString(content) } func (a *HarmonyToolCallAccumulator) Drain() (*string, string) { str := a.acc.String() a.state = harmonyToolCallState_Normal a.acc.Reset() return a.currentToolName, str } func (a *HarmonyToolCallAccumulator) Content() string { return a.acc.String() } // FunctionNameMap maps a user-specified function name to a valid function // name for harmony (which look like TypeScript identifiers). 
This is needed to // transform user-specified function names, which might contain characters that // are not allowed in TypeScript identifiers type FunctionNameMap struct { userToHarmony map[string]string harmonyToUser map[string]string } func NewFunctionNameMap() *FunctionNameMap { return &FunctionNameMap{ userToHarmony: make(map[string]string), harmonyToUser: make(map[string]string), } } // Init initializes the handler with tools, optional last message, and think value // Implements the Parser interface func (h *HarmonyMessageHandler) Init(tools []api.Tool, lastMessage *api.Message, thinkValue *api.ThinkValue) []api.Tool { // Initialize the harmony parser if h.HarmonyParser == nil { h.HarmonyParser = &HarmonyParser{ MessageStartTag: "<|start|>", MessageEndTag: "<|end|>", HeaderEndTag: "<|message|>", } } // Handle prefill for chat mode if lastMessage != nil { h.HarmonyParser.AddImplicitStartOrPrefill(lastMessage) } else { h.HarmonyParser.AddImplicitStart() } // Initialize tool accumulator h.toolAccumulator = h.CreateToolParser() // Process tools and return renamed versions if len(tools) == 0 { return tools } processedTools := make([]api.Tool, len(tools)) copy(processedTools, tools) for i, tool := range processedTools { if tool.Function.Name != "" { processedTools[i].Function.Name = h.FunctionNameMap.ConvertAndAdd(tool.Function.Name) h.convertedTools[tool.Function.Name] = struct{}{} } } return processedTools } // Add implements the Parser interface - processes streamed content and extracts content, thinking, and tool calls func (h *HarmonyMessageHandler) Add(s string, done bool) (content string, thinking string, calls []api.ToolCall, err error) { content, thinking, toolContent := h.AddContent(s, h.toolAccumulator) if toolContent != "" { h.toolAccumulator.Add(toolContent) } // tool calls always happen one at a time, and always at the end of a message, // so for simplicity we defer parsing them until we know we're done if done { toolName, raw := 
h.toolAccumulator.Drain() if toolName != nil { name := strings.TrimPrefix(*toolName, "functions.") name = h.FunctionNameMap.OriginalFromConverted(name) var args api.ToolCallFunctionArguments if err := json.Unmarshal([]byte(raw), &args); err != nil { return "", "", nil, fmt.Errorf("error parsing tool call: raw='%s', err=%w", raw, err) } calls = append(calls, api.ToolCall{Function: api.ToolCallFunction{Name: name, Arguments: args}}) } } return content, thinking, calls, nil } // HasToolSupport implements the Parser interface func (h *HarmonyMessageHandler) HasToolSupport() bool { return true } // HasThinkingSupport implements the Parser interface func (h *HarmonyMessageHandler) HasThinkingSupport() bool { return true } func (m *FunctionNameMap) ConvertAndAdd(userFunctionName string) string { harmonyFunctionName := m.deriveName(userFunctionName) // built-in functions should not be renamed if userFunctionName == "browser.open" || userFunctionName == "browser.search" || userFunctionName == "browser.find" || userFunctionName == "python" { harmonyFunctionName = userFunctionName } m.userToHarmony[userFunctionName] = harmonyFunctionName m.harmonyToUser[harmonyFunctionName] = userFunctionName return harmonyFunctionName } // OriginalFromConverted looks up the reverse-mapping of a previously-converted // user->harmony function name. To unmap reliably, the mapping must exist, as // the conversion process is not reversible without the appropriate state func (m *FunctionNameMap) OriginalFromConverted(harmonyFunctionName string) string { if userFunctionName, ok := m.harmonyToUser[harmonyFunctionName]; ok { return userFunctionName } slog.Warn("harmony parser: no reverse mapping found for function name", "harmonyFunctionName", harmonyFunctionName) // fallback to the original function name if we can't find a mapping return harmonyFunctionName } // convertToValidChars converts a user-specified function name to a valid // TypeScript identifier. 
// // Limitations: // // - This doesn't restrict reserved TypeScript keywords. // - We don't perform a real ID_Start/ID_Continue check, and instead use the more // restrictive unicode.IsLetter/unicode.IsDigit check. Unclear what kind of // identifiers these models were trained on, so in the end we might want to // convert unicode-heavy identifiers to their closest ASCII equivalents. func (m *FunctionNameMap) convertToValidChars(userFunctionName string) string { mapper := func(r rune) rune { // first, replace certain characters with underscores if r == ' ' || r == '-' || r == '.' { return '_' } if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' || r == '$' { return r } // finally, remove any other characters return -1 } candidate := strings.Map(mapper, userFunctionName) // set a default name if we end up with nothing left if candidate == "" { return "unnamed" } // if the candidate starts with a number, prepend an underscore to make it a // valid identifier if unicode.IsDigit(rune(candidate[0])) { candidate = "_" + candidate } return candidate } func (m *FunctionNameMap) deriveName(userFunctionName string) string { originalCandidate := m.convertToValidChars(userFunctionName) candidate := originalCandidate // Check for dupes, and if so, add a number to the end. // We start at 2 because if we have dupes and the first is never renamed, it // makes sense for them to be named, say, `f`, `f_2`, `f_3` count := 2 for { if _, exists := m.harmonyToUser[candidate]; !exists { break } candidate = fmt.Sprintf("%s_%d", originalCandidate, count) count++ } return candidate }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/progress/progress.go
progress/progress.go
package progress

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"sync"
	"time"

	"golang.org/x/term"
)

// Fallback terminal dimensions used when the real size cannot be queried.
const (
	defaultTermWidth  = 80
	defaultTermHeight = 24
)

// State is a single renderable progress line (e.g. a Bar or Spinner).
type State interface {
	String() string
}

// Progress renders a stack of State lines to a terminal, refreshing them
// periodically from a background goroutine started by NewProgress.
type Progress struct {
	mu sync.Mutex

	// buffer output to minimize flickering on all terminals
	w *bufio.Writer

	// pos is the number of lines drawn by the previous render pass; used to
	// move the cursor back up before redrawing.
	pos int

	ticker *time.Ticker
	states []State
}

// NewProgress wraps w in a buffered writer and starts the render loop.
func NewProgress(w io.Writer) *Progress {
	p := &Progress{w: bufio.NewWriter(w)}
	go p.start()
	return p
}

// stop halts all spinners and the render ticker, then draws one final frame.
// Returns false if the ticker was already stopped.
// NOTE(review): reads p.states and p.ticker without holding p.mu — presumably
// callers stop after all Adds are done; confirm before relying on this.
func (p *Progress) stop() bool {
	for _, state := range p.states {
		if spinner, ok := state.(*Spinner); ok {
			spinner.Stop()
		}
	}

	if p.ticker != nil {
		p.ticker.Stop()
		p.ticker = nil
		// draw a final frame so the last state values are visible
		p.render()
		return true
	}

	return false
}

// Stop ends rendering, leaving the progress lines on screen followed by a
// newline. Returns whether this call performed the stop.
func (p *Progress) Stop() bool {
	stopped := p.stop()
	if stopped {
		fmt.Fprint(p.w, "\n")
		p.w.Flush()
	}
	return stopped
}

// StopAndClear ends rendering and erases every progress line that was drawn,
// hiding the cursor while it does so.
func (p *Progress) StopAndClear() bool {
	defer p.w.Flush()

	// hide the cursor during the clear, restore it afterwards
	fmt.Fprint(p.w, "\033[?25l")
	defer fmt.Fprint(p.w, "\033[?25h")

	stopped := p.stop()
	if stopped {
		// clear all progress lines
		for i := range p.pos {
			if i > 0 {
				// move up one line before clearing the next
				fmt.Fprint(p.w, "\033[A")
			}
			// erase the whole line and return to column 1
			fmt.Fprint(p.w, "\033[2K\033[1G")
		}
	}

	return stopped
}

// Add registers a new State to render below the existing ones.
// The key parameter is currently unused.
func (p *Progress) Add(key string, state State) {
	p.mu.Lock()
	defer p.mu.Unlock()

	p.states = append(p.states, state)
}

// render redraws all states in place, clamping to the terminal height so the
// output never scrolls.
func (p *Progress) render() {
	_, termHeight, err := term.GetSize(int(os.Stderr.Fd()))
	if err != nil {
		termHeight = defaultTermHeight
	}

	p.mu.Lock()
	defer p.mu.Unlock()

	defer p.w.Flush()

	// eliminate flickering on terminals that support synchronized output
	fmt.Fprint(p.w, "\033[?2026h")
	defer fmt.Fprint(p.w, "\033[?2026l")

	// hide the cursor while redrawing
	fmt.Fprint(p.w, "\033[?25l")
	defer fmt.Fprint(p.w, "\033[?25h")

	// move the cursor back to the beginning
	for range p.pos - 1 {
		fmt.Fprint(p.w, "\033[A")
	}
	fmt.Fprint(p.w, "\033[1G")

	// render progress lines; if there are more states than terminal rows,
	// only the most recent ones are shown
	maxHeight := min(len(p.states), termHeight)
	for i := len(p.states) - maxHeight; i < len(p.states); i++ {
		fmt.Fprint(p.w, p.states[i].String(), "\033[K")
		if i < len(p.states)-1 {
			fmt.Fprint(p.w, "\n")
		}
	}

	p.pos = len(p.states)
}

// start runs the 100ms render loop; it exits when stop() stops the ticker.
func (p *Progress) start() {
	p.ticker = time.NewTicker(100 * time.Millisecond)
	for range p.ticker.C {
		p.render()
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/progress/bar.go
progress/bar.go
package progress

import (
	"fmt"
	"os"
	"strings"
	"time"

	"golang.org/x/term"

	"github.com/ollama/ollama/format"
)

// Bar is a State that renders a byte-count progress bar with percentage,
// transfer rate, and estimated time remaining.
type Bar struct {
	message      string
	messageWidth int // fixed message column width; -1 disables padding/truncation

	maxValue     int64
	initialValue int64
	currentValue int64

	started time.Time
	stopped time.Time // zero while in progress; set when complete

	// buckets holds up to maxBuckets recent (time, value) samples used to
	// compute a smoothed transfer rate.
	maxBuckets int
	buckets    []bucket
}

// bucket is a single rate sample: the value observed at a point in time.
type bucket struct {
	updated time.Time
	value   int64
}

// NewBar creates a bar; if initialValue already reaches maxValue the bar
// starts in the stopped (complete) state.
func NewBar(message string, maxValue, initialValue int64) *Bar {
	b := Bar{
		message:      message,
		messageWidth: -1,
		maxValue:     maxValue,
		initialValue: initialValue,
		currentValue: initialValue,
		started:      time.Now(),
		maxBuckets:   10,
	}

	if initialValue >= maxValue {
		b.stopped = time.Now()
	}

	return &b
}

// formatDuration limits the rendering of a time.Duration to 2 units
func formatDuration(d time.Duration) string {
	switch {
	case d >= 100*time.Hour:
		return "99h+"
	case d >= time.Hour:
		return fmt.Sprintf("%dh%dm", int(d.Hours()), int(d.Minutes())%60)
	default:
		return d.Round(time.Second).String()
	}
}

// String renders the bar sized to the current terminal width:
// [message] NN% ▕███   ▏ cur/max rate eta
func (b *Bar) String() string {
	termWidth, _, err := term.GetSize(int(os.Stderr.Fd()))
	if err != nil {
		termWidth = defaultTermWidth
	}

	// pre: optional message column plus the percentage
	var pre strings.Builder
	if len(b.message) > 0 {
		message := strings.TrimSpace(b.message)
		if b.messageWidth > 0 && len(message) > b.messageWidth {
			message = message[:b.messageWidth]
		}
		fmt.Fprintf(&pre, "%s", message)
		if padding := b.messageWidth - pre.Len(); padding > 0 {
			pre.WriteString(repeat(" ", padding))
		}
		pre.WriteString(" ")
	}

	fmt.Fprintf(&pre, "%3.0f%%", b.percent())

	// suf: byte counts, rate, and remaining time, each right-aligned into a
	// fixed-width field so the bar itself stays a stable width
	var suf strings.Builder
	// max 13 characters: "999 MB/999 MB"
	if b.stopped.IsZero() {
		curValue := format.HumanBytes(b.currentValue)
		suf.WriteString(repeat(" ", 6-len(curValue)))
		suf.WriteString(curValue)
		suf.WriteString("/")
		maxValue := format.HumanBytes(b.maxValue)
		suf.WriteString(repeat(" ", 6-len(maxValue)))
		suf.WriteString(maxValue)
	} else {
		// once complete, show only the total
		maxValue := format.HumanBytes(b.maxValue)
		suf.WriteString(repeat(" ", 6-len(maxValue)))
		suf.WriteString(maxValue)
		suf.WriteString(repeat(" ", 7))
	}

	rate := b.rate()
	// max 10 characters: " 999 MB/s"
	if b.stopped.IsZero() && rate > 0 {
		suf.WriteString(" ")
		humanRate := format.HumanBytes(int64(rate))
		suf.WriteString(repeat(" ", 6-len(humanRate)))
		suf.WriteString(humanRate)
		suf.WriteString("/s")
	} else {
		suf.WriteString(repeat(" ", 10))
	}

	// max 8 characters: " 59m59s"
	if b.stopped.IsZero() && rate > 0 {
		suf.WriteString(" ")
		var remaining time.Duration
		if rate > 0 {
			remaining = time.Duration(int64(float64(b.maxValue-b.currentValue)/rate)) * time.Second
		}

		humanRemaining := formatDuration(remaining)
		suf.WriteString(repeat(" ", 6-len(humanRemaining)))
		suf.WriteString(humanRemaining)
	} else {
		suf.WriteString(repeat(" ", 8))
	}

	// mid: the bar itself fills whatever width remains
	var mid strings.Builder
	// add 5 extra spaces: 2 boundary characters and 1 space at each end
	f := termWidth - pre.Len() - suf.Len() - 5
	n := int(float64(f) * b.percent() / 100)
	mid.WriteString(" ▕")

	if n > 0 {
		mid.WriteString(repeat("█", n))
	}

	if f-n > 0 {
		mid.WriteString(repeat(" ", f-n))
	}

	mid.WriteString("▏ ")

	return pre.String() + mid.String() + suf.String()
}

// Set updates the current value (clamped to maxValue), marks the bar stopped
// on completion, and records a rate sample at most once per second.
func (b *Bar) Set(value int64) {
	if value >= b.maxValue {
		value = b.maxValue
	}

	b.currentValue = value
	if b.currentValue >= b.maxValue {
		b.stopped = time.Now()
	}

	// throttle bucket updates to 1 per second
	if len(b.buckets) == 0 || time.Since(b.buckets[len(b.buckets)-1].updated) > time.Second {
		b.buckets = append(b.buckets, bucket{
			updated: time.Now(),
			value:   value,
		})

		if len(b.buckets) > b.maxBuckets {
			// drop the oldest sample to keep a sliding window
			b.buckets = b.buckets[1:]
		}
	}
}

// percent returns completion in [0, 100]; 0 when maxValue is unknown.
func (b *Bar) percent() float64 {
	if b.maxValue > 0 {
		return float64(b.currentValue) / float64(b.maxValue) * 100
	}

	return 0
}

// rate returns bytes/second: the overall average once stopped, otherwise a
// moving average over the sliding window of buckets.
func (b *Bar) rate() float64 {
	var numerator, denominator float64

	if !b.stopped.IsZero() {
		numerator = float64(b.currentValue - b.initialValue)
		denominator = b.stopped.Sub(b.started).Round(time.Second).Seconds()
	} else {
		switch len(b.buckets) {
		case 0:
			// noop
		case 1:
			numerator = float64(b.buckets[0].value - b.initialValue)
			denominator = b.buckets[0].updated.Sub(b.started).Round(time.Second).Seconds()
		default:
			first, last := b.buckets[0], b.buckets[len(b.buckets)-1]
			numerator = float64(last.value - first.value)
			denominator = last.updated.Sub(first.updated).Round(time.Second).Seconds()
		}
	}

	if denominator != 0 {
		return numerator / denominator
	}

	return 0
}

// repeat is strings.Repeat that tolerates non-positive counts, which occur
// when the terminal is narrower than the fixed-width fields assume.
func repeat(s string, n int) string {
	if n > 0 {
		return strings.Repeat(s, n)
	}

	return ""
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/progress/spinner.go
progress/spinner.go
package progress import ( "fmt" "strings" "sync/atomic" "time" ) type Spinner struct { message atomic.Value messageWidth int parts []string value int ticker *time.Ticker started time.Time stopped time.Time } func NewSpinner(message string) *Spinner { s := &Spinner{ parts: []string{ "⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏", }, started: time.Now(), } s.SetMessage(message) go s.start() return s } func (s *Spinner) SetMessage(message string) { s.message.Store(message) } func (s *Spinner) String() string { var sb strings.Builder if message, ok := s.message.Load().(string); ok && len(message) > 0 { message := strings.TrimSpace(message) if s.messageWidth > 0 && len(message) > s.messageWidth { message = message[:s.messageWidth] } fmt.Fprintf(&sb, "%s", message) if padding := s.messageWidth - sb.Len(); padding > 0 { sb.WriteString(strings.Repeat(" ", padding)) } sb.WriteString(" ") } if s.stopped.IsZero() { spinner := s.parts[s.value] sb.WriteString(spinner) sb.WriteString(" ") } return sb.String() } func (s *Spinner) start() { s.ticker = time.NewTicker(100 * time.Millisecond) for range s.ticker.C { s.value = (s.value + 1) % len(s.parts) if !s.stopped.IsZero() { return } } } func (s *Spinner) Stop() { if s.stopped.IsZero() { s.stopped = time.Now() } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/term_windows.go
readline/term_windows.go
package readline

import (
	"golang.org/x/sys/windows"
)

// State stores the console mode in effect before raw mode was enabled, so it
// can be restored later.
type State struct {
	mode uint32
}

// IsTerminal checks if the given file descriptor is associated with a terminal
func IsTerminal(fd uintptr) bool {
	var st uint32
	err := windows.GetConsoleMode(windows.Handle(fd), &st)
	return err == nil
}

// SetRawMode switches the console to raw input mode (no echo, no line
// buffering, no input processing) and returns the prior mode for restoration.
func SetRawMode(fd uintptr) (*State, error) {
	var st uint32
	if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
		return nil, err
	}

	// this enables raw mode by turning off various flags in the console mode: https://pkg.go.dev/golang.org/x/sys/windows#pkg-constants
	raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)

	// turn on ENABLE_VIRTUAL_TERMINAL_INPUT to enable escape sequences
	raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT

	if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
		return nil, err
	}

	return &State{st}, nil
}

// UnsetRawMode restores the console mode saved by SetRawMode.
// state must be the *State returned from SetRawMode; any other type panics.
func UnsetRawMode(fd uintptr, state any) error {
	s := state.(*State)
	return windows.SetConsoleMode(windows.Handle(fd), s.mode)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/term_linux.go
readline/term_linux.go
//go:build linux || solaris package readline import ( "syscall" "unsafe" ) const ( tcgets = 0x5401 tcsets = 0x5402 ) func getTermios(fd uintptr) (*Termios, error) { termios := new(Termios) _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, tcgets, uintptr(unsafe.Pointer(termios)), 0, 0, 0) if err != 0 { return nil, err } return termios, nil } func setTermios(fd uintptr, termios *Termios) error { _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, tcsets, uintptr(unsafe.Pointer(termios)), 0, 0, 0) if err != 0 { return err } return nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/types.go
readline/types.go
package readline

import "strconv"

// Control characters (raw byte values read from the terminal).
const (
	CharNull      = 0
	CharLineStart = 1 // Ctrl-A
	CharBackward  = 2 // Ctrl-B
	CharInterrupt = 3 // Ctrl-C
	CharDelete    = 4 // Ctrl-D
	CharLineEnd   = 5 // Ctrl-E
	CharForward   = 6 // Ctrl-F
	CharBell      = 7
	CharCtrlH     = 8
	CharTab       = 9
	CharCtrlJ     = 10
	CharKill      = 11 // Ctrl-K
	CharCtrlL     = 12
	CharEnter     = 13
	CharNext      = 14 // Ctrl-N
	CharPrev      = 16 // Ctrl-P
	CharBckSearch = 18 // Ctrl-R
	CharFwdSearch = 19 // Ctrl-S
	CharTranspose = 20 // Ctrl-T
	CharCtrlU     = 21
	CharCtrlW     = 23
	CharCtrlY     = 25
	CharCtrlZ     = 26
	CharEsc       = 27
	CharSpace     = 32
	CharEscapeEx  = 91 // '[' — introduces an extended (CSI) escape sequence
	CharBackspace = 127
)

// Final bytes of CSI escape sequences (arrow keys, delete, home/end).
const (
	KeyDel    = 51
	KeyUp     = 65
	KeyDown   = 66
	KeyRight  = 67
	KeyLeft   = 68
	MetaEnd   = 70
	MetaStart = 72
)

// ANSI escape sequences emitted to control the cursor and screen.
const (
	Esc = "\x1b"

	CursorSave    = Esc + "[s"
	CursorRestore = Esc + "[u"

	CursorEOL  = Esc + "[E"
	CursorBOL  = Esc + "[1G"
	CursorHide = Esc + "[?25l"
	CursorShow = Esc + "[?25h"

	ClearToEOL  = Esc + "[K"
	ClearLine   = Esc + "[2K"
	ClearScreen = Esc + "[2J"
	CursorReset = Esc + "[0;0f"

	ColorGrey    = Esc + "[38;5;245m"
	ColorDefault = Esc + "[0m"

	ColorBold = Esc + "[1m"

	StartBracketedPaste = Esc + "[?2004h"
	EndBracketedPaste   = Esc + "[?2004l"
)

// CursorUpN moves the cursor up n lines.
func CursorUpN(n int) string {
	return Esc + "[" + strconv.Itoa(n) + "A"
}

// CursorDownN moves the cursor down n lines.
func CursorDownN(n int) string {
	return Esc + "[" + strconv.Itoa(n) + "B"
}

// CursorRightN moves the cursor right n columns.
func CursorRightN(n int) string {
	return Esc + "[" + strconv.Itoa(n) + "C"
}

// CursorLeftN moves the cursor left n columns.
func CursorLeftN(n int) string {
	return Esc + "[" + strconv.Itoa(n) + "D"
}

// Single-step cursor movements.
var (
	CursorUp    = CursorUpN(1)
	CursorDown  = CursorDownN(1)
	CursorRight = CursorRightN(1)
	CursorLeft  = CursorLeftN(1)
)

// Bracketed-paste markers: a paste is delimited by ESC [ 200~ ... ESC [ 201~.
const (
	CharBracketedPaste      = 50 // '2' — second byte of the paste CSI sequence
	CharBracketedPasteStart = "00~"
	CharBracketedPasteEnd   = "01~"
)
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/buffer.go
readline/buffer.go
package readline

import (
	"fmt"
	"os"

	"github.com/emirpasic/gods/v2/lists/arraylist"
	"github.com/mattn/go-runewidth"
	"golang.org/x/term"
)

// Buffer is the editable input line. It tracks both the rune index (Pos) and
// the on-screen display column/width position (DisplayPos), which differ when
// double-width runes are present, and it emits ANSI escape sequences directly
// to stdout to keep the terminal display in sync with edits.
type Buffer struct {
	DisplayPos int // display-width offset of the cursor from the start of input
	Pos        int // rune index of the cursor within Buf

	Buf *arraylist.List[rune]

	// LineHasSpace is an arraylist of bools to keep track of whether a line has a space at the end
	LineHasSpace *arraylist.List[bool]

	Prompt    *Prompt
	LineWidth int // usable columns per line (terminal width minus prompt)
	Width     int // terminal width in columns
	Height    int // terminal height in rows
}

// NewBuffer creates a Buffer sized to the current terminal, falling back to
// 80x24 when the size cannot be determined.
func NewBuffer(prompt *Prompt) (*Buffer, error) {
	fd := int(os.Stdout.Fd())
	width, height := 80, 24
	if termWidth, termHeight, err := term.GetSize(fd); err == nil {
		width, height = termWidth, termHeight
	}

	lwidth := width - len(prompt.prompt())

	b := &Buffer{
		DisplayPos:   0,
		Pos:          0,
		Buf:          arraylist.New[rune](),
		LineHasSpace: arraylist.New[bool](),
		Prompt:       prompt,
		Width:        width,
		Height:       height,
		LineWidth:    lwidth,
	}

	return b, nil
}

// GetLineSpacing reports whether the given wrapped line ended with a spacing
// adjustment; out-of-range lines return false.
func (b *Buffer) GetLineSpacing(line int) bool {
	hasSpace, _ := b.LineHasSpace.Get(line)
	return hasSpace
}

// MoveLeft moves the cursor one rune to the left, wrapping up to the end of
// the previous display line when at a line boundary.
func (b *Buffer) MoveLeft() {
	if b.Pos > 0 {
		// asserts that we retrieve a rune
		if r, ok := b.Buf.Get(b.Pos - 1); ok {
			rLength := runewidth.RuneWidth(r)

			if b.DisplayPos%b.LineWidth == 0 {
				// at the start of a wrapped line: jump to the end of the previous one
				fmt.Print(CursorUp + CursorBOL + CursorRightN(b.Width))
				if rLength == 2 {
					fmt.Print(CursorLeft)
				}

				line := b.DisplayPos/b.LineWidth - 1
				hasSpace := b.GetLineSpacing(line)
				if hasSpace {
					b.DisplayPos -= 1
					fmt.Print(CursorLeft)
				}
			} else {
				fmt.Print(CursorLeftN(rLength))
			}

			b.Pos -= 1
			b.DisplayPos -= rLength
		}
	}
}

// MoveLeftWord moves the cursor left to the beginning of the previous word
// (skipping trailing spaces first).
func (b *Buffer) MoveLeftWord() {
	if b.Pos > 0 {
		var foundNonspace bool
		for {
			v, _ := b.Buf.Get(b.Pos - 1)
			if v == ' ' {
				if foundNonspace {
					break
				}
			} else {
				foundNonspace = true
			}
			b.MoveLeft()

			if b.Pos == 0 {
				break
			}
		}
	}
}

// MoveRight moves the cursor one rune to the right, wrapping down to the next
// display line (after the alt prompt) at line boundaries.
func (b *Buffer) MoveRight() {
	if b.Pos < b.Buf.Size() {
		if r, ok := b.Buf.Get(b.Pos); ok {
			rLength := runewidth.RuneWidth(r)
			b.Pos += 1
			hasSpace := b.GetLineSpacing(b.DisplayPos / b.LineWidth)
			b.DisplayPos += rLength

			if b.DisplayPos%b.LineWidth == 0 {
				fmt.Print(CursorDown + CursorBOL + CursorRightN(len(b.Prompt.prompt())))
			} else if (b.DisplayPos-rLength)%b.LineWidth == b.LineWidth-1 && hasSpace {
				// crossing a line that ended with a spacing adjustment
				fmt.Print(CursorDown + CursorBOL + CursorRightN(len(b.Prompt.prompt())+rLength))
				b.DisplayPos += 1
			} else if b.LineHasSpace.Size() > 0 && b.DisplayPos%b.LineWidth == b.LineWidth-1 && hasSpace {
				fmt.Print(CursorDown + CursorBOL + CursorRightN(len(b.Prompt.prompt())))
				b.DisplayPos += 1
			} else {
				fmt.Print(CursorRightN(rLength))
			}
		}
	}
}

// MoveRightWord moves the cursor right to the next space or the end of input.
func (b *Buffer) MoveRightWord() {
	if b.Pos < b.Buf.Size() {
		for {
			b.MoveRight()
			v, _ := b.Buf.Get(b.Pos)
			if v == ' ' {
				break
			}

			if b.Pos == b.Buf.Size() {
				break
			}
		}
	}
}

// MoveToStart moves the cursor to the beginning of the input.
func (b *Buffer) MoveToStart() {
	if b.Pos > 0 {
		currLine := b.DisplayPos / b.LineWidth
		if currLine > 0 {
			for range currLine {
				fmt.Print(CursorUp)
			}
		}
		fmt.Print(CursorBOL + CursorRightN(len(b.Prompt.prompt())))
		b.Pos = 0
		b.DisplayPos = 0
	}
}

// MoveToEnd moves the cursor past the last rune of the input.
func (b *Buffer) MoveToEnd() {
	if b.Pos < b.Buf.Size() {
		currLine := b.DisplayPos / b.LineWidth
		totalLines := b.DisplaySize() / b.LineWidth
		if currLine < totalLines {
			for range totalLines - currLine {
				fmt.Print(CursorDown)
			}
			remainder := b.DisplaySize() % b.LineWidth
			fmt.Print(CursorBOL + CursorRightN(len(b.Prompt.prompt())+remainder))
		} else {
			fmt.Print(CursorRightN(b.DisplaySize() - b.DisplayPos))
		}

		b.Pos = b.Buf.Size()
		b.DisplayPos = b.DisplaySize()
	}
}

// DisplaySize returns the total display width of the buffer contents
// (double-width runes count as 2 columns).
func (b *Buffer) DisplaySize() int {
	sum := 0
	for i := range b.Buf.Size() {
		if r, ok := b.Buf.Get(i); ok {
			sum += runewidth.RuneWidth(r)
		}
	}

	return sum
}

// Add appends r at the cursor, inserting (and redrawing the tail) when the
// cursor is not at the end.
func (b *Buffer) Add(r rune) {
	if b.Pos == b.Buf.Size() {
		b.AddChar(r, false)
	} else {
		b.AddChar(r, true)
	}
}

// AddChar writes r at the cursor position. insert selects between inserting
// mid-buffer (requiring a redraw of the remainder) and appending at the end.
func (b *Buffer) AddChar(r rune, insert bool) {
	rLength := runewidth.RuneWidth(r)
	b.DisplayPos += rLength

	if b.Pos > 0 {
		if b.DisplayPos%b.LineWidth == 0 {
			// the rune exactly fills the line; wrap to the alt prompt
			fmt.Printf("%c", r)
			fmt.Printf("\n%s", b.Prompt.AltPrompt)

			if insert {
				b.LineHasSpace.Set(b.DisplayPos/b.LineWidth-1, false)
			} else {
				b.LineHasSpace.Add(false)
			}

			// this case occurs when a double-width rune crosses the line boundary
		} else if b.DisplayPos%b.LineWidth < (b.DisplayPos-rLength)%b.LineWidth {
			if insert {
				fmt.Print(ClearToEOL)
			}
			fmt.Printf("\n%s", b.Prompt.AltPrompt)
			b.DisplayPos += 1
			fmt.Printf("%c", r)

			if insert {
				b.LineHasSpace.Set(b.DisplayPos/b.LineWidth-1, true)
			} else {
				b.LineHasSpace.Add(true)
			}
		} else {
			fmt.Printf("%c", r)
		}
	} else {
		fmt.Printf("%c", r)
	}

	if insert {
		b.Buf.Insert(b.Pos, r)
	} else {
		b.Buf.Add(r)
	}

	b.Pos += 1

	if insert {
		b.drawRemaining()
	}
}

// countRemainingLineWidth returns the byte length of the runes after the
// cursor that fit on the current display line, starting at column place.
func (b *Buffer) countRemainingLineWidth(place int) int {
	var sum int
	counter := -1
	var prevLen int

	for place <= b.LineWidth {
		counter += 1
		sum += prevLen
		if r, ok := b.Buf.Get(b.Pos + counter); ok {
			place += runewidth.RuneWidth(r)
			prevLen = len(string(r))
		} else {
			break
		}
	}

	return sum
}

// drawRemaining redraws everything after the cursor (used after inserts and
// deletes mid-buffer), maintaining LineHasSpace bookkeeping, and returns the
// cursor to its position.
func (b *Buffer) drawRemaining() {
	var place int
	remainingText := b.StringN(b.Pos)
	if b.Pos > 0 {
		place = b.DisplayPos % b.LineWidth
	}
	fmt.Print(CursorHide)

	// render the rest of the current line
	currLineLength := b.countRemainingLineWidth(place)

	currLine := remainingText[:min(currLineLength, len(remainingText))]
	currLineSpace := runewidth.StringWidth(currLine)
	remLength := runewidth.StringWidth(remainingText)

	if len(currLine) > 0 {
		fmt.Print(ClearToEOL + currLine + CursorLeftN(currLineSpace))
	} else {
		fmt.Print(ClearToEOL)
	}

	// update the spacing flag for the current line
	if currLineSpace != b.LineWidth-place && currLineSpace != remLength {
		b.LineHasSpace.Set(b.DisplayPos/b.LineWidth, true)
	} else if currLineSpace != b.LineWidth-place {
		b.LineHasSpace.Remove(b.DisplayPos / b.LineWidth)
	} else {
		b.LineHasSpace.Set(b.DisplayPos/b.LineWidth, false)
	}

	if (b.DisplayPos+currLineSpace)%b.LineWidth == 0 && currLine == remainingText {
		// the remainder exactly fills the line; pre-draw the alt prompt below
		fmt.Print(CursorRightN(currLineSpace))
		fmt.Printf("\n%s", b.Prompt.AltPrompt)
		fmt.Print(CursorUp + CursorBOL + CursorRightN(b.Width-currLineSpace))
	}

	// render the other lines
	if remLength > currLineSpace {
		remaining := (remainingText[len(currLine):])
		var totalLines int
		var displayLength int
		var lineLength int = currLineSpace

		for _, c := range remaining {
			if displayLength == 0 || (displayLength+runewidth.RuneWidth(c))%b.LineWidth < displayLength%b.LineWidth {
				// wrapped onto a new line
				fmt.Printf("\n%s", b.Prompt.AltPrompt)
				totalLines += 1

				if displayLength != 0 {
					if lineLength == b.LineWidth {
						b.LineHasSpace.Set(b.DisplayPos/b.LineWidth+totalLines-1, false)
					} else {
						b.LineHasSpace.Set(b.DisplayPos/b.LineWidth+totalLines-1, true)
					}
				}

				lineLength = 0
			}

			displayLength += runewidth.RuneWidth(c)
			lineLength += runewidth.RuneWidth(c)
			fmt.Printf("%c", c)
		}
		fmt.Print(ClearToEOL + CursorUpN(totalLines) + CursorBOL + CursorRightN(b.Width-currLineSpace))

		hasSpace := b.GetLineSpacing(b.DisplayPos / b.LineWidth)

		if hasSpace && b.DisplayPos%b.LineWidth != b.LineWidth-1 {
			fmt.Print(CursorLeft)
		}
	}

	fmt.Print(CursorShow)
}

// Remove deletes the rune before the cursor (backspace), handling wraps back
// across display-line boundaries and erasing any orphaned trailing line.
func (b *Buffer) Remove() {
	if b.Buf.Size() > 0 && b.Pos > 0 {
		if r, ok := b.Buf.Get(b.Pos - 1); ok {
			rLength := runewidth.RuneWidth(r)
			hasSpace := b.GetLineSpacing(b.DisplayPos/b.LineWidth - 1)

			if b.DisplayPos%b.LineWidth == 0 {
				// if the user backspaces over the word boundary, do this magic to clear the line
				// and move to the end of the previous line
				fmt.Print(CursorBOL + ClearToEOL + CursorUp + CursorBOL + CursorRightN(b.Width))

				if b.DisplaySize()%b.LineWidth < (b.DisplaySize()-rLength)%b.LineWidth {
					b.LineHasSpace.Remove(b.DisplayPos/b.LineWidth - 1)
				}

				if hasSpace {
					b.DisplayPos -= 1
					fmt.Print(CursorLeft)
				}

				if rLength == 2 {
					fmt.Print(CursorLeft + " " + CursorLeftN(2))
				} else {
					fmt.Print(" " + CursorLeft)
				}
			} else if (b.DisplayPos-rLength)%b.LineWidth == 0 && hasSpace {
				fmt.Print(CursorBOL + ClearToEOL + CursorUp + CursorBOL + CursorRightN(b.Width))

				if b.Pos == b.Buf.Size() {
					b.LineHasSpace.Remove(b.DisplayPos/b.LineWidth - 1)
				}
				b.DisplayPos -= 1
			} else {
				// simple case: erase the rune's cells in place
				fmt.Print(CursorLeftN(rLength))
				for range rLength {
					fmt.Print(" ")
				}
				fmt.Print(CursorLeftN(rLength))
			}

			var eraseExtraLine bool
			if (b.DisplaySize()-1)%b.LineWidth == 0 || (rLength == 2 && ((b.DisplaySize()-2)%b.LineWidth == 0)) || b.DisplaySize()%b.LineWidth == 0 {
				eraseExtraLine = true
			}

			b.Pos -= 1
			b.DisplayPos -= rLength
			b.Buf.Remove(b.Pos)

			if b.Pos < b.Buf.Size() {
				b.drawRemaining()
				// this erases a line which is left over when backspacing in the middle of a line and there
				// are trailing characters which go over the line width boundary
				if eraseExtraLine {
					remainingLines := (b.DisplaySize() - b.DisplayPos) / b.LineWidth
					fmt.Print(CursorDownN(remainingLines+1) + CursorBOL + ClearToEOL)
					place := b.DisplayPos % b.LineWidth
					fmt.Print(CursorUpN(remainingLines+1) + CursorRightN(place+len(b.Prompt.prompt())))
				}
			}
		}
	}
}

// Delete removes the rune under the cursor (forward delete) and redraws.
func (b *Buffer) Delete() {
	if b.Buf.Size() > 0 && b.Pos < b.Buf.Size() {
		b.Buf.Remove(b.Pos)
		b.drawRemaining()
		if b.DisplaySize()%b.LineWidth == 0 {
			if b.DisplayPos != b.DisplaySize() {
				// clear the now-empty final wrapped line
				remainingLines := (b.DisplaySize() - b.DisplayPos) / b.LineWidth
				fmt.Print(CursorDownN(remainingLines) + CursorBOL + ClearToEOL)
				place := b.DisplayPos % b.LineWidth
				fmt.Print(CursorUpN(remainingLines) + CursorRightN(place+len(b.Prompt.prompt())))
			}
		}
	}
}

// DeleteBefore removes everything before the cursor.
func (b *Buffer) DeleteBefore() {
	if b.Pos > 0 {
		for cnt := b.Pos - 1; cnt >= 0; cnt-- {
			b.Remove()
		}
	}
}

// DeleteRemaining removes everything from the cursor to the end of input.
func (b *Buffer) DeleteRemaining() {
	if b.DisplaySize() > 0 && b.Pos < b.DisplaySize() {
		charsToDel := b.Buf.Size() - b.Pos
		for range charsToDel {
			b.Delete()
		}
	}
}

// DeleteWord removes the word before the cursor (and any trailing spaces).
func (b *Buffer) DeleteWord() {
	if b.Buf.Size() > 0 && b.Pos > 0 {
		var foundNonspace bool
		for {
			v, _ := b.Buf.Get(b.Pos - 1)
			if v == ' ' {
				if !foundNonspace {
					b.Remove()
				} else {
					break
				}
			} else {
				foundNonspace = true
				b.Remove()
			}

			if b.Pos == 0 {
				break
			}
		}
	}
}

// ClearScreen clears the terminal, redraws the prompt and buffer contents,
// and restores the cursor to its prior logical position.
func (b *Buffer) ClearScreen() {
	fmt.Print(ClearScreen + CursorReset + b.Prompt.prompt())
	if b.IsEmpty() {
		ph := b.Prompt.placeholder()
		fmt.Print(ColorGrey + ph + CursorLeftN(len(ph)) + ColorDefault)
	} else {
		currPos := b.DisplayPos
		currIndex := b.Pos
		b.Pos = 0
		b.DisplayPos = 0
		b.drawRemaining()
		fmt.Print(CursorReset + CursorRightN(len(b.Prompt.prompt())))
		if currPos > 0 {
			targetLine := currPos / b.LineWidth
			if targetLine > 0 {
				for range targetLine {
					fmt.Print(CursorDown)
				}
			}
			remainder := currPos % b.LineWidth
			if remainder > 0 {
				fmt.Print(CursorRightN(remainder))
			}
			if currPos%b.LineWidth == 0 {
				fmt.Print(CursorBOL + b.Prompt.AltPrompt)
			}
		}
		b.Pos = currIndex
		b.DisplayPos = currPos
	}
}

// IsEmpty reports whether the buffer holds no runes.
func (b *Buffer) IsEmpty() bool {
	return b.Buf.Empty()
}

// Replace clears the current input (on screen and in the buffer) and types
// the given runes in its place.
func (b *Buffer) Replace(r []rune) {
	b.DisplayPos = 0
	b.Pos = 0
	lineNums := b.DisplaySize() / b.LineWidth

	b.Buf.Clear()

	fmt.Print(CursorBOL + ClearToEOL)

	for range lineNums {
		fmt.Print(CursorUp + CursorBOL + ClearToEOL)
	}

	fmt.Print(CursorBOL + b.Prompt.prompt())

	for _, c := range r {
		b.Add(c)
	}
}

// String returns the whole buffer contents.
func (b *Buffer) String() string {
	return b.StringN(0)
}

// StringN returns the buffer contents from rune index n to the end.
func (b *Buffer) StringN(n int) string {
	return b.StringNM(n, 0)
}

// StringNM returns the buffer contents in [n, m); m == 0 means the end.
func (b *Buffer) StringNM(n, m int) string {
	var s string
	if m == 0 {
		m = b.Buf.Size()
	}

	for cnt := n; cnt < m; cnt++ {
		c, _ := b.Buf.Get(cnt)
		s += string(c)
	}

	return s
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/errors.go
readline/errors.go
package readline

import (
	"errors"
)

// ErrInterrupt is the sentinel error for an interrupt (Ctrl-C) during input.
// NOTE(review): the message is capitalized, which violates the Go error-string
// convention (staticcheck ST1005); kept as-is since callers may match on text.
var ErrInterrupt = errors.New("Interrupt")

// InterruptError reports an interrupt that arrived while a partially typed
// line was present; Line holds the runes entered so far.
type InterruptError struct {
	Line []rune
}

// Error implements the error interface.
func (*InterruptError) Error() string {
	return "Interrupted"
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/term_bsd.go
readline/term_bsd.go
//go:build darwin || freebsd || netbsd || openbsd package readline import ( "syscall" "unsafe" ) func getTermios(fd uintptr) (*Termios, error) { termios := new(Termios) _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) if err != 0 { return nil, err } return termios, nil } func setTermios(fd uintptr, termios *Termios) error { _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) if err != 0 { return err } return nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/history.go
readline/history.go
package readline import ( "bufio" "errors" "fmt" "io" "os" "path/filepath" "strings" "github.com/emirpasic/gods/v2/lists/arraylist" ) type History struct { Buf *arraylist.List[string] Autosave bool Pos int Limit int Filename string Enabled bool } func NewHistory() (*History, error) { h := &History{ Buf: arraylist.New[string](), Limit: 100, // resizeme Autosave: true, Enabled: true, } err := h.Init() if err != nil { return nil, err } return h, nil } func (h *History) Init() error { home, err := os.UserHomeDir() if err != nil { return err } path := filepath.Join(home, ".ollama", "history") if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { return err } h.Filename = path f, err := os.OpenFile(path, os.O_CREATE|os.O_RDONLY, 0o600) if err != nil { if errors.Is(err, os.ErrNotExist) { return nil } return err } defer f.Close() r := bufio.NewReader(f) for { line, err := r.ReadString('\n') if err != nil { if errors.Is(err, io.EOF) { break } return err } line = strings.TrimSpace(line) if len(line) == 0 { continue } h.Add(line) } return nil } func (h *History) Add(s string) { h.Buf.Add(s) h.Compact() h.Pos = h.Size() if h.Autosave { _ = h.Save() } } func (h *History) Compact() { s := h.Buf.Size() if s > h.Limit { for range s - h.Limit { h.Buf.Remove(0) } } } func (h *History) Clear() { h.Buf.Clear() } func (h *History) Prev() (line string) { if h.Pos > 0 { h.Pos -= 1 } line, _ = h.Buf.Get(h.Pos) return line } func (h *History) Next() (line string) { if h.Pos < h.Buf.Size() { h.Pos += 1 line, _ = h.Buf.Get(h.Pos) } return line } func (h *History) Size() int { return h.Buf.Size() } func (h *History) Save() error { if !h.Enabled { return nil } tmpFile := h.Filename + ".tmp" f, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0o600) if err != nil { return err } defer f.Close() buf := bufio.NewWriter(f) for cnt := range h.Size() { line, _ := h.Buf.Get(cnt) fmt.Fprintln(buf, line) } buf.Flush() f.Close() if err = os.Rename(tmpFile, h.Filename); 
err != nil { return err } return nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/term.go
readline/term.go
//go:build aix || darwin || dragonfly || freebsd || (linux && !appengine) || netbsd || openbsd || os400 || solaris

package readline

import (
	"syscall"
)

// Termios is the platform terminal-attributes struct; getTermios/setTermios
// are supplied by the per-OS files in this package.
type Termios syscall.Termios

// SetRawMode puts the terminal into raw mode (no echo, no canonical line
// editing, no signal generation, byte-at-a-time reads) and returns the
// original attributes so they can be restored with UnsetRawMode.
func SetRawMode(fd uintptr) (*Termios, error) {
	termios, err := getTermios(fd)
	if err != nil {
		return nil, err
	}

	newTermios := *termios
	// disable input translation/flow-control so bytes arrive unmodified
	newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
	// disable echo, canonical mode, signals, and extended processing
	newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
	// 8-bit characters, no parity
	newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB
	newTermios.Cflag |= syscall.CS8
	// read returns after 1 byte with no inter-byte timeout
	newTermios.Cc[syscall.VMIN] = 1
	newTermios.Cc[syscall.VTIME] = 0

	return termios, setTermios(fd, &newTermios)
}

// UnsetRawMode restores attributes previously returned by SetRawMode.
// termios must be a *Termios; any other type panics.
func UnsetRawMode(fd uintptr, termios any) error {
	t := termios.(*Termios)
	return setTermios(fd, t)
}

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	_, err := getTermios(fd)
	return err == nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/readline.go
readline/readline.go
package readline import ( "bufio" "fmt" "io" "os" ) type Prompt struct { Prompt string AltPrompt string Placeholder string AltPlaceholder string UseAlt bool } func (p *Prompt) prompt() string { if p.UseAlt { return p.AltPrompt } return p.Prompt } func (p *Prompt) placeholder() string { if p.UseAlt { return p.AltPlaceholder } return p.Placeholder } type Terminal struct { reader *bufio.Reader rawmode bool termios any } type Instance struct { Prompt *Prompt Terminal *Terminal History *History Pasting bool } func New(prompt Prompt) (*Instance, error) { term, err := NewTerminal() if err != nil { return nil, err } history, err := NewHistory() if err != nil { return nil, err } return &Instance{ Prompt: &prompt, Terminal: term, History: history, }, nil } func (i *Instance) Readline() (string, error) { if !i.Terminal.rawmode { fd := os.Stdin.Fd() termios, err := SetRawMode(fd) if err != nil { return "", err } i.Terminal.rawmode = true i.Terminal.termios = termios } prompt := i.Prompt.prompt() if i.Pasting { // force alt prompt when pasting prompt = i.Prompt.AltPrompt } fmt.Print(prompt) defer func() { fd := os.Stdin.Fd() //nolint:errcheck UnsetRawMode(fd, i.Terminal.termios) i.Terminal.rawmode = false }() buf, _ := NewBuffer(i.Prompt) var esc bool var escex bool var metaDel bool var currentLineBuf []rune for { // don't show placeholder when pasting unless we're in multiline mode showPlaceholder := !i.Pasting || i.Prompt.UseAlt if buf.IsEmpty() && showPlaceholder { ph := i.Prompt.placeholder() fmt.Print(ColorGrey + ph + CursorLeftN(len(ph)) + ColorDefault) } r, err := i.Terminal.Read() if buf.IsEmpty() { fmt.Print(ClearToEOL) } if err != nil { return "", io.EOF } if escex { escex = false switch r { case KeyUp: i.historyPrev(buf, &currentLineBuf) case KeyDown: i.historyNext(buf, &currentLineBuf) case KeyLeft: buf.MoveLeft() case KeyRight: buf.MoveRight() case CharBracketedPaste: var code string for range 3 { r, err = i.Terminal.Read() if err != nil { return "", io.EOF } code 
+= string(r) } if code == CharBracketedPasteStart { i.Pasting = true } else if code == CharBracketedPasteEnd { i.Pasting = false } case KeyDel: if buf.DisplaySize() > 0 { buf.Delete() } metaDel = true case MetaStart: buf.MoveToStart() case MetaEnd: buf.MoveToEnd() default: // skip any keys we don't know about continue } continue } else if esc { esc = false switch r { case 'b': buf.MoveLeftWord() case 'f': buf.MoveRightWord() case CharBackspace: buf.DeleteWord() case CharEscapeEx: escex = true } continue } switch r { case CharNull: continue case CharEsc: esc = true case CharInterrupt: return "", ErrInterrupt case CharPrev: i.historyPrev(buf, &currentLineBuf) case CharNext: i.historyNext(buf, &currentLineBuf) case CharLineStart: buf.MoveToStart() case CharLineEnd: buf.MoveToEnd() case CharBackward: buf.MoveLeft() case CharForward: buf.MoveRight() case CharBackspace, CharCtrlH: buf.Remove() case CharTab: // todo: convert back to real tabs for range 8 { buf.Add(' ') } case CharDelete: if buf.DisplaySize() > 0 { buf.Delete() } else { return "", io.EOF } case CharKill: buf.DeleteRemaining() case CharCtrlU: buf.DeleteBefore() case CharCtrlL: buf.ClearScreen() case CharCtrlW: buf.DeleteWord() case CharCtrlZ: fd := os.Stdin.Fd() return handleCharCtrlZ(fd, i.Terminal.termios) case CharEnter, CharCtrlJ: output := buf.String() if output != "" { i.History.Add(output) } buf.MoveToEnd() fmt.Println() return output, nil default: if metaDel { metaDel = false continue } if r >= CharSpace || r == CharEnter || r == CharCtrlJ { buf.Add(r) } } } } func (i *Instance) HistoryEnable() { i.History.Enabled = true } func (i *Instance) HistoryDisable() { i.History.Enabled = false } func (i *Instance) historyPrev(buf *Buffer, currentLineBuf *[]rune) { if i.History.Pos > 0 { if i.History.Pos == i.History.Size() { *currentLineBuf = []rune(buf.String()) } buf.Replace([]rune(i.History.Prev())) } } func (i *Instance) historyNext(buf *Buffer, currentLineBuf *[]rune) { if i.History.Pos < 
i.History.Size() { buf.Replace([]rune(i.History.Next())) if i.History.Pos == i.History.Size() { buf.Replace(*currentLineBuf) } } } func NewTerminal() (*Terminal, error) { fd := os.Stdin.Fd() termios, err := SetRawMode(fd) if err != nil { return nil, err } if err := UnsetRawMode(fd, termios); err != nil { return nil, err } t := &Terminal{ reader: bufio.NewReader(os.Stdin), } return t, nil } func (t *Terminal) Read() (rune, error) { r, _, err := t.reader.ReadRune() if err != nil { return 0, err } return r, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/readline_unix.go
readline/readline_unix.go
//go:build !windows package readline import ( "syscall" ) func handleCharCtrlZ(fd uintptr, termios any) (string, error) { t := termios.(*Termios) if err := UnsetRawMode(fd, t); err != nil { return "", err } _ = syscall.Kill(0, syscall.SIGSTOP) // on resume... return "", nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/readline/readline_windows.go
readline/readline_windows.go
package readline func handleCharCtrlZ(fd uintptr, state any) (string, error) { // not supported return "", nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/api/types_test.go
api/types_test.go
package api import ( "encoding/json" "errors" "math" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // testPropsMap creates a ToolPropertiesMap from a map (convenience function for tests, order not preserved) func testPropsMap(m map[string]ToolProperty) *ToolPropertiesMap { props := NewToolPropertiesMap() for k, v := range m { props.Set(k, v) } return props } // testArgs creates ToolCallFunctionArguments from a map (convenience function for tests, order not preserved) func testArgs(m map[string]any) ToolCallFunctionArguments { args := NewToolCallFunctionArguments() for k, v := range m { args.Set(k, v) } return args } func TestKeepAliveParsingFromJSON(t *testing.T) { tests := []struct { name string req string exp *Duration }{ { name: "Unset", req: `{ }`, exp: nil, }, { name: "Positive Integer", req: `{ "keep_alive": 42 }`, exp: &Duration{42 * time.Second}, }, { name: "Positive Float", req: `{ "keep_alive": 42.5 }`, exp: &Duration{42500 * time.Millisecond}, }, { name: "Positive Integer String", req: `{ "keep_alive": "42m" }`, exp: &Duration{42 * time.Minute}, }, { name: "Negative Integer", req: `{ "keep_alive": -1 }`, exp: &Duration{math.MaxInt64}, }, { name: "Negative Float", req: `{ "keep_alive": -3.14 }`, exp: &Duration{math.MaxInt64}, }, { name: "Negative Integer String", req: `{ "keep_alive": "-1m" }`, exp: &Duration{math.MaxInt64}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var dec ChatRequest err := json.Unmarshal([]byte(test.req), &dec) require.NoError(t, err) assert.Equal(t, test.exp, dec.KeepAlive) }) } } func TestDurationMarshalUnmarshal(t *testing.T) { tests := []struct { name string input time.Duration expected time.Duration }{ { "negative duration", time.Duration(-1), time.Duration(math.MaxInt64), }, { "positive duration", 42 * time.Second, 42 * time.Second, }, { "another positive duration", 42 * time.Minute, 42 * time.Minute, }, { "zero duration", time.Duration(0), 
time.Duration(0), }, { "max duration", time.Duration(math.MaxInt64), time.Duration(math.MaxInt64), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { b, err := json.Marshal(Duration{test.input}) require.NoError(t, err) var d Duration err = json.Unmarshal(b, &d) require.NoError(t, err) assert.Equal(t, test.expected, d.Duration, "input %v, marshalled %v, got %v", test.input, string(b), d.Duration) }) } } func TestUseMmapParsingFromJSON(t *testing.T) { tr := true fa := false tests := []struct { name string req string exp *bool }{ { name: "Undefined", req: `{ }`, exp: nil, }, { name: "True", req: `{ "use_mmap": true }`, exp: &tr, }, { name: "False", req: `{ "use_mmap": false }`, exp: &fa, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var oMap map[string]any err := json.Unmarshal([]byte(test.req), &oMap) require.NoError(t, err) opts := DefaultOptions() err = opts.FromMap(oMap) require.NoError(t, err) assert.Equal(t, test.exp, opts.UseMMap) }) } } func TestUseMmapFormatParams(t *testing.T) { tr := true fa := false tests := []struct { name string req map[string][]string exp *bool err error }{ { name: "True", req: map[string][]string{ "use_mmap": {"true"}, }, exp: &tr, err: nil, }, { name: "False", req: map[string][]string{ "use_mmap": {"false"}, }, exp: &fa, err: nil, }, { name: "Numeric True", req: map[string][]string{ "use_mmap": {"1"}, }, exp: &tr, err: nil, }, { name: "Numeric False", req: map[string][]string{ "use_mmap": {"0"}, }, exp: &fa, err: nil, }, { name: "invalid string", req: map[string][]string{ "use_mmap": {"foo"}, }, exp: nil, err: errors.New("invalid bool value [foo]"), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { resp, err := FormatParams(test.req) require.Equal(t, test.err, err) respVal, ok := resp["use_mmap"] if test.exp != nil { assert.True(t, ok, "resp: %v", resp) assert.Equal(t, *test.exp, *respVal.(*bool)) } }) } } func TestMessage_UnmarshalJSON(t *testing.T) { tests := 
[]struct { input string expected string }{ {`{"role": "USER", "content": "Hello!"}`, "user"}, {`{"role": "System", "content": "Initialization complete."}`, "system"}, {`{"role": "assistant", "content": "How can I help you?"}`, "assistant"}, {`{"role": "TOOl", "content": "Access granted."}`, "tool"}, } for _, test := range tests { var msg Message if err := json.Unmarshal([]byte(test.input), &msg); err != nil { t.Errorf("Unexpected error: %v", err) } if msg.Role != test.expected { t.Errorf("role not lowercased: got %v, expected %v", msg.Role, test.expected) } } } func TestToolFunction_UnmarshalJSON(t *testing.T) { tests := []struct { name string input string wantErr string }{ { name: "valid enum with same types", input: `{ "name": "test", "description": "test function", "parameters": { "type": "object", "required": ["test"], "properties": { "test": { "type": "string", "description": "test prop", "enum": ["a", "b", "c"] } } } }`, wantErr: "", }, { name: "empty enum array", input: `{ "name": "test", "description": "test function", "parameters": { "type": "object", "required": ["test"], "properties": { "test": { "type": "string", "description": "test prop", "enum": [] } } } }`, wantErr: "", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var tf ToolFunction err := json.Unmarshal([]byte(tt.input), &tf) if tt.wantErr != "" { require.Error(t, err) assert.Contains(t, err.Error(), tt.wantErr) } else { require.NoError(t, err) } }) } } func TestToolFunctionParameters_MarshalJSON(t *testing.T) { tests := []struct { name string input ToolFunctionParameters expected string }{ { name: "simple object with string property", input: ToolFunctionParameters{ Type: "object", Required: []string{"name"}, Properties: testPropsMap(map[string]ToolProperty{ "name": {Type: PropertyType{"string"}}, }), }, expected: `{"type":"object","required":["name"],"properties":{"name":{"type":"string"}}}`, }, { name: "no required", input: ToolFunctionParameters{ Type: "object", 
Properties: testPropsMap(map[string]ToolProperty{ "name": {Type: PropertyType{"string"}}, }), }, expected: `{"type":"object","properties":{"name":{"type":"string"}}}`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { data, err := json.Marshal(test.input) require.NoError(t, err) assert.Equal(t, test.expected, string(data)) }) } } func TestToolCallFunction_IndexAlwaysMarshals(t *testing.T) { fn := ToolCallFunction{ Name: "echo", Arguments: testArgs(map[string]any{"message": "hi"}), } data, err := json.Marshal(fn) require.NoError(t, err) raw := map[string]any{} require.NoError(t, json.Unmarshal(data, &raw)) require.Contains(t, raw, "index") assert.Equal(t, float64(0), raw["index"]) fn.Index = 3 data, err = json.Marshal(fn) require.NoError(t, err) raw = map[string]any{} require.NoError(t, json.Unmarshal(data, &raw)) require.Contains(t, raw, "index") assert.Equal(t, float64(3), raw["index"]) } func TestPropertyType_UnmarshalJSON(t *testing.T) { tests := []struct { name string input string expected PropertyType }{ { name: "string type", input: `"string"`, expected: PropertyType{"string"}, }, { name: "array of types", input: `["string", "number"]`, expected: PropertyType{"string", "number"}, }, { name: "array with single type", input: `["string"]`, expected: PropertyType{"string"}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var pt PropertyType if err := json.Unmarshal([]byte(test.input), &pt); err != nil { t.Errorf("Unexpected error: %v", err) } if len(pt) != len(test.expected) { t.Errorf("Length mismatch: got %v, expected %v", len(pt), len(test.expected)) } for i, v := range pt { if v != test.expected[i] { t.Errorf("Value mismatch at index %d: got %v, expected %v", i, v, test.expected[i]) } } }) } } func TestPropertyType_MarshalJSON(t *testing.T) { tests := []struct { name string input PropertyType expected string }{ { name: "single type", input: PropertyType{"string"}, expected: `"string"`, }, { name: "multiple types", 
input: PropertyType{"string", "number"}, expected: `["string","number"]`, }, { name: "empty type", input: PropertyType{}, expected: `[]`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { data, err := json.Marshal(test.input) if err != nil { t.Errorf("Unexpected error: %v", err) } if string(data) != test.expected { t.Errorf("Marshaled data mismatch: got %v, expected %v", string(data), test.expected) } }) } } func TestThinking_UnmarshalJSON(t *testing.T) { tests := []struct { name string input string expectedThinking *ThinkValue expectedError bool }{ { name: "true", input: `{ "think": true }`, expectedThinking: &ThinkValue{Value: true}, }, { name: "false", input: `{ "think": false }`, expectedThinking: &ThinkValue{Value: false}, }, { name: "unset", input: `{ }`, expectedThinking: nil, }, { name: "string_high", input: `{ "think": "high" }`, expectedThinking: &ThinkValue{Value: "high"}, }, { name: "string_medium", input: `{ "think": "medium" }`, expectedThinking: &ThinkValue{Value: "medium"}, }, { name: "string_low", input: `{ "think": "low" }`, expectedThinking: &ThinkValue{Value: "low"}, }, { name: "invalid_string", input: `{ "think": "invalid" }`, expectedThinking: nil, expectedError: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var req GenerateRequest err := json.Unmarshal([]byte(test.input), &req) if test.expectedError { require.Error(t, err) } else { require.NoError(t, err) if test.expectedThinking == nil { assert.Nil(t, req.Think) } else { require.NotNil(t, req.Think) assert.Equal(t, test.expectedThinking.Value, req.Think.Value) } } }) } } func TestToolPropertyNestedProperties(t *testing.T) { tests := []struct { name string input string expected ToolProperty }{ { name: "nested object properties", input: `{ "type": "object", "description": "Location details", "properties": { "address": { "type": "string", "description": "Street address" }, "city": { "type": "string", "description": "City name" } } }`, 
expected: ToolProperty{ Type: PropertyType{"object"}, Description: "Location details", Properties: testPropsMap(map[string]ToolProperty{ "address": { Type: PropertyType{"string"}, Description: "Street address", }, "city": { Type: PropertyType{"string"}, Description: "City name", }, }), }, }, { name: "deeply nested properties", input: `{ "type": "object", "description": "Event", "properties": { "location": { "type": "object", "description": "Location", "properties": { "coordinates": { "type": "object", "description": "GPS coordinates", "properties": { "lat": {"type": "number", "description": "Latitude"}, "lng": {"type": "number", "description": "Longitude"} } } } } } }`, expected: ToolProperty{ Type: PropertyType{"object"}, Description: "Event", Properties: testPropsMap(map[string]ToolProperty{ "location": { Type: PropertyType{"object"}, Description: "Location", Properties: testPropsMap(map[string]ToolProperty{ "coordinates": { Type: PropertyType{"object"}, Description: "GPS coordinates", Properties: testPropsMap(map[string]ToolProperty{ "lat": {Type: PropertyType{"number"}, Description: "Latitude"}, "lng": {Type: PropertyType{"number"}, Description: "Longitude"}, }), }, }), }, }), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var prop ToolProperty err := json.Unmarshal([]byte(tt.input), &prop) require.NoError(t, err) // Compare JSON representations since pointer comparison doesn't work expectedJSON, err := json.Marshal(tt.expected) require.NoError(t, err) actualJSON, err := json.Marshal(prop) require.NoError(t, err) assert.JSONEq(t, string(expectedJSON), string(actualJSON)) // Round-trip test: marshal and unmarshal again data, err := json.Marshal(prop) require.NoError(t, err) var prop2 ToolProperty err = json.Unmarshal(data, &prop2) require.NoError(t, err) prop2JSON, err := json.Marshal(prop2) require.NoError(t, err) assert.JSONEq(t, string(expectedJSON), string(prop2JSON)) }) } } func TestToolFunctionParameters_String(t *testing.T) { tests 
:= []struct { name string params ToolFunctionParameters expected string }{ { name: "simple object with string property", params: ToolFunctionParameters{ Type: "object", Required: []string{"name"}, Properties: testPropsMap(map[string]ToolProperty{ "name": { Type: PropertyType{"string"}, Description: "The name of the person", }, }), }, expected: `{"type":"object","required":["name"],"properties":{"name":{"type":"string","description":"The name of the person"}}}`, }, { name: "marshal failure returns empty string", params: ToolFunctionParameters{ Type: "object", Defs: func() any { // Create a cycle that will cause json.Marshal to fail type selfRef struct { Self *selfRef } s := &selfRef{} s.Self = s return s }(), Properties: testPropsMap(map[string]ToolProperty{}), }, expected: "", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { result := test.params.String() assert.Equal(t, test.expected, result) }) } } func TestToolCallFunctionArguments_OrderPreservation(t *testing.T) { t.Run("marshal preserves insertion order", func(t *testing.T) { args := NewToolCallFunctionArguments() args.Set("zebra", "z") args.Set("apple", "a") args.Set("mango", "m") data, err := json.Marshal(args) require.NoError(t, err) // Should preserve insertion order, not alphabetical assert.Equal(t, `{"zebra":"z","apple":"a","mango":"m"}`, string(data)) }) t.Run("unmarshal preserves JSON order", func(t *testing.T) { jsonData := `{"zebra":"z","apple":"a","mango":"m"}` var args ToolCallFunctionArguments err := json.Unmarshal([]byte(jsonData), &args) require.NoError(t, err) // Verify iteration order matches JSON order var keys []string for k := range args.All() { keys = append(keys, k) } assert.Equal(t, []string{"zebra", "apple", "mango"}, keys) }) t.Run("round trip preserves order", func(t *testing.T) { original := `{"z":1,"a":2,"m":3,"b":4}` var args ToolCallFunctionArguments err := json.Unmarshal([]byte(original), &args) require.NoError(t, err) data, err := json.Marshal(args) 
require.NoError(t, err) assert.Equal(t, original, string(data)) }) t.Run("String method returns ordered JSON", func(t *testing.T) { args := NewToolCallFunctionArguments() args.Set("c", 3) args.Set("a", 1) args.Set("b", 2) assert.Equal(t, `{"c":3,"a":1,"b":2}`, args.String()) }) t.Run("Get retrieves correct values", func(t *testing.T) { args := NewToolCallFunctionArguments() args.Set("key1", "value1") args.Set("key2", 42) v, ok := args.Get("key1") assert.True(t, ok) assert.Equal(t, "value1", v) v, ok = args.Get("key2") assert.True(t, ok) assert.Equal(t, 42, v) _, ok = args.Get("nonexistent") assert.False(t, ok) }) t.Run("Len returns correct count", func(t *testing.T) { args := NewToolCallFunctionArguments() assert.Equal(t, 0, args.Len()) args.Set("a", 1) assert.Equal(t, 1, args.Len()) args.Set("b", 2) assert.Equal(t, 2, args.Len()) }) t.Run("empty args marshal to empty object", func(t *testing.T) { args := NewToolCallFunctionArguments() data, err := json.Marshal(args) require.NoError(t, err) assert.Equal(t, `{}`, string(data)) }) t.Run("zero value args marshal to empty object", func(t *testing.T) { var args ToolCallFunctionArguments assert.Equal(t, "{}", args.String()) }) } func TestToolPropertiesMap_OrderPreservation(t *testing.T) { t.Run("marshal preserves insertion order", func(t *testing.T) { props := NewToolPropertiesMap() props.Set("zebra", ToolProperty{Type: PropertyType{"string"}}) props.Set("apple", ToolProperty{Type: PropertyType{"number"}}) props.Set("mango", ToolProperty{Type: PropertyType{"boolean"}}) data, err := json.Marshal(props) require.NoError(t, err) // Should preserve insertion order, not alphabetical expected := `{"zebra":{"type":"string"},"apple":{"type":"number"},"mango":{"type":"boolean"}}` assert.Equal(t, expected, string(data)) }) t.Run("unmarshal preserves JSON order", func(t *testing.T) { jsonData := `{"zebra":{"type":"string"},"apple":{"type":"number"},"mango":{"type":"boolean"}}` var props ToolPropertiesMap err := 
json.Unmarshal([]byte(jsonData), &props) require.NoError(t, err) // Verify iteration order matches JSON order var keys []string for k := range props.All() { keys = append(keys, k) } assert.Equal(t, []string{"zebra", "apple", "mango"}, keys) }) t.Run("round trip preserves order", func(t *testing.T) { original := `{"z":{"type":"string"},"a":{"type":"number"},"m":{"type":"boolean"}}` var props ToolPropertiesMap err := json.Unmarshal([]byte(original), &props) require.NoError(t, err) data, err := json.Marshal(props) require.NoError(t, err) assert.Equal(t, original, string(data)) }) t.Run("Get retrieves correct values", func(t *testing.T) { props := NewToolPropertiesMap() props.Set("name", ToolProperty{Type: PropertyType{"string"}, Description: "The name"}) props.Set("age", ToolProperty{Type: PropertyType{"integer"}, Description: "The age"}) v, ok := props.Get("name") assert.True(t, ok) assert.Equal(t, "The name", v.Description) v, ok = props.Get("age") assert.True(t, ok) assert.Equal(t, "The age", v.Description) _, ok = props.Get("nonexistent") assert.False(t, ok) }) t.Run("Len returns correct count", func(t *testing.T) { props := NewToolPropertiesMap() assert.Equal(t, 0, props.Len()) props.Set("a", ToolProperty{}) assert.Equal(t, 1, props.Len()) props.Set("b", ToolProperty{}) assert.Equal(t, 2, props.Len()) }) t.Run("nil props marshal to null", func(t *testing.T) { var props *ToolPropertiesMap data, err := json.Marshal(props) require.NoError(t, err) assert.Equal(t, `null`, string(data)) }) t.Run("ToMap returns regular map", func(t *testing.T) { props := NewToolPropertiesMap() props.Set("a", ToolProperty{Type: PropertyType{"string"}}) props.Set("b", ToolProperty{Type: PropertyType{"number"}}) m := props.ToMap() assert.Equal(t, 2, len(m)) assert.Equal(t, PropertyType{"string"}, m["a"].Type) assert.Equal(t, PropertyType{"number"}, m["b"].Type) }) } func TestToolCallFunctionArguments_ComplexValues(t *testing.T) { t.Run("nested objects preserve order", func(t *testing.T) { 
jsonData := `{"outer":{"z":1,"a":2},"simple":"value"}` var args ToolCallFunctionArguments err := json.Unmarshal([]byte(jsonData), &args) require.NoError(t, err) // Outer keys should be in order var keys []string for k := range args.All() { keys = append(keys, k) } assert.Equal(t, []string{"outer", "simple"}, keys) }) t.Run("arrays as values", func(t *testing.T) { args := NewToolCallFunctionArguments() args.Set("items", []string{"a", "b", "c"}) args.Set("numbers", []int{1, 2, 3}) data, err := json.Marshal(args) require.NoError(t, err) assert.Equal(t, `{"items":["a","b","c"],"numbers":[1,2,3]}`, string(data)) }) } func TestToolPropertiesMap_NestedProperties(t *testing.T) { t.Run("nested properties preserve order", func(t *testing.T) { props := NewToolPropertiesMap() nestedProps := NewToolPropertiesMap() nestedProps.Set("z_field", ToolProperty{Type: PropertyType{"string"}}) nestedProps.Set("a_field", ToolProperty{Type: PropertyType{"number"}}) props.Set("outer", ToolProperty{ Type: PropertyType{"object"}, Properties: nestedProps, }) data, err := json.Marshal(props) require.NoError(t, err) // Both outer and inner should preserve order expected := `{"outer":{"type":"object","properties":{"z_field":{"type":"string"},"a_field":{"type":"number"}}}}` assert.Equal(t, expected, string(data)) }) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/api/client.go
api/client.go
// Package api implements the client-side API for code wishing to interact // with the ollama service. The methods of the [Client] type correspond to // the ollama REST API as described in [the API documentation]. // The ollama command-line client itself uses this package to interact with // the backend service. // // # Examples // // Several examples of using this package are available [in the GitHub // repository]. // // [the API documentation]: https://github.com/ollama/ollama/blob/main/docs/api.md // [in the GitHub repository]: https://github.com/ollama/ollama/tree/main/api/examples package api import ( "bufio" "bytes" "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "runtime" "strconv" "time" "github.com/ollama/ollama/auth" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/format" "github.com/ollama/ollama/version" ) // Client encapsulates client state for interacting with the ollama // service. Use [ClientFromEnvironment] to create new Clients. type Client struct { base *url.URL http *http.Client } func checkError(resp *http.Response, body []byte) error { if resp.StatusCode < http.StatusBadRequest { return nil } if resp.StatusCode == http.StatusUnauthorized { authError := AuthorizationError{StatusCode: resp.StatusCode} json.Unmarshal(body, &authError) return authError } apiError := StatusError{StatusCode: resp.StatusCode} err := json.Unmarshal(body, &apiError) if err != nil { // Use the full body as the message if we fail to decode a response. apiError.ErrorMessage = string(body) } return apiError } // ClientFromEnvironment creates a new [Client] using configuration from the // environment variable OLLAMA_HOST, which points to the network host and // port on which the ollama service is listening. The format of this variable // is: // // <scheme>://<host>:<port> // // If the variable is not specified, a default ollama host and port will be // used. 
func ClientFromEnvironment() (*Client, error) { return &Client{ base: envconfig.Host(), http: http.DefaultClient, }, nil } func NewClient(base *url.URL, http *http.Client) *Client { return &Client{ base: base, http: http, } } func getAuthorizationToken(ctx context.Context, challenge string) (string, error) { token, err := auth.Sign(ctx, []byte(challenge)) if err != nil { return "", err } return token, nil } func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error { var reqBody io.Reader var data []byte var err error switch reqData := reqData.(type) { case io.Reader: // reqData is already an io.Reader reqBody = reqData case nil: // noop default: data, err = json.Marshal(reqData) if err != nil { return err } reqBody = bytes.NewReader(data) } requestURL := c.base.JoinPath(path) var token string if envconfig.UseAuth() || c.base.Hostname() == "ollama.com" { now := strconv.FormatInt(time.Now().Unix(), 10) chal := fmt.Sprintf("%s,%s?ts=%s", method, path, now) token, err = getAuthorizationToken(ctx, chal) if err != nil { return err } q := requestURL.Query() q.Set("ts", now) requestURL.RawQuery = q.Encode() } request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), reqBody) if err != nil { return err } request.Header.Set("Content-Type", "application/json") request.Header.Set("Accept", "application/json") request.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version())) if token != "" { request.Header.Set("Authorization", token) } respObj, err := c.http.Do(request) if err != nil { return err } defer respObj.Body.Close() respBody, err := io.ReadAll(respObj.Body) if err != nil { return err } if err := checkError(respObj, respBody); err != nil { return err } if len(respBody) > 0 && respData != nil { if err := json.Unmarshal(respBody, respData); err != nil { return err } } return nil } const maxBufferSize = 512 * format.KiloByte func (c *Client) 
stream(ctx context.Context, method, path string, data any, fn func([]byte) error) error { var buf io.Reader if data != nil { bts, err := json.Marshal(data) if err != nil { return err } buf = bytes.NewBuffer(bts) } requestURL := c.base.JoinPath(path) var token string if envconfig.UseAuth() || c.base.Hostname() == "ollama.com" { var err error now := strconv.FormatInt(time.Now().Unix(), 10) chal := fmt.Sprintf("%s,%s?ts=%s", method, path, now) token, err = getAuthorizationToken(ctx, chal) if err != nil { return err } q := requestURL.Query() q.Set("ts", now) requestURL.RawQuery = q.Encode() } request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), buf) if err != nil { return err } request.Header.Set("Content-Type", "application/json") request.Header.Set("Accept", "application/x-ndjson") request.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version())) if token != "" { request.Header.Set("Authorization", token) } response, err := c.http.Do(request) if err != nil { return err } defer response.Body.Close() scanner := bufio.NewScanner(response.Body) // increase the buffer size to avoid running out of space scanBuf := make([]byte, 0, maxBufferSize) scanner.Buffer(scanBuf, maxBufferSize) for scanner.Scan() { var errorResponse struct { Error string `json:"error,omitempty"` SigninURL string `json:"signin_url,omitempty"` } bts := scanner.Bytes() if err := json.Unmarshal(bts, &errorResponse); err != nil { if response.StatusCode >= http.StatusBadRequest { return StatusError{ StatusCode: response.StatusCode, Status: response.Status, ErrorMessage: string(bts), } } return errors.New(string(bts)) } if response.StatusCode == http.StatusUnauthorized { return AuthorizationError{ StatusCode: response.StatusCode, Status: response.Status, SigninURL: errorResponse.SigninURL, } } else if response.StatusCode >= http.StatusBadRequest { return StatusError{ StatusCode: response.StatusCode, Status: 
response.Status, ErrorMessage: errorResponse.Error, } } if errorResponse.Error != "" { return errors.New(errorResponse.Error) } if err := fn(bts); err != nil { return err } } return nil } // GenerateResponseFunc is a function that [Client.Generate] invokes every time // a response is received from the service. If this function returns an error, // [Client.Generate] will stop generating and return this error. type GenerateResponseFunc func(GenerateResponse) error // Generate generates a response for a given prompt. The req parameter should // be populated with prompt details. fn is called for each response (there may // be multiple responses, e.g. in case streaming is enabled). func (c *Client) Generate(ctx context.Context, req *GenerateRequest, fn GenerateResponseFunc) error { return c.stream(ctx, http.MethodPost, "/api/generate", req, func(bts []byte) error { var resp GenerateResponse if err := json.Unmarshal(bts, &resp); err != nil { return err } return fn(resp) }) } // ChatResponseFunc is a function that [Client.Chat] invokes every time // a response is received from the service. If this function returns an error, // [Client.Chat] will stop generating and return this error. type ChatResponseFunc func(ChatResponse) error // Chat generates the next message in a chat. [ChatRequest] may contain a // sequence of messages which can be used to maintain chat history with a model. // fn is called for each response (there may be multiple responses, e.g. if case // streaming is enabled). func (c *Client) Chat(ctx context.Context, req *ChatRequest, fn ChatResponseFunc) error { return c.stream(ctx, http.MethodPost, "/api/chat", req, func(bts []byte) error { var resp ChatResponse if err := json.Unmarshal(bts, &resp); err != nil { return err } return fn(resp) }) } // PullProgressFunc is a function that [Client.Pull] invokes every time there // is progress with a "pull" request sent to the service. 
If this function // returns an error, [Client.Pull] will stop the process and return this error. type PullProgressFunc func(ProgressResponse) error // Pull downloads a model from the ollama library. fn is called each time // progress is made on the request and can be used to display a progress bar, // etc. func (c *Client) Pull(ctx context.Context, req *PullRequest, fn PullProgressFunc) error { return c.stream(ctx, http.MethodPost, "/api/pull", req, func(bts []byte) error { var resp ProgressResponse if err := json.Unmarshal(bts, &resp); err != nil { return err } return fn(resp) }) } // PushProgressFunc is a function that [Client.Push] invokes when progress is // made. // It's similar to other progress function types like [PullProgressFunc]. type PushProgressFunc func(ProgressResponse) error // Push uploads a model to the model library; requires registering for ollama.ai // and adding a public key first. fn is called each time progress is made on // the request and can be used to display a progress bar, etc. func (c *Client) Push(ctx context.Context, req *PushRequest, fn PushProgressFunc) error { return c.stream(ctx, http.MethodPost, "/api/push", req, func(bts []byte) error { var resp ProgressResponse if err := json.Unmarshal(bts, &resp); err != nil { return err } return fn(resp) }) } // CreateProgressFunc is a function that [Client.Create] invokes when progress // is made. // It's similar to other progress function types like [PullProgressFunc]. type CreateProgressFunc func(ProgressResponse) error // Create creates a model from a [Modelfile]. fn is a progress function that // behaves similarly to other methods (see [Client.Pull]). 
// // [Modelfile]: https://github.com/ollama/ollama/blob/main/docs/modelfile.mdx func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgressFunc) error { return c.stream(ctx, http.MethodPost, "/api/create", req, func(bts []byte) error { var resp ProgressResponse if err := json.Unmarshal(bts, &resp); err != nil { return err } return fn(resp) }) } // List lists models that are available locally. func (c *Client) List(ctx context.Context) (*ListResponse, error) { var lr ListResponse if err := c.do(ctx, http.MethodGet, "/api/tags", nil, &lr); err != nil { return nil, err } return &lr, nil } // ListRunning lists running models. func (c *Client) ListRunning(ctx context.Context) (*ProcessResponse, error) { var lr ProcessResponse if err := c.do(ctx, http.MethodGet, "/api/ps", nil, &lr); err != nil { return nil, err } return &lr, nil } // Copy copies a model - creating a model with another name from an existing // model. func (c *Client) Copy(ctx context.Context, req *CopyRequest) error { if err := c.do(ctx, http.MethodPost, "/api/copy", req, nil); err != nil { return err } return nil } // Delete deletes a model and its data. func (c *Client) Delete(ctx context.Context, req *DeleteRequest) error { if err := c.do(ctx, http.MethodDelete, "/api/delete", req, nil); err != nil { return err } return nil } // Show obtains model information, including details, modelfile, license etc. func (c *Client) Show(ctx context.Context, req *ShowRequest) (*ShowResponse, error) { var resp ShowResponse if err := c.do(ctx, http.MethodPost, "/api/show", req, &resp); err != nil { return nil, err } return &resp, nil } // Heartbeat checks if the server has started and is responsive; if yes, it // returns nil, otherwise an error. func (c *Client) Heartbeat(ctx context.Context) error { if err := c.do(ctx, http.MethodHead, "/", nil, nil); err != nil { return err } return nil } // Embed generates embeddings from a model. 
func (c *Client) Embed(ctx context.Context, req *EmbedRequest) (*EmbedResponse, error) { var resp EmbedResponse if err := c.do(ctx, http.MethodPost, "/api/embed", req, &resp); err != nil { return nil, err } return &resp, nil } // Embeddings generates an embedding from a model. func (c *Client) Embeddings(ctx context.Context, req *EmbeddingRequest) (*EmbeddingResponse, error) { var resp EmbeddingResponse if err := c.do(ctx, http.MethodPost, "/api/embeddings", req, &resp); err != nil { return nil, err } return &resp, nil } // CreateBlob creates a blob from a file on the server. digest is the // expected SHA256 digest of the file, and r represents the file. func (c *Client) CreateBlob(ctx context.Context, digest string, r io.Reader) error { return c.do(ctx, http.MethodPost, fmt.Sprintf("/api/blobs/%s", digest), r, nil) } // Version returns the Ollama server version as a string. func (c *Client) Version(ctx context.Context) (string, error) { var version struct { Version string `json:"version"` } if err := c.do(ctx, http.MethodGet, "/api/version", nil, &version); err != nil { return "", err } return version.Version, nil } // Signout will signout a client for a local ollama server. func (c *Client) Signout(ctx context.Context) error { return c.do(ctx, http.MethodPost, "/api/signout", nil, nil) } // Disconnect will disconnect an ollama instance from ollama.com. func (c *Client) Disconnect(ctx context.Context, encodedKey string) error { return c.do(ctx, http.MethodDelete, fmt.Sprintf("/api/user/keys/%s", encodedKey), nil, nil) } func (c *Client) Whoami(ctx context.Context) (*UserResponse, error) { var resp UserResponse if err := c.do(ctx, http.MethodPost, "/api/me", nil, &resp); err != nil { return nil, err } return &resp, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/api/types.go
api/types.go
package api import ( "encoding/json" "fmt" "iter" "log/slog" "math" "os" "reflect" "strconv" "strings" "time" "github.com/google/uuid" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/internal/orderedmap" "github.com/ollama/ollama/types/model" ) // StatusError is an error with an HTTP status code and message. type StatusError struct { StatusCode int Status string ErrorMessage string `json:"error"` } func (e StatusError) Error() string { switch { case e.Status != "" && e.ErrorMessage != "": return fmt.Sprintf("%s: %s", e.Status, e.ErrorMessage) case e.Status != "": return e.Status case e.ErrorMessage != "": return e.ErrorMessage default: // this should not happen return "something went wrong, please see the ollama server logs for details" } } type AuthorizationError struct { StatusCode int Status string SigninURL string `json:"signin_url"` } func (e AuthorizationError) Error() string { if e.Status != "" { return e.Status } return "something went wrong, please see the ollama server logs for details" } // ImageData represents the raw binary data of an image file. type ImageData []byte // GenerateRequest describes a request sent by [Client.Generate]. While you // have to specify the Model and Prompt fields, all the other fields have // reasonable defaults for basic uses. type GenerateRequest struct { // Model is the model name; it should be a name familiar to Ollama from // the library at https://ollama.com/library Model string `json:"model"` // Prompt is the textual prompt to send to the model. Prompt string `json:"prompt"` // Suffix is the text that comes after the inserted text. Suffix string `json:"suffix"` // System overrides the model's default system message/prompt. System string `json:"system"` // Template overrides the model's default prompt template. Template string `json:"template"` // Context is the context parameter returned from a previous call to // [Client.Generate]. It can be used to keep a short conversational memory. 
Context []int `json:"context,omitempty"` // Stream specifies whether the response is streaming; it is true by default. Stream *bool `json:"stream,omitempty"` // Raw set to true means that no formatting will be applied to the prompt. Raw bool `json:"raw,omitempty"` // Format specifies the format to return a response in. Format json.RawMessage `json:"format,omitempty"` // KeepAlive controls how long the model will stay loaded in memory following // this request. KeepAlive *Duration `json:"keep_alive,omitempty"` // Images is an optional list of raw image bytes accompanying this // request, for multimodal models. Images []ImageData `json:"images,omitempty"` // Options lists model-specific options. For example, temperature can be // set through this field, if the model supports it. Options map[string]any `json:"options"` // Think controls whether thinking/reasoning models will think before // responding. Can be a boolean (true/false) or a string ("high", "medium", "low") // for supported models. Needs to be a pointer so we can distinguish between false // (request that thinking _not_ be used) and unset (use the old behavior // before this option was introduced) Think *ThinkValue `json:"think,omitempty"` // Truncate is a boolean that, when set to true, truncates the chat history messages // if the rendered prompt exceeds the context length limit. Truncate *bool `json:"truncate,omitempty"` // Shift is a boolean that, when set to true, shifts the chat history // when hitting the context length limit instead of erroring. Shift *bool `json:"shift,omitempty"` // DebugRenderOnly is a debug option that, when set to true, returns the rendered // template instead of calling the model. DebugRenderOnly bool `json:"_debug_render_only,omitempty"` // Logprobs specifies whether to return log probabilities of the output tokens. 
Logprobs bool `json:"logprobs,omitempty"` // TopLogprobs is the number of most likely tokens to return at each token position, // each with an associated log probability. Only applies when Logprobs is true. // Valid values are 0-20. Default is 0 (only return the selected token's logprob). TopLogprobs int `json:"top_logprobs,omitempty"` } // ChatRequest describes a request sent by [Client.Chat]. type ChatRequest struct { // Model is the model name, as in [GenerateRequest]. Model string `json:"model"` // Messages is the messages of the chat - can be used to keep a chat memory. Messages []Message `json:"messages"` // Stream enables streaming of returned responses; true by default. Stream *bool `json:"stream,omitempty"` // Format is the format to return the response in (e.g. "json"). Format json.RawMessage `json:"format,omitempty"` // KeepAlive controls how long the model will stay loaded into memory // following the request. KeepAlive *Duration `json:"keep_alive,omitempty"` // Tools is an optional list of tools the model has access to. Tools `json:"tools,omitempty"` // Options lists model-specific options. Options map[string]any `json:"options"` // Think controls whether thinking/reasoning models will think before // responding. Can be a boolean (true/false) or a string ("high", "medium", "low") // for supported models. Think *ThinkValue `json:"think,omitempty"` // Truncate is a boolean that, when set to true, truncates the chat history messages // if the rendered prompt exceeds the context length limit. Truncate *bool `json:"truncate,omitempty"` // Shift is a boolean that, when set to true, shifts the chat history // when hitting the context length limit instead of erroring. Shift *bool `json:"shift,omitempty"` // DebugRenderOnly is a debug option that, when set to true, returns the rendered // template instead of calling the model. 
DebugRenderOnly bool `json:"_debug_render_only,omitempty"` // Logprobs specifies whether to return log probabilities of the output tokens. Logprobs bool `json:"logprobs,omitempty"` // TopLogprobs is the number of most likely tokens to return at each token position, // each with an associated log probability. Only applies when Logprobs is true. // Valid values are 0-20. Default is 0 (only return the selected token's logprob). TopLogprobs int `json:"top_logprobs,omitempty"` } type Tools []Tool func (t Tools) String() string { bts, _ := json.Marshal(t) return string(bts) } func (t Tool) String() string { bts, _ := json.Marshal(t) return string(bts) } // Message is a single message in a chat sequence. The message contains the // role ("system", "user", or "assistant"), the content and an optional list // of images. type Message struct { Role string `json:"role"` Content string `json:"content"` // Thinking contains the text that was inside thinking tags in the // original model output when ChatRequest.Think is enabled. Thinking string `json:"thinking,omitempty"` Images []ImageData `json:"images,omitempty"` ToolCalls []ToolCall `json:"tool_calls,omitempty"` ToolName string `json:"tool_name,omitempty"` ToolCallID string `json:"tool_call_id,omitempty"` } func (m *Message) UnmarshalJSON(b []byte) error { type Alias Message var a Alias if err := json.Unmarshal(b, &a); err != nil { return err } *m = Message(a) m.Role = strings.ToLower(m.Role) return nil } type ToolCall struct { ID string `json:"id,omitempty"` Function ToolCallFunction `json:"function"` } type ToolCallFunction struct { Index int `json:"index"` Name string `json:"name"` Arguments ToolCallFunctionArguments `json:"arguments"` } // ToolCallFunctionArguments holds tool call arguments in insertion order. type ToolCallFunctionArguments struct { om *orderedmap.Map[string, any] } // NewToolCallFunctionArguments creates a new empty ToolCallFunctionArguments. 
func NewToolCallFunctionArguments() ToolCallFunctionArguments { return ToolCallFunctionArguments{om: orderedmap.New[string, any]()} } // Get retrieves a value by key. func (t *ToolCallFunctionArguments) Get(key string) (any, bool) { if t == nil || t.om == nil { return nil, false } return t.om.Get(key) } // Set sets a key-value pair, preserving insertion order. func (t *ToolCallFunctionArguments) Set(key string, value any) { if t == nil { return } if t.om == nil { t.om = orderedmap.New[string, any]() } t.om.Set(key, value) } // Len returns the number of arguments. func (t *ToolCallFunctionArguments) Len() int { if t == nil || t.om == nil { return 0 } return t.om.Len() } // All returns an iterator over all key-value pairs in insertion order. func (t *ToolCallFunctionArguments) All() iter.Seq2[string, any] { if t == nil || t.om == nil { return func(yield func(string, any) bool) {} } return t.om.All() } // ToMap returns a regular map (order not preserved). func (t *ToolCallFunctionArguments) ToMap() map[string]any { if t == nil || t.om == nil { return nil } return t.om.ToMap() } func (t *ToolCallFunctionArguments) String() string { if t == nil || t.om == nil { return "{}" } bts, _ := json.Marshal(t.om) return string(bts) } func (t *ToolCallFunctionArguments) UnmarshalJSON(data []byte) error { t.om = orderedmap.New[string, any]() return json.Unmarshal(data, t.om) } func (t ToolCallFunctionArguments) MarshalJSON() ([]byte, error) { if t.om == nil { return []byte("{}"), nil } return json.Marshal(t.om) } type Tool struct { Type string `json:"type"` Items any `json:"items,omitempty"` Function ToolFunction `json:"function"` } // PropertyType can be either a string or an array of strings type PropertyType []string // UnmarshalJSON implements the json.Unmarshaler interface func (pt *PropertyType) UnmarshalJSON(data []byte) error { // Try to unmarshal as a string first var s string if err := json.Unmarshal(data, &s); err == nil { *pt = []string{s} return nil } // If that fails, 
try to unmarshal as an array of strings var a []string if err := json.Unmarshal(data, &a); err != nil { return err } *pt = a return nil } // MarshalJSON implements the json.Marshaler interface func (pt PropertyType) MarshalJSON() ([]byte, error) { if len(pt) == 1 { // If there's only one type, marshal as a string return json.Marshal(pt[0]) } // Otherwise marshal as an array return json.Marshal([]string(pt)) } // String returns a string representation of the PropertyType func (pt PropertyType) String() string { if len(pt) == 0 { return "" } if len(pt) == 1 { return pt[0] } return fmt.Sprintf("%v", []string(pt)) } // ToolPropertiesMap holds tool properties in insertion order. type ToolPropertiesMap struct { om *orderedmap.Map[string, ToolProperty] } // NewToolPropertiesMap creates a new empty ToolPropertiesMap. func NewToolPropertiesMap() *ToolPropertiesMap { return &ToolPropertiesMap{om: orderedmap.New[string, ToolProperty]()} } // Get retrieves a property by name. func (t *ToolPropertiesMap) Get(key string) (ToolProperty, bool) { if t == nil || t.om == nil { return ToolProperty{}, false } return t.om.Get(key) } // Set sets a property, preserving insertion order. func (t *ToolPropertiesMap) Set(key string, value ToolProperty) { if t == nil { return } if t.om == nil { t.om = orderedmap.New[string, ToolProperty]() } t.om.Set(key, value) } // Len returns the number of properties. func (t *ToolPropertiesMap) Len() int { if t == nil || t.om == nil { return 0 } return t.om.Len() } // All returns an iterator over all properties in insertion order. func (t *ToolPropertiesMap) All() iter.Seq2[string, ToolProperty] { if t == nil || t.om == nil { return func(yield func(string, ToolProperty) bool) {} } return t.om.All() } // ToMap returns a regular map (order not preserved). 
func (t *ToolPropertiesMap) ToMap() map[string]ToolProperty { if t == nil || t.om == nil { return nil } return t.om.ToMap() } func (t ToolPropertiesMap) MarshalJSON() ([]byte, error) { if t.om == nil { return []byte("null"), nil } return json.Marshal(t.om) } func (t *ToolPropertiesMap) UnmarshalJSON(data []byte) error { t.om = orderedmap.New[string, ToolProperty]() return json.Unmarshal(data, t.om) } type ToolProperty struct { AnyOf []ToolProperty `json:"anyOf,omitempty"` Type PropertyType `json:"type,omitempty"` Items any `json:"items,omitempty"` Description string `json:"description,omitempty"` Enum []any `json:"enum,omitempty"` Properties *ToolPropertiesMap `json:"properties,omitempty"` } // ToTypeScriptType converts a ToolProperty to a TypeScript type string func (tp ToolProperty) ToTypeScriptType() string { if len(tp.AnyOf) > 0 { var types []string for _, anyOf := range tp.AnyOf { types = append(types, anyOf.ToTypeScriptType()) } return strings.Join(types, " | ") } if len(tp.Type) == 0 { return "any" } if len(tp.Type) == 1 { return mapToTypeScriptType(tp.Type[0]) } var types []string for _, t := range tp.Type { types = append(types, mapToTypeScriptType(t)) } return strings.Join(types, " | ") } // mapToTypeScriptType maps JSON Schema types to TypeScript types func mapToTypeScriptType(jsonType string) string { switch jsonType { case "string": return "string" case "number", "integer": return "number" case "boolean": return "boolean" case "array": return "any[]" case "object": return "Record<string, any>" case "null": return "null" default: return "any" } } type ToolFunctionParameters struct { Type string `json:"type"` Defs any `json:"$defs,omitempty"` Items any `json:"items,omitempty"` Required []string `json:"required,omitempty"` Properties *ToolPropertiesMap `json:"properties"` } func (t *ToolFunctionParameters) String() string { bts, _ := json.Marshal(t) return string(bts) } type ToolFunction struct { Name string `json:"name"` Description string 
`json:"description,omitempty"` Parameters ToolFunctionParameters `json:"parameters"` } func (t *ToolFunction) String() string { bts, _ := json.Marshal(t) return string(bts) } // TokenLogprob represents log probability information for a single token alternative. type TokenLogprob struct { // Token is the text representation of the token. Token string `json:"token"` // Logprob is the log probability of this token. Logprob float64 `json:"logprob"` // Bytes contains the raw byte representation of the token Bytes []int `json:"bytes,omitempty"` } // Logprob contains log probability information for a generated token. type Logprob struct { TokenLogprob // TopLogprobs contains the most likely tokens and their log probabilities // at this position, if requested via TopLogprobs parameter. TopLogprobs []TokenLogprob `json:"top_logprobs,omitempty"` } // ChatResponse is the response returned by [Client.Chat]. Its fields are // similar to [GenerateResponse]. type ChatResponse struct { // Model is the model name that generated the response. Model string `json:"model"` // RemoteModel is the name of the upstream model that generated the response. RemoteModel string `json:"remote_model,omitempty"` // RemoteHost is the URL of the upstream Ollama host that generated the response. RemoteHost string `json:"remote_host,omitempty"` // CreatedAt is the timestamp of the response. CreatedAt time.Time `json:"created_at"` // Message contains the message or part of a message from the model. Message Message `json:"message"` // Done specifies if the response is complete. Done bool `json:"done"` // DoneReason is the reason the model stopped generating text. DoneReason string `json:"done_reason,omitempty"` DebugInfo *DebugInfo `json:"_debug_info,omitempty"` // Logprobs contains log probability information for the generated tokens, // if requested via the Logprobs parameter. 
Logprobs []Logprob `json:"logprobs,omitempty"` Metrics } // DebugInfo contains debug information for template rendering type DebugInfo struct { RenderedTemplate string `json:"rendered_template"` ImageCount int `json:"image_count,omitempty"` } type Metrics struct { TotalDuration time.Duration `json:"total_duration,omitempty"` LoadDuration time.Duration `json:"load_duration,omitempty"` PromptEvalCount int `json:"prompt_eval_count,omitempty"` PromptEvalDuration time.Duration `json:"prompt_eval_duration,omitempty"` EvalCount int `json:"eval_count,omitempty"` EvalDuration time.Duration `json:"eval_duration,omitempty"` } // Options specified in [GenerateRequest]. If you add a new option here, also // add it to the API docs. type Options struct { Runner // Predict options used at runtime NumKeep int `json:"num_keep,omitempty"` Seed int `json:"seed,omitempty"` NumPredict int `json:"num_predict,omitempty"` TopK int `json:"top_k,omitempty"` TopP float32 `json:"top_p,omitempty"` MinP float32 `json:"min_p,omitempty"` TypicalP float32 `json:"typical_p,omitempty"` RepeatLastN int `json:"repeat_last_n,omitempty"` Temperature float32 `json:"temperature,omitempty"` RepeatPenalty float32 `json:"repeat_penalty,omitempty"` PresencePenalty float32 `json:"presence_penalty,omitempty"` FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` Stop []string `json:"stop,omitempty"` } // Runner options which must be set when the model is loaded into memory type Runner struct { NumCtx int `json:"num_ctx,omitempty"` NumBatch int `json:"num_batch,omitempty"` NumGPU int `json:"num_gpu,omitempty"` MainGPU int `json:"main_gpu,omitempty"` UseMMap *bool `json:"use_mmap,omitempty"` NumThread int `json:"num_thread,omitempty"` } // EmbedRequest is the request passed to [Client.Embed]. type EmbedRequest struct { // Model is the model name. Model string `json:"model"` // Input is the input to embed. 
Input any `json:"input"` // KeepAlive controls how long the model will stay loaded in memory following // this request. KeepAlive *Duration `json:"keep_alive,omitempty"` // Truncate truncates the input to fit the model's max sequence length. Truncate *bool `json:"truncate,omitempty"` // Dimensions truncates the output embedding to the specified dimension. Dimensions int `json:"dimensions,omitempty"` // Options lists model-specific options. Options map[string]any `json:"options"` } // EmbedResponse is the response from [Client.Embed]. type EmbedResponse struct { Model string `json:"model"` Embeddings [][]float32 `json:"embeddings"` TotalDuration time.Duration `json:"total_duration,omitempty"` LoadDuration time.Duration `json:"load_duration,omitempty"` PromptEvalCount int `json:"prompt_eval_count,omitempty"` } // EmbeddingRequest is the request passed to [Client.Embeddings]. type EmbeddingRequest struct { // Model is the model name. Model string `json:"model"` // Prompt is the textual prompt to embed. Prompt string `json:"prompt"` // KeepAlive controls how long the model will stay loaded in memory following // this request. KeepAlive *Duration `json:"keep_alive,omitempty"` // Options lists model-specific options. Options map[string]any `json:"options"` } // EmbeddingResponse is the response from [Client.Embeddings]. type EmbeddingResponse struct { Embedding []float64 `json:"embedding"` } // CreateRequest is the request passed to [Client.Create]. type CreateRequest struct { // Model is the model name to create. Model string `json:"model"` // Stream specifies whether the response is streaming; it is true by default. Stream *bool `json:"stream,omitempty"` // Quantize is the quantization format for the model; leave blank to not change the quantization level. Quantize string `json:"quantize,omitempty"` // From is the name of the model or file to use as the source. 
From string `json:"from,omitempty"` // RemoteHost is the URL of the upstream ollama API for the model (if any). RemoteHost string `json:"remote_host,omitempty"` // Files is a map of files include when creating the model. Files map[string]string `json:"files,omitempty"` // Adapters is a map of LoRA adapters to include when creating the model. Adapters map[string]string `json:"adapters,omitempty"` // Template is the template used when constructing a request to the model. Template string `json:"template,omitempty"` // License is a string or list of strings for licenses. License any `json:"license,omitempty"` // System is the system prompt for the model. System string `json:"system,omitempty"` // Parameters is a map of hyper-parameters which are applied to the model. Parameters map[string]any `json:"parameters,omitempty"` // Messages is a list of messages added to the model before chat and generation requests. Messages []Message `json:"messages,omitempty"` Renderer string `json:"renderer,omitempty"` Parser string `json:"parser,omitempty"` // Requires is the minimum version of Ollama required by the model. Requires string `json:"requires,omitempty"` // Info is a map of additional information for the model Info map[string]any `json:"info,omitempty"` // Deprecated: set the model name with Model instead Name string `json:"name"` // Deprecated: use Quantize instead Quantization string `json:"quantization,omitempty"` } // DeleteRequest is the request passed to [Client.Delete]. type DeleteRequest struct { Model string `json:"model"` // Deprecated: set the model name with Model instead Name string `json:"name"` } // ShowRequest is the request passed to [Client.Show]. 
type ShowRequest struct { Model string `json:"model"` System string `json:"system"` // Template is deprecated Template string `json:"template"` Verbose bool `json:"verbose"` Options map[string]any `json:"options"` // Deprecated: set the model name with Model instead Name string `json:"name"` } // ShowResponse is the response returned from [Client.Show]. type ShowResponse struct { License string `json:"license,omitempty"` Modelfile string `json:"modelfile,omitempty"` Parameters string `json:"parameters,omitempty"` Template string `json:"template,omitempty"` System string `json:"system,omitempty"` Renderer string `json:"renderer,omitempty"` Parser string `json:"parser,omitempty"` Details ModelDetails `json:"details,omitempty"` Messages []Message `json:"messages,omitempty"` RemoteModel string `json:"remote_model,omitempty"` RemoteHost string `json:"remote_host,omitempty"` ModelInfo map[string]any `json:"model_info,omitempty"` ProjectorInfo map[string]any `json:"projector_info,omitempty"` Tensors []Tensor `json:"tensors,omitempty"` Capabilities []model.Capability `json:"capabilities,omitempty"` ModifiedAt time.Time `json:"modified_at,omitempty"` Requires string `json:"requires,omitempty"` } // CopyRequest is the request passed to [Client.Copy]. type CopyRequest struct { Source string `json:"source"` Destination string `json:"destination"` } // PullRequest is the request passed to [Client.Pull]. type PullRequest struct { Model string `json:"model"` Insecure bool `json:"insecure,omitempty"` // Deprecated: ignored Username string `json:"username"` // Deprecated: ignored Password string `json:"password"` // Deprecated: ignored Stream *bool `json:"stream,omitempty"` // Deprecated: set the model name with Model instead Name string `json:"name"` } // ProgressResponse is the response passed to progress functions like // [PullProgressFunc] and [PushProgressFunc]. 
type ProgressResponse struct { Status string `json:"status"` Digest string `json:"digest,omitempty"` Total int64 `json:"total,omitempty"` Completed int64 `json:"completed,omitempty"` } // PushRequest is the request passed to [Client.Push]. type PushRequest struct { Model string `json:"model"` Insecure bool `json:"insecure,omitempty"` Username string `json:"username"` Password string `json:"password"` Stream *bool `json:"stream,omitempty"` // Deprecated: set the model name with Model instead Name string `json:"name"` } // ListResponse is the response from [Client.List]. type ListResponse struct { Models []ListModelResponse `json:"models"` } // ProcessResponse is the response from [Client.Process]. type ProcessResponse struct { Models []ProcessModelResponse `json:"models"` } // ListModelResponse is a single model description in [ListResponse]. type ListModelResponse struct { Name string `json:"name"` Model string `json:"model"` RemoteModel string `json:"remote_model,omitempty"` RemoteHost string `json:"remote_host,omitempty"` ModifiedAt time.Time `json:"modified_at"` Size int64 `json:"size"` Digest string `json:"digest"` Details ModelDetails `json:"details,omitempty"` } // ProcessModelResponse is a single model description in [ProcessResponse]. type ProcessModelResponse struct { Name string `json:"name"` Model string `json:"model"` Size int64 `json:"size"` Digest string `json:"digest"` Details ModelDetails `json:"details,omitempty"` ExpiresAt time.Time `json:"expires_at"` SizeVRAM int64 `json:"size_vram"` ContextLength int `json:"context_length"` } type TokenResponse struct { Token string `json:"token"` } // GenerateResponse is the response passed into [GenerateResponseFunc]. type GenerateResponse struct { // Model is the model name that generated the response. Model string `json:"model"` // RemoteModel is the name of the upstream model that generated the response. 
RemoteModel string `json:"remote_model,omitempty"` // RemoteHost is the URL of the upstream Ollama host that generated the response. RemoteHost string `json:"remote_host,omitempty"` // CreatedAt is the timestamp of the response. CreatedAt time.Time `json:"created_at"` // Response is the textual response itself. Response string `json:"response"` // Thinking contains the text that was inside thinking tags in the // original model output when ChatRequest.Think is enabled. Thinking string `json:"thinking,omitempty"` // Done specifies if the response is complete. Done bool `json:"done"` // DoneReason is the reason the model stopped generating text. DoneReason string `json:"done_reason,omitempty"` // Context is an encoding of the conversation used in this response; this // can be sent in the next request to keep a conversational memory. Context []int `json:"context,omitempty"` Metrics ToolCalls []ToolCall `json:"tool_calls,omitempty"` DebugInfo *DebugInfo `json:"_debug_info,omitempty"` // Logprobs contains log probability information for the generated tokens, // if requested via the Logprobs parameter. Logprobs []Logprob `json:"logprobs,omitempty"` } // ModelDetails provides details about a model. type ModelDetails struct { ParentModel string `json:"parent_model"` Format string `json:"format"` Family string `json:"family"` Families []string `json:"families"` ParameterSize string `json:"parameter_size"` QuantizationLevel string `json:"quantization_level"` } // UserResponse provides information about a user. type UserResponse struct { ID uuid.UUID `json:"id"` Email string `json:"email"` Name string `json:"name"` Bio string `json:"bio,omitempty"` AvatarURL string `json:"avatarurl,omitempty"` FirstName string `json:"firstname,omitempty"` LastName string `json:"lastname,omitempty"` Plan string `json:"plan,omitempty"` } // Tensor describes the metadata for a given tensor. 
type Tensor struct { Name string `json:"name"` Type string `json:"type"` Shape []uint64 `json:"shape"` } func (m *Metrics) Summary() { if m.TotalDuration > 0 { fmt.Fprintf(os.Stderr, "total duration: %v\n", m.TotalDuration) } if m.LoadDuration > 0 { fmt.Fprintf(os.Stderr, "load duration: %v\n", m.LoadDuration) } if m.PromptEvalCount > 0 { fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", m.PromptEvalCount) } if m.PromptEvalDuration > 0 { fmt.Fprintf(os.Stderr, "prompt eval duration: %s\n", m.PromptEvalDuration) fmt.Fprintf(os.Stderr, "prompt eval rate: %.2f tokens/s\n", float64(m.PromptEvalCount)/m.PromptEvalDuration.Seconds()) } if m.EvalCount > 0 { fmt.Fprintf(os.Stderr, "eval count: %d token(s)\n", m.EvalCount) } if m.EvalDuration > 0 { fmt.Fprintf(os.Stderr, "eval duration: %s\n", m.EvalDuration) fmt.Fprintf(os.Stderr, "eval rate: %.2f tokens/s\n", float64(m.EvalCount)/m.EvalDuration.Seconds()) } } func (opts *Options) FromMap(m map[string]any) error { valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct typeOpts := reflect.TypeOf(opts).Elem() // types of the fields in the options struct // build map of json struct tags to their types jsonOpts := make(map[string]reflect.StructField) for _, field := range reflect.VisibleFields(typeOpts) { jsonTag := strings.Split(field.Tag.Get("json"), ",")[0] if jsonTag != "" { jsonOpts[jsonTag] = field } } for key, val := range m { opt, ok := jsonOpts[key] if !ok { slog.Warn("invalid option provided", "option", key) continue } field := valueOpts.FieldByName(opt.Name) if field.IsValid() && field.CanSet() { if val == nil { continue } switch field.Kind() { case reflect.Int: switch t := val.(type) { case int64: field.SetInt(t) case float64: // when JSON unmarshals numbers, it uses float64, not int field.SetInt(int64(t)) default: return fmt.Errorf("option %q must be of type integer", key) } case reflect.Bool: val, ok := val.(bool) if !ok { return fmt.Errorf("option %q must be of type 
boolean", key) } field.SetBool(val) case reflect.Float32: // JSON unmarshals to float64 val, ok := val.(float64) if !ok { return fmt.Errorf("option %q must be of type float32", key) } field.SetFloat(val) case reflect.String: val, ok := val.(string) if !ok { return fmt.Errorf("option %q must be of type string", key) } field.SetString(val) case reflect.Slice: // JSON unmarshals to []any, not []string val, ok := val.([]any) if !ok { return fmt.Errorf("option %q must be of type array", key) } // convert []any to []string slice := make([]string, len(val)) for i, item := range val { str, ok := item.(string) if !ok { return fmt.Errorf("option %q must be of an array of strings", key) } slice[i] = str } field.Set(reflect.ValueOf(slice)) case reflect.Pointer: var b bool if field.Type() == reflect.TypeOf(&b) { val, ok := val.(bool) if !ok { return fmt.Errorf("option %q must be of type boolean", key) } field.Set(reflect.ValueOf(&val)) } else { return fmt.Errorf("unknown type loading config params: %v %v", field.Kind(), field.Type()) } default: return fmt.Errorf("unknown type loading config params: %v", field.Kind()) } } } return nil } // DefaultOptions is the default set of options for [GenerateRequest]; these // values are used unless the user specifies other values explicitly. func DefaultOptions() Options { return Options{ // options set on request to runner NumPredict: -1, // set a minimal num_keep to avoid issues on context shifts NumKeep: 4, Temperature: 0.8, TopK: 40, TopP: 0.9, TypicalP: 1.0, RepeatLastN: 64, RepeatPenalty: 1.1, PresencePenalty: 0.0, FrequencyPenalty: 0.0, Seed: -1, Runner: Runner{ // options set when the model is loaded NumCtx: int(envconfig.ContextLength()), NumBatch: 512, NumGPU: -1, // -1 here indicates that NumGPU should be set dynamically NumThread: 0, // let the runtime decide UseMMap: nil, }, } } // ThinkValue represents a value that can be a boolean or a string ("high", "medium", "low") type ThinkValue struct {
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
true
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/api/client_test.go
api/client_test.go
package api import ( "encoding/json" "fmt" "net/http" "net/http/httptest" "net/url" "strings" "testing" ) func TestClientFromEnvironment(t *testing.T) { type testCase struct { value string expect string err error } testCases := map[string]*testCase{ "empty": {value: "", expect: "http://127.0.0.1:11434"}, "only address": {value: "1.2.3.4", expect: "http://1.2.3.4:11434"}, "only port": {value: ":1234", expect: "http://:1234"}, "address and port": {value: "1.2.3.4:1234", expect: "http://1.2.3.4:1234"}, "scheme http and address": {value: "http://1.2.3.4", expect: "http://1.2.3.4:80"}, "scheme https and address": {value: "https://1.2.3.4", expect: "https://1.2.3.4:443"}, "scheme, address, and port": {value: "https://1.2.3.4:1234", expect: "https://1.2.3.4:1234"}, "hostname": {value: "example.com", expect: "http://example.com:11434"}, "hostname and port": {value: "example.com:1234", expect: "http://example.com:1234"}, "scheme http and hostname": {value: "http://example.com", expect: "http://example.com:80"}, "scheme https and hostname": {value: "https://example.com", expect: "https://example.com:443"}, "scheme, hostname, and port": {value: "https://example.com:1234", expect: "https://example.com:1234"}, "trailing slash": {value: "example.com/", expect: "http://example.com:11434"}, "trailing slash port": {value: "example.com:1234/", expect: "http://example.com:1234"}, } for k, v := range testCases { t.Run(k, func(t *testing.T) { t.Setenv("OLLAMA_HOST", v.value) client, err := ClientFromEnvironment() if err != v.err { t.Fatalf("expected %s, got %s", v.err, err) } if client.base.String() != v.expect { t.Fatalf("expected %s, got %s", v.expect, client.base.String()) } }) } } // testError represents an internal error type with status code and message // this is used since the error response from the server is not a standard error struct type testError struct { message string statusCode int raw bool // if true, write message as-is instead of JSON encoding } func (e testError) 
Error() string { return e.message } func TestClientStream(t *testing.T) { testCases := []struct { name string responses []any wantErr string }{ { name: "immediate error response", responses: []any{ testError{ message: "test error message", statusCode: http.StatusBadRequest, }, }, wantErr: "test error message", }, { name: "error after successful chunks, ok response", responses: []any{ ChatResponse{Message: Message{Content: "partial response 1"}}, ChatResponse{Message: Message{Content: "partial response 2"}}, testError{ message: "mid-stream error", statusCode: http.StatusOK, }, }, wantErr: "mid-stream error", }, { name: "http status error takes precedence over general error", responses: []any{ testError{ message: "custom error message", statusCode: http.StatusInternalServerError, }, }, wantErr: "500", }, { name: "successful stream completion", responses: []any{ ChatResponse{Message: Message{Content: "chunk 1"}}, ChatResponse{Message: Message{Content: "chunk 2"}}, ChatResponse{ Message: Message{Content: "final chunk"}, Done: true, DoneReason: "stop", }, }, }, { name: "plain text error response", responses: []any{ "internal server error", }, wantErr: "internal server error", }, { name: "HTML error page", responses: []any{ "<html><body>404 Not Found</body></html>", }, wantErr: "404 Not Found", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { flusher, ok := w.(http.Flusher) if !ok { t.Fatal("expected http.Flusher") } w.Header().Set("Content-Type", "application/x-ndjson") for _, resp := range tc.responses { if errResp, ok := resp.(testError); ok { w.WriteHeader(errResp.statusCode) err := json.NewEncoder(w).Encode(map[string]string{ "error": errResp.message, }) if err != nil { t.Fatal("failed to encode error response:", err) } return } if str, ok := resp.(string); ok { fmt.Fprintln(w, str) flusher.Flush() continue } if err := json.NewEncoder(w).Encode(resp); err 
!= nil { t.Fatalf("failed to encode response: %v", err) } flusher.Flush() } })) defer ts.Close() client := NewClient(&url.URL{Scheme: "http", Host: ts.Listener.Addr().String()}, http.DefaultClient) var receivedChunks []ChatResponse err := client.stream(t.Context(), http.MethodPost, "/v1/chat", nil, func(chunk []byte) error { var resp ChatResponse if err := json.Unmarshal(chunk, &resp); err != nil { return fmt.Errorf("failed to unmarshal chunk: %w", err) } receivedChunks = append(receivedChunks, resp) return nil }) if tc.wantErr != "" { if err == nil { t.Fatal("expected error but got nil") } if !strings.Contains(err.Error(), tc.wantErr) { t.Errorf("expected error containing %q, got %v", tc.wantErr, err) } return } if err != nil { t.Errorf("unexpected error: %v", err) } }) } } func TestClientDo(t *testing.T) { testCases := []struct { name string response any wantErr string wantStatusCode int }{ { name: "immediate error response", response: testError{ message: "test error message", statusCode: http.StatusBadRequest, }, wantErr: "test error message", wantStatusCode: http.StatusBadRequest, }, { name: "server error response", response: testError{ message: "internal error", statusCode: http.StatusInternalServerError, }, wantErr: "internal error", wantStatusCode: http.StatusInternalServerError, }, { name: "successful response", response: struct { ID string `json:"id"` Success bool `json:"success"` }{ ID: "msg_123", Success: true, }, }, { name: "plain text error response", response: testError{ message: "internal server error", statusCode: http.StatusInternalServerError, raw: true, }, wantErr: "internal server error", wantStatusCode: http.StatusInternalServerError, }, { name: "HTML error page", response: testError{ message: "<html><body>404 Not Found</body></html>", statusCode: http.StatusNotFound, raw: true, }, wantErr: "<html><body>404 Not Found</body></html>", wantStatusCode: http.StatusNotFound, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { ts 
:= httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if errResp, ok := tc.response.(testError); ok { w.WriteHeader(errResp.statusCode) if !errResp.raw { err := json.NewEncoder(w).Encode(map[string]string{ "error": errResp.message, }) if err != nil { t.Fatal("failed to encode error response:", err) } } else { // Write raw message (simulates non-JSON error responses) fmt.Fprint(w, errResp.message) } return } w.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(w).Encode(tc.response); err != nil { t.Fatalf("failed to encode response: %v", err) } })) defer ts.Close() client := NewClient(&url.URL{Scheme: "http", Host: ts.Listener.Addr().String()}, http.DefaultClient) var resp struct { ID string `json:"id"` Success bool `json:"success"` } err := client.do(t.Context(), http.MethodPost, "/v1/messages", nil, &resp) if tc.wantErr != "" { if err == nil { t.Fatalf("got nil, want error %q", tc.wantErr) } if err.Error() != tc.wantErr { t.Errorf("error message mismatch: got %q, want %q", err.Error(), tc.wantErr) } if tc.wantStatusCode != 0 { if statusErr, ok := err.(StatusError); ok { if statusErr.StatusCode != tc.wantStatusCode { t.Errorf("status code mismatch: got %d, want %d", statusErr.StatusCode, tc.wantStatusCode) } } else { t.Errorf("expected StatusError, got %T", err) } } return } if err != nil { t.Fatalf("got error %q, want nil", err) } if expectedResp, ok := tc.response.(struct { ID string `json:"id"` Success bool `json:"success"` }); ok { if resp.ID != expectedResp.ID { t.Errorf("response ID mismatch: got %q, want %q", resp.ID, expectedResp.ID) } if resp.Success != expectedResp.Success { t.Errorf("response Success mismatch: got %v, want %v", resp.Success, expectedResp.Success) } } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/api/types_typescript_test.go
api/types_typescript_test.go
package api import ( "testing" ) func TestToolParameterToTypeScriptType(t *testing.T) { tests := []struct { name string param ToolProperty expected string }{ { name: "single string type", param: ToolProperty{ Type: PropertyType{"string"}, }, expected: "string", }, { name: "single number type", param: ToolProperty{ Type: PropertyType{"number"}, }, expected: "number", }, { name: "integer maps to number", param: ToolProperty{ Type: PropertyType{"integer"}, }, expected: "number", }, { name: "boolean type", param: ToolProperty{ Type: PropertyType{"boolean"}, }, expected: "boolean", }, { name: "array type", param: ToolProperty{ Type: PropertyType{"array"}, }, expected: "any[]", }, { name: "object type", param: ToolProperty{ Type: PropertyType{"object"}, }, expected: "Record<string, any>", }, { name: "null type", param: ToolProperty{ Type: PropertyType{"null"}, }, expected: "null", }, { name: "multiple types as union", param: ToolProperty{ Type: PropertyType{"string", "number"}, }, expected: "string | number", }, { name: "string or null union", param: ToolProperty{ Type: PropertyType{"string", "null"}, }, expected: "string | null", }, { name: "anyOf with single types", param: ToolProperty{ AnyOf: []ToolProperty{ {Type: PropertyType{"string"}}, {Type: PropertyType{"number"}}, }, }, expected: "string | number", }, { name: "anyOf with multiple types in each branch", param: ToolProperty{ AnyOf: []ToolProperty{ {Type: PropertyType{"string", "null"}}, {Type: PropertyType{"number"}}, }, }, expected: "string | null | number", }, { name: "nested anyOf", param: ToolProperty{ AnyOf: []ToolProperty{ {Type: PropertyType{"boolean"}}, { AnyOf: []ToolProperty{ {Type: PropertyType{"string"}}, {Type: PropertyType{"number"}}, }, }, }, }, expected: "boolean | string | number", }, { name: "empty type returns any", param: ToolProperty{ Type: PropertyType{}, }, expected: "any", }, { name: "unknown type maps to any", param: ToolProperty{ Type: PropertyType{"unknown_type"}, }, expected: "any", }, 
{ name: "multiple types including array", param: ToolProperty{ Type: PropertyType{"string", "array", "null"}, }, expected: "string | any[] | null", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := tt.param.ToTypeScriptType() if result != tt.expected { t.Errorf("ToTypeScriptType() = %q, want %q", result, tt.expected) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/api/examples/chat/main.go
api/examples/chat/main.go
package main import ( "context" "fmt" "log" "github.com/ollama/ollama/api" ) func main() { client, err := api.ClientFromEnvironment() if err != nil { log.Fatal(err) } messages := []api.Message{ { Role: "system", Content: "Provide very brief, concise responses", }, { Role: "user", Content: "Name some unusual animals", }, { Role: "assistant", Content: "Monotreme, platypus, echidna", }, { Role: "user", Content: "which of these is the most dangerous?", }, } ctx := context.Background() req := &api.ChatRequest{ Model: "llama3.2", Messages: messages, } respFunc := func(resp api.ChatResponse) error { fmt.Print(resp.Message.Content) return nil } err = client.Chat(ctx, req, respFunc) if err != nil { log.Fatal(err) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/api/examples/generate/main.go
api/examples/generate/main.go
package main import ( "context" "fmt" "log" "github.com/ollama/ollama/api" ) func main() { client, err := api.ClientFromEnvironment() if err != nil { log.Fatal(err) } req := &api.GenerateRequest{ Model: "gemma2", Prompt: "how many planets are there?", // set streaming to false Stream: new(bool), } ctx := context.Background() respFunc := func(resp api.GenerateResponse) error { // Only print the response here; GenerateResponse has a number of other // interesting fields you want to examine. fmt.Println(resp.Response) return nil } err = client.Generate(ctx, req, respFunc) if err != nil { log.Fatal(err) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/api/examples/generate-streaming/main.go
api/examples/generate-streaming/main.go
package main import ( "context" "fmt" "log" "github.com/ollama/ollama/api" ) func main() { client, err := api.ClientFromEnvironment() if err != nil { log.Fatal(err) } // By default, GenerateRequest is streaming. req := &api.GenerateRequest{ Model: "gemma2", Prompt: "how many planets are there?", } ctx := context.Background() respFunc := func(resp api.GenerateResponse) error { // Only print the response here; GenerateResponse has a number of other // interesting fields you want to examine. // In streaming mode, responses are partial so we call fmt.Print (and not // Println) in order to avoid spurious newlines being introduced. The // model will insert its own newlines if it wants. fmt.Print(resp.Response) return nil } err = client.Generate(ctx, req, respFunc) if err != nil { log.Fatal(err) } fmt.Println() }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/api/examples/multimodal/main.go
api/examples/multimodal/main.go
package main import ( "context" "fmt" "log" "os" "github.com/ollama/ollama/api" ) func main() { if len(os.Args) <= 1 { log.Fatal("usage: <image name>") } imgData, err := os.ReadFile(os.Args[1]) if err != nil { log.Fatal(err) } client, err := api.ClientFromEnvironment() if err != nil { log.Fatal(err) } req := &api.GenerateRequest{ Model: "llava", Prompt: "describe this image", Images: []api.ImageData{imgData}, } ctx := context.Background() respFunc := func(resp api.GenerateResponse) error { // In streaming mode, responses are partial so we call fmt.Print (and not // Println) in order to avoid spurious newlines being introduced. The // model will insert its own newlines if it wants. fmt.Print(resp.Response) return nil } err = client.Generate(ctx, req, respFunc) if err != nil { log.Fatal(err) } fmt.Println() }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/api/examples/pull-progress/main.go
api/examples/pull-progress/main.go
package main import ( "context" "fmt" "log" "github.com/ollama/ollama/api" ) func main() { client, err := api.ClientFromEnvironment() if err != nil { log.Fatal(err) } ctx := context.Background() req := &api.PullRequest{ Model: "mistral", } progressFunc := func(resp api.ProgressResponse) error { fmt.Printf("Progress: status=%v, total=%v, completed=%v\n", resp.Status, resp.Total, resp.Completed) return nil } err = client.Pull(ctx, req, progressFunc) if err != nil { log.Fatal(err) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/runner.go
runner/runner.go
package runner import ( "github.com/ollama/ollama/runner/llamarunner" "github.com/ollama/ollama/runner/ollamarunner" ) func Execute(args []string) error { if args[0] == "runner" { args = args[1:] } var newRunner bool if args[0] == "--ollama-engine" { args = args[1:] newRunner = true } if newRunner { return ollamarunner.Execute(args) } else { return llamarunner.Execute(args) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/llamarunner/cache.go
runner/llamarunner/cache.go
package llamarunner import ( "errors" "fmt" "log/slog" "reflect" "time" "github.com/ollama/ollama/llama" ) type InputCache struct { // context window size (per slot) numCtx int // individual KV caches slots []InputCacheSlot // optimize cache eviction for multiple users multiUserCache bool lc *llama.Context } func NewInputCache(lc *llama.Context, kvSize int, numSlots int, multiUserCache bool) (*InputCache, error) { if kvSize/numSlots < 1 { return nil, fmt.Errorf("must have at least one kv cache entry per parallel sequence (kv: %v parallel: %v)", kvSize, numSlots) } slots := make([]InputCacheSlot, numSlots) for i := range slots { slots[i] = InputCacheSlot{ Id: i, Inputs: make([]input, 0), } } return &InputCache{ numCtx: kvSize / numSlots, slots: slots, multiUserCache: multiUserCache, lc: lc, }, nil } // Locking: Operations on InputCacheSlot (including finding one // through LoadCacheSlot) require a lock to be held that serializes // these operations with each other and llama.Decode type InputCacheSlot struct { // Index in the KV cache Id int // Inputs that are stored in the KV cache Inputs []input // is this cache actively being processed as part of a sequence? InUse bool // last time this cache was used (as of start of processing) lastUsed time.Time } func (c *InputCache) LoadCacheSlot(prompt []input, cachePrompt bool) (*InputCacheSlot, []input, error) { var slot *InputCacheSlot var numPast int var err error // In single-user scenarios, the longest cache slot works fine for getting good input // cache hit rates and it reuses the same VRAM over and over again, which is good for // GPU performance in situations where we miss the input cache. // For multiple users, the "best" cache slot produces better input cache hit rates // at the cost of worse performance when we miss the input cache (because it causes // GPU L2 cache misses due to spreading out accesses across VRAM). 
if !c.multiUserCache { slot, numPast, err = c.findLongestCacheSlot(prompt) } else { slot, numPast, err = c.findBestCacheSlot(prompt) } if err != nil { return nil, nil, err } if !cachePrompt { numPast = 0 } slot.InUse = true slot.lastUsed = time.Now() if numPast == len(prompt) { // Leave one input to sample so we can get a response numPast-- } if !c.lc.KvCacheSeqRm(slot.Id, numPast, -1) { // Some models don't support partial erasure c.lc.KvCacheSeqRm(slot.Id, 0, -1) numPast = 0 } slog.Debug("loading cache slot", "id", slot.Id, "cache", len(slot.Inputs), "prompt", len(prompt), "used", numPast, "remaining", len(prompt)-numPast) slot.Inputs = prompt[:numPast] prompt = prompt[numPast:] return slot, prompt, nil } func (c *InputCache) findLongestCacheSlot(prompt []input) (*InputCacheSlot, int, error) { longest := -1 var longestSlot *InputCacheSlot for i, s := range c.slots { if s.InUse { continue } count := countCommonPrefix(s.Inputs, prompt) if count > longest { longest = count longestSlot = &c.slots[i] } } if longestSlot == nil { return nil, 0, errors.New("no available cache slots") } return longestSlot, longest, nil } func (c *InputCache) findBestCacheSlot(prompt []input) (*InputCacheSlot, int, error) { oldest := time.Now() var oldestSlot *InputCacheSlot longest := -1 var longestSlot *InputCacheSlot for i, s := range c.slots { count := countCommonPrefix(s.Inputs, prompt) if count > longest { longest = count longestSlot = &c.slots[i] } if s.lastUsed.Compare(oldest) < 0 && !s.InUse { oldest = s.lastUsed oldestSlot = &c.slots[i] } } if longest == len(longestSlot.Inputs) && !longestSlot.InUse { return longestSlot, longest, nil } if oldestSlot.InUse { return nil, 0, errors.New("no available cache slots") } if len(oldestSlot.Inputs) != 0 { slog.Debug("evicting cache slot", "id", oldestSlot.Id, "inputs", len(oldestSlot.Inputs), "used", oldestSlot.lastUsed) } if longest > 0 && longestSlot != oldestSlot { slog.Debug("forking cache slot", "src", longestSlot.Id, "dst", 
oldestSlot.Id, "inputs", longest, "total", len(longestSlot.Inputs)) oldestSlot.Inputs = make([]input, longest) copy(oldestSlot.Inputs, longestSlot.Inputs[:longest]) // This is only nil for unit tests if c.lc != nil { c.lc.KvCacheSeqRm(oldestSlot.Id, 0, -1) c.lc.KvCacheSeqCp(longestSlot.Id, oldestSlot.Id, 0, longest) } } return oldestSlot, longest, nil } func countCommonPrefix(a []input, b []input) int { var count int for i := range a { if i >= len(b) { break } if !reflect.DeepEqual(a[i], b[i]) { break } count++ } return count } func (c *InputCache) ShiftDiscard(inputLen int, numKeep int) int { targetFree := (c.numCtx - numKeep) / 2 targetFree = max(targetFree, 1) currentFree := c.numCtx - inputLen return max(targetFree-currentFree, 0) } type ErrReprocessInputs struct { Inputs []input } func (e *ErrReprocessInputs) Error() string { return fmt.Sprintf("kv cache shift not supported, inputs need reprocessing (input count: %v)", len(e.Inputs)) } // ShiftCacheSlot frees up space in the KV cache by deleting the oldest half of history // and shifting the newest half into that space (saving numKeep inputs at the beginning). // // Assumes that at least 1 entry can be freed up by shifting (i.e. 
numKeep < numCtx) func (c *InputCache) ShiftCacheSlot(slot *InputCacheSlot, numKeep int) error { if numKeep >= c.numCtx { return fmt.Errorf("unable to shift context - keep exceeds context (keep: %v context: %v)", numKeep, c.numCtx) } inputLen := len(slot.Inputs) discard := c.ShiftDiscard(inputLen, numKeep) if discard <= 0 { return nil } slog.Debug("context limit hit - shifting", "id", slot.Id, "limit", c.numCtx, "input", len(slot.Inputs), "keep", numKeep, "discard", discard) var shiftFailed bool if c.lc.KvCacheCanShift() { // For models that support shifting, attempt to shift the KV cache if !c.lc.KvCacheSeqRm(slot.Id, numKeep, numKeep+discard) { shiftFailed = true slog.Debug("kv cache removal not supported, clearing cache and returning inputs for reprocessing", "id", slot.Id) } else { c.lc.KvCacheSeqAdd(slot.Id, numKeep+discard, inputLen, -discard) } } else { // For models that don't support shifting shiftFailed = true slog.Debug("kv cache cannot shift, clearing cache and returning inputs for reprocessing", "id", slot.Id) } if shiftFailed { // Create new input slice with preserved tokens (numKeep + remaining tokens after discard) newInputs := make([]input, numKeep+inputLen-(numKeep+discard)) copy(newInputs[:numKeep], slot.Inputs[:numKeep]) copy(newInputs[numKeep:], slot.Inputs[numKeep+discard:]) // Clear the entire KV cache _ = c.lc.KvCacheSeqRm(slot.Id, 0, -1) // Reset the slot inputs since we've cleared the cache slot.Inputs = []input{} // Return error with inputs that need to be reprocessed return &ErrReprocessInputs{Inputs: newInputs} } // Standard shift succeeded - update input array for i := numKeep + discard; i < inputLen; i++ { slot.Inputs[i-discard] = slot.Inputs[i] } slot.Inputs = slot.Inputs[:inputLen-discard] return nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/llamarunner/cache_test.go
runner/llamarunner/cache_test.go
package llamarunner import ( "testing" "time" ) func TestCountCommon(t *testing.T) { tests := []struct { name string t1 []input t2 []input expected int }{ { name: "Equal", t1: []input{{token: 1}, {token: 2}, {token: 3}}, t2: []input{{token: 1}, {token: 2}, {token: 3}}, expected: 3, }, { name: "Prefix", t1: []input{{token: 1}}, t2: []input{{token: 1}, {token: 2}, {token: 3}}, expected: 1, }, { name: "Embeddings Prefix", t1: []input{{embed: []float32{0.1, 0.2, 0.3}}}, t2: []input{{embed: []float32{0.1, 0.2, 0.3}}, {embed: []float32{0.4, 0.5, 0.6}}, {embed: []float32{0.7}}}, expected: 1, }, { name: "Embeddings Prefix Partial", t1: []input{{embed: []float32{0.1, 0.2, 0.3}}}, t2: []input{{embed: []float32{0.1, 0.2}}, {embed: []float32{0.4, 0.5, 0.6}}, {embed: []float32{0.7}}}, expected: 0, }, { name: "Mixed", t1: []input{{token: 1}, {embed: []float32{0.2, 0.3, 0.4}}}, t2: []input{{token: 1}, {embed: []float32{0.2, 0.3, 0.4}}, {token: 5}}, expected: 2, }, { name: "Empty", t1: []input{}, t2: []input{{token: 1}, {token: 2}, {token: 3}}, expected: 0, }, { name: "Both Empty", t1: []input{}, t2: []input{}, expected: 0, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := countCommonPrefix(tt.t1, tt.t2) if result != tt.expected { t.Errorf("countCommonPrefix(%v, %v): have %v; want %v", tt.t1, tt.t2, result, tt.expected) } }) } } func TestFindCacheSlot(t *testing.T) { type expected struct { result int len int } tests := []struct { name string cache InputCache prompt []input longest expected best expected }{ { name: "Empty", cache: InputCache{slots: []InputCacheSlot{ { Id: 0, Inputs: []input{}, InUse: false, lastUsed: time.Time{}, }, { Id: 1, Inputs: []input{}, InUse: false, lastUsed: time.Time{}, }, }}, prompt: []input{{token: 1}}, longest: expected{result: 0, len: 0}, best: expected{result: 0, len: 0}, }, { name: "Extend", cache: InputCache{slots: []InputCacheSlot{ { Id: 0, Inputs: []input{{token: 1}}, InUse: false, lastUsed: 
time.Now().Add(-time.Second), }, { Id: 1, Inputs: []input{{token: 1}, {token: 2}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, prompt: []input{{token: 1}, {token: 2}}, longest: expected{result: 1, len: 2}, best: expected{result: 1, len: 2}, }, { name: "New", cache: InputCache{slots: []InputCacheSlot{ { Id: 0, Inputs: []input{{token: 1}, {token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []input{}, InUse: false, lastUsed: time.Time{}, }, }}, prompt: []input{{token: 2}}, longest: expected{result: 0, len: 0}, best: expected{result: 1, len: 0}, }, { name: "Fork", cache: InputCache{ slots: []InputCacheSlot{ { Id: 0, Inputs: []input{{token: 1}, {token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []input{}, InUse: false, lastUsed: time.Time{}, }, }, }, prompt: []input{{token: 1}}, longest: expected{result: 0, len: 1}, best: expected{result: 1, len: 1}, }, { name: "Evict", cache: InputCache{slots: []InputCacheSlot{ { Id: 0, Inputs: []input{{token: 1}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []input{{token: 1}, {token: 2}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, prompt: []input{{token: 2}, {token: 3}}, longest: expected{result: 0, len: 0}, best: expected{result: 1, len: 0}, }, { name: "In use", cache: InputCache{slots: []InputCacheSlot{ { Id: 0, Inputs: []input{{token: 1}, {token: 2}}, InUse: true, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []input{{token: 1}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, prompt: []input{{token: 1}, {token: 2}}, longest: expected{result: 1, len: 1}, best: expected{result: 1, len: 2}, }, } for _, tt := range tests { t.Run("Longest-"+tt.name, func(t *testing.T) { result, resultLen, err := tt.cache.findLongestCacheSlot(tt.prompt) if err != nil { t.Errorf("findLongestCacheSlot: err %v", err) } else if result.Id != tt.longest.result || resultLen != 
tt.longest.len { t.Errorf("findLongestCacheSlot: slot have %v, want %v len have %v, want %v", result.Id, tt.longest.result, resultLen, tt.longest.len) } }) } for _, tt := range tests { t.Run("Best-"+tt.name, func(t *testing.T) { result, resultLen, err := tt.cache.findBestCacheSlot(tt.prompt) if err != nil { t.Errorf("findBestCacheSlot: err %v", err) } else if result.Id != tt.best.result || resultLen != tt.best.len { t.Errorf("findBestCacheSlot: slot have %v, want %v len have %v, want %v", result.Id, tt.best.result, resultLen, tt.best.len) } }) } } func TestShiftDiscard(t *testing.T) { tests := []struct { name string numCtx int numKeep int inputLen int expected int }{ { name: "Shift", numCtx: 2048, numKeep: 5, inputLen: 2048, expected: 1021, }, { name: "Max Keep", numCtx: 2048, numKeep: 2047, inputLen: 2048, expected: 1, }, { name: "No Keep", numCtx: 2048, numKeep: 0, inputLen: 2048, expected: 1024, }, { name: "Truncate", numCtx: 2048, numKeep: 5, inputLen: 5000, expected: 3973, }, { name: "Truncate Keep", numCtx: 2048, numKeep: 2047, inputLen: 5000, expected: 2953, }, { name: "No Op", numCtx: 2048, numKeep: 5, inputLen: 512, expected: 0, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := InputCache{numCtx: tt.numCtx} result := c.ShiftDiscard(tt.inputLen, tt.numKeep) if result != tt.expected { t.Errorf("shiftDiscard(ctx: %v, keep: %v input: %v): have %v; want %v", tt.numCtx, tt.numKeep, tt.inputLen, result, tt.expected) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/llamarunner/image.go
runner/llamarunner/image.go
package llamarunner import ( "errors" "fmt" "hash/maphash" "log/slog" "sync" "time" "github.com/ollama/ollama/llama" ) const imageCacheSize = 4 type ImageContext struct { // mu is required to be held when generating embeddings or accessing the cache mu sync.Mutex mtmd *llama.MtmdContext // cache of images to embeddings images []imageCache imageHash maphash.Hash } func NewImageContext(llamaContext *llama.Context, modelPath string) (*ImageContext, error) { arch, err := llama.GetModelArch(modelPath) if err != nil { return nil, fmt.Errorf("unable to determine vision architecture: %w (%s)", err, modelPath) } var c ImageContext if arch == "clip" { c.mtmd, err = llama.NewMtmdContext(llamaContext, modelPath) } else { return nil, fmt.Errorf("unknown vision model architecture: %s", arch) } if err != nil { return nil, err } c.images = make([]imageCache, imageCacheSize) return &c, nil } func (c *ImageContext) Free(modelPath string) { if c == nil { return } if c.mtmd != nil { c.mtmd.Free() } } func (c *ImageContext) MultimodalTokenize(llamaContext *llama.Context, data []byte) ([]llama.MtmdChunk, error) { if c == nil { return nil, nil } if len(data) <= 0 { return nil, errors.New("received zero length image") } hash := c.hashImage(data) c.mu.Lock() defer c.mu.Unlock() chunks, err := c.findImage(hash) if err != nil { if c.mtmd != nil { chunks, err = c.mtmd.MultimodalTokenize(llamaContext, data) if err != nil { return nil, err } } else { return nil, errors.New("received image but vision model not loaded") } c.addImage(hash, chunks) } return chunks, nil } func (c *ImageContext) BatchSize(configuredBatchSize int) int { // If images are not supported, we don't need to allocate embedding batches if c == nil { return 0 } return configuredBatchSize } func (c *ImageContext) EmbedSize(llamaContext *llama.Context) int { return llamaContext.Model().NEmbd() } type imageCache struct { key uint64 val []llama.MtmdChunk lastUsed time.Time } func (c *ImageContext) hashImage(image []byte) uint64 { 
c.imageHash.Reset() _, _ = c.imageHash.Write(image) return c.imageHash.Sum64() } var errImageNotFound = errors.New("image not found in cache") func (c *ImageContext) findImage(hash uint64) ([]llama.MtmdChunk, error) { for i := range c.images { if c.images[i].key == hash { slog.Debug("loading image embeddings from cache", "entry", i) c.images[i].lastUsed = time.Now() return c.images[i].val, nil } } return nil, errImageNotFound } func (c *ImageContext) addImage(hash uint64, embed []llama.MtmdChunk) { best := time.Now() var bestImage int for i := range c.images { if c.images[i].key == hash { bestImage = i break } if c.images[i].lastUsed.Compare(best) < 0 { best = c.images[i].lastUsed bestImage = i } } slog.Debug("storing image embeddings in cache", "entry", bestImage, "used", c.images[bestImage].lastUsed) c.images[bestImage].key = hash c.images[bestImage].val = embed c.images[bestImage].lastUsed = time.Now() }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/llamarunner/image_test.go
runner/llamarunner/image_test.go
package llamarunner import ( "reflect" "testing" "github.com/ollama/ollama/llama" ) func TestImageCache(t *testing.T) { cache := ImageContext{images: make([]imageCache, 4)} valA := []llama.MtmdChunk{{Embed: []float32{0.1, 0.2}}, {Embed: []float32{0.3}}} valB := []llama.MtmdChunk{{Embed: []float32{0.4}}, {Embed: []float32{0.5}}, {Embed: []float32{0.6}}} valC := []llama.MtmdChunk{{Embed: []float32{0.7}}} valD := []llama.MtmdChunk{{Embed: []float32{0.8}}} valE := []llama.MtmdChunk{{Embed: []float32{0.9}}} // Empty cache result, err := cache.findImage(0x5adb61d31933a946) if err != errImageNotFound { t.Errorf("found result in empty cache: result %v, err %v", result, err) } // Insert A cache.addImage(0x5adb61d31933a946, valA) result, err = cache.findImage(0x5adb61d31933a946) if !reflect.DeepEqual(result, valA) { t.Errorf("failed to find expected value: result %v, err %v", result, err) } // Insert B cache.addImage(0x011551369a34a901, valB) result, err = cache.findImage(0x5adb61d31933a946) if !reflect.DeepEqual(result, valA) { t.Errorf("failed to find expected value: result %v, err %v", result, err) } result, err = cache.findImage(0x011551369a34a901) if !reflect.DeepEqual(result, valB) { t.Errorf("failed to find expected value: result %v, err %v", result, err) } // Replace B with C cache.addImage(0x011551369a34a901, valC) result, err = cache.findImage(0x5adb61d31933a946) if !reflect.DeepEqual(result, valA) { t.Errorf("failed to find expected value: result %v, err %v", result, err) } result, err = cache.findImage(0x011551369a34a901) if !reflect.DeepEqual(result, valC) { t.Errorf("failed to find expected value: result %v, err %v", result, err) } // Evict A cache.addImage(0x756b218a517e7353, valB) cache.addImage(0x75e5e8d35d7e3967, valD) cache.addImage(0xd96f7f268ca0646e, valE) result, err = cache.findImage(0x5adb61d31933a946) if reflect.DeepEqual(result, valA) { t.Errorf("failed to find expected value: result %v, err %v", result, err) } result, err = 
cache.findImage(0x756b218a517e7353) if !reflect.DeepEqual(result, valB) { t.Errorf("failed to find expected value: result %v, err %v", result, err) } result, err = cache.findImage(0x011551369a34a901) if !reflect.DeepEqual(result, valC) { t.Errorf("failed to find expected value: result %v, err %v", result, err) } result, err = cache.findImage(0x75e5e8d35d7e3967) if !reflect.DeepEqual(result, valD) { t.Errorf("failed to find expected value: result %v, err %v", result, err) } result, err = cache.findImage(0xd96f7f268ca0646e) if !reflect.DeepEqual(result, valE) { t.Errorf("failed to find expected value: result %v, err %v", result, err) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/llamarunner/runner.go
runner/llamarunner/runner.go
package llamarunner import ( "context" "encoding/json" "errors" "flag" "fmt" "log" "log/slog" "net" "net/http" "os" "regexp" "sort" "strconv" "strings" "sync" "time" "unicode/utf8" "golang.org/x/sync/semaphore" "github.com/ollama/ollama/api" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/llama" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/logutil" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/runner/common" ) // response contains a piece of generated text along with optional logprobs type response struct { content string logprobs []llm.Logprob } // input is an element of the prompt to process, either // a token or an image embedding (generated from a vision projector) type input struct { token int // embed is an image embedding embed []float32 } type Sequence struct { // batch index iBatch int // number of tokens predicted so far numPredicted int // prompt inputs left to evaluate inputs []input // inputs that have been added to a batch but not yet submitted to Decode pendingInputs []input // tokens that have been generated but not returned yet (e.g. 
for stop sequences) pendingResponses []string // logprobs for tokens that haven't been returned yet pendingLogprobs []llm.Logprob // input cache being used by this sequence cache *InputCacheSlot // channel to send responses over responses chan response // channel to stop decoding (such as if the remote connection is closed) quit chan bool // number of tokens to predict numPredict int samplingCtx *llama.SamplingContext // channel to send back the embedding if embedding only embedding chan []float32 // stop sequences stop []string // number of inputs to keep at the beginning when shifting context window numKeep int // true if an embedding are to be returned instead of text generation embeddingOnly bool // shift if context window is exceeded shift bool doneReason llm.DoneReason // logprobs configuration logprobs bool topLogprobs int // Metrics processingDuration time.Duration generationDuration time.Duration numDecoded int numPromptInputs int } type NewSequenceParams struct { numPredict int stop []string numKeep int samplingParams *llama.SamplingParams embedding bool shift bool truncate bool logprobs bool topLogprobs int } var errorInputTooLong = errors.New("the input length exceeds the context length") func (s *Server) NewSequence(prompt string, images []llm.ImageData, params NewSequenceParams) (*Sequence, error) { s.ready.Wait() inputs, err := s.inputs(prompt, images) if err != nil { return nil, fmt.Errorf("failed to process inputs: %w", err) } else if len(inputs) == 0 { return nil, errors.New("no input provided") } if params.numKeep < 0 { params.numKeep = len(inputs) } if s.model.AddBOSToken() { params.numKeep += 1 } // Ensure that at least 1 input can be discarded during shift params.numKeep = min(params.numKeep, s.cache.numCtx-1) if len(inputs) > s.cache.numCtx { discard := len(inputs) - s.cache.numCtx if !params.truncate { return nil, errorInputTooLong } newInputs := inputs[:params.numKeep] newInputs = append(newInputs, inputs[params.numKeep+discard:]...) 
slog.Warn("truncating input prompt", "limit", s.cache.numCtx, "prompt", len(inputs), "keep", params.numKeep, "new", len(newInputs)) inputs = newInputs } var sc *llama.SamplingContext if params.samplingParams != nil { sc, err = llama.NewSamplingContext(s.model, *params.samplingParams) if err != nil { return nil, err } for _, input := range inputs { if input.embed == nil { sc.Accept(input.token, false) } } } return &Sequence{ inputs: inputs, numPromptInputs: len(inputs), numPredict: params.numPredict, pendingResponses: make([]string, 0), responses: make(chan response, 100), quit: make(chan bool, 1), embedding: make(chan []float32, 1), samplingCtx: sc, embeddingOnly: params.embedding, stop: params.stop, numKeep: params.numKeep, shift: params.shift, logprobs: params.logprobs, topLogprobs: params.topLogprobs, }, nil } // calculateLogprobsLlama converts raw logits to log probabilities and finds top K tokens func calculateLogprobsLlama(logits []float32, selectedToken int, topK int, model *llama.Model) []llm.Logprob { return common.CalculateLogprobs(logits, selectedToken, topK, model.TokenToPiece) } // inputs processes the prompt and images into a list of inputs // by splitting the prompt on [img-<n>] tags, tokenizing text and // generating image embeddings for each image func (s *Server) inputs(prompt string, images []llm.ImageData) ([]input, error) { var inputs []input var parts []string var matches [][]string if s.image != nil { re := regexp.MustCompile(`\[img-(\d+)\]`) parts = re.Split(prompt, -1) matches = re.FindAllStringSubmatch(prompt, -1) } else { parts = []string{prompt} } for i, part := range parts { // text - tokenize tokens, err := s.lc.Model().Tokenize(part, i == 0, true) if err != nil { return nil, err } for _, t := range tokens { inputs = append(inputs, input{token: t}) } // image - generate image embedding if i < len(matches) { n, _ := strconv.Atoi(matches[i][1]) imageIndex := -1 for j := range images { if images[j].ID == n { imageIndex = j break } } if 
imageIndex < 0 { return nil, fmt.Errorf("invalid image index: %d", n) } chunks, err := s.image.MultimodalTokenize(s.lc, images[imageIndex].Data) if err != nil { return nil, err } for _, c := range chunks { if len(c.Embed) != 0 { inputs = append(inputs, input{embed: c.Embed}) } else { for _, t := range c.Tokens { inputs = append(inputs, input{token: t}) } } } } } return inputs, nil } type Server struct { // modelPath is the location of the model to be loaded modelPath string // loadMu prevents more than one load attempt from occurring at a time loadMu sync.Mutex // is the server ready to process requests? // protects access to model and image ready sync.WaitGroup // loaded model model *llama.Model // image model context for multi-modal models image *ImageContext // status for external health reporting - loading, ready to serve, etc. status llm.ServerStatus // current progress on loading the model progress float32 // number of simultaneous requests to handle parallel int // maximum number of elements in a batch (per sequence) // TODO (jmorganca): make this n_batch batchSize int // protects access to everything below this line // this is context state needed for decoding mu sync.Mutex // indicates that data is ready for processing cond *sync.Cond // decoding state lc *llama.Context // the list of simultaneous sequences being evaluated seqs []*Sequence // seqs can have a maximum of parallel entries, which // is enfoced by seqSem seqsSem *semaphore.Weighted // KV cache cache *InputCache // next sequence for prompt processing to avoid starvation nextSeq int } func (s *Server) allNil() bool { for _, item := range s.seqs { if item != nil { return false } } return true } func flushPending(seq *Sequence) bool { joined := strings.Join(seq.pendingResponses, "") logprobs := seq.pendingLogprobs seq.pendingResponses = []string{} seq.pendingLogprobs = []llm.Logprob{} // Check if there are any partial UTF-8 characters remaining. 
// We already check and queue as we are generating but some may // still make it here: // - Sequence is ending, e.g. generation limit has been hit // - Invalid characters in the middle of a string // This is a stricter check to ensure we never output invalid Unicode. for !utf8.ValidString(joined) { joined = joined[:len(joined)-1] } if len(joined) == 0 { return true } select { case seq.responses <- response{content: joined, logprobs: logprobs}: return true case <-seq.quit: return false } } func (s *Server) removeSequence(seqIndex int, reason llm.DoneReason) { seq := s.seqs[seqIndex] flushPending(seq) seq.doneReason = reason close(seq.responses) close(seq.embedding) seq.cache.InUse = false s.seqs[seqIndex] = nil s.seqsSem.Release(1) } func (s *Server) run(ctx context.Context) { s.ready.Wait() // Logically these batches are used only within the context of processBatch // but it is better for performance to allocate them once here tokenBatch, err := llama.NewBatch(s.batchSize, len(s.seqs), 0) if err != nil { panic(err) } defer tokenBatch.Free() var embedBatch *llama.Batch embedBatchSize := s.image.BatchSize(s.batchSize) if embedBatchSize != 0 { embedBatch, err = llama.NewBatch(embedBatchSize, len(s.seqs), s.image.EmbedSize(s.lc)) if err != nil { panic(err) } defer embedBatch.Free() } else { embedBatch = &llama.Batch{} } for { select { case <-ctx.Done(): return default: err := s.processBatch(tokenBatch, embedBatch) if err != nil { panic(err) } tokenBatch.Clear() embedBatch.Clear() } } } // TODO (jmorganca): processBatch should be simplified, removing: // * sampling // * stop token checking // * metrics // these should instead be handled by the handlers // it should only be responsible for accepting tokens or embeddings and // processing batches as fast as possible func (s *Server) processBatch(tokenBatch *llama.Batch, embedBatch *llama.Batch) error { s.mu.Lock() for s.allNil() { s.cond.Wait() // Wait until an item is added } defer s.mu.Unlock() var batch *llama.Batch 
var numOutputs int seqIdx := s.nextSeq - 1 for range s.seqs { seqIdx = (seqIdx + 1) % len(s.seqs) seq := s.seqs[seqIdx] if seq == nil { continue } // if past the num predict limit if seq.numPredict > 0 && seq.numPredicted >= seq.numPredict { s.removeSequence(seqIdx, llm.DoneReasonLength) continue } for i, input := range seq.inputs { if len(seq.cache.Inputs)+len(seq.pendingInputs)+1 > s.cache.numCtx { if len(seq.pendingInputs) == 0 { if !seq.shift { s.removeSequence(seqIdx, llm.DoneReasonLength) break } err := s.cache.ShiftCacheSlot(seq.cache, seq.numKeep) if err != nil { var reprocess *ErrReprocessInputs if errors.As(err, &reprocess) { // Prepend these inputs to the sequence's inputs queue for reprocessing seq.inputs = append(reprocess.Inputs, seq.inputs...) // Continue processing as normal continue } else { return err } } } else { break } } embedding := input.embed != nil // If we don't currently have a batch, use one of the correct type and // fill it up as much as possible across all sequences. 
If we encounter an // input of the opppsite type, stop for that sequence but then pick up from // there for the next batch, ensuring that we alternate types if batch == nil { if !embedding { batch = tokenBatch } else { batch = embedBatch } } else if embedding != batch.IsEmbedding() { s.nextSeq = seqIdx break } if i >= batch.Size() { break } output := i+1 == len(seq.inputs) batch.Add(input.token, input.embed, len(seq.cache.Inputs)+len(seq.pendingInputs), output, seq.cache.Id) if output { numOutputs++ } seq.pendingInputs = append(seq.pendingInputs, input) seq.iBatch = batch.NumTokens() - 1 } seq.inputs = seq.inputs[len(seq.pendingInputs):] } if batch == nil || batch.NumTokens() == 0 { return nil } t := time.Now() if err := s.lc.Decode(batch); err != nil { return fmt.Errorf("failed to decode batch: %w", err) } if numOutputs > 0 { s.lc.Synchronize() } for i, seq := range s.seqs { if seq == nil { continue } // After calling Decode, pending inputs are now in the cache if len(seq.pendingInputs) > 0 { seq.cache.Inputs = append(seq.cache.Inputs, seq.pendingInputs...) 
seq.pendingInputs = []input{} } // don't sample prompt processing if len(seq.inputs) != 0 { seq.processingDuration += time.Since(t) continue } seq.numDecoded++ if seq.numDecoded > 1 { seq.generationDuration += time.Since(t) } else { seq.processingDuration += time.Since(t) } // if done processing the prompt, generate an embedding and return if seq.embeddingOnly { embed := s.lc.GetEmbeddingsSeq(seq.cache.Id) if embed == nil { embed = s.lc.GetEmbeddingsIth(seq.iBatch) } seq.embedding <- embed s.removeSequence(i, llm.DoneReasonStop) continue } // sample a token token := seq.samplingCtx.Sample(s.lc, seq.iBatch) seq.samplingCtx.Accept(token, true) piece := s.model.TokenToPiece(token) seq.numPredicted++ // if it's an end of sequence token, break if s.model.TokenIsEog(token) { // TODO (jmorganca): we should send this back // as it's important for the /api/generate context // seq.responses <- piece s.removeSequence(i, llm.DoneReasonStop) continue } // Calculate logprobs if requested (after EOS check to avoid logprobs for EOS tokens) if seq.logprobs { logits := s.lc.GetLogitsIth(seq.iBatch) if logits != nil { logprobs := calculateLogprobsLlama(logits, token, seq.topLogprobs, s.model) seq.pendingLogprobs = append(seq.pendingLogprobs, logprobs...) 
} } seq.inputs = []input{{token: token}} seq.pendingResponses = append(seq.pendingResponses, piece) sequence := strings.Join(seq.pendingResponses, "") if ok, stop := common.FindStop(sequence, seq.stop); ok { slog.Debug("hit stop token", "pending", seq.pendingResponses, "stop", stop) var tokenTruncated bool origLen := len(seq.pendingResponses) seq.pendingResponses, tokenTruncated = common.TruncateStop(seq.pendingResponses, stop) newLen := len(seq.pendingResponses) // Truncate logprobs to match the truncated responses if seq.logprobs { origLogprobsLen := len(seq.pendingLogprobs) numTokensRemoved := origLen - newLen newLogprobsLen := origLogprobsLen - numTokensRemoved if newLogprobsLen < 0 { newLogprobsLen = 0 } seq.pendingLogprobs = seq.pendingLogprobs[:newLogprobsLen] } // Update the cache based on the tokens that will be returned: // - We have 1 token more than is currently in the cache because // the last one generated wasn't submitted to Decode // - Remove any stop sequences that we stripped out // - If truncateStop removed a portion of a token, drop that // - As defense-in-depth, if truncatedToken didn't find a stop token // remove the extra one that we added to the cache len tokenLen := len(seq.cache.Inputs) + 1 tokenLen -= origLen - newLen if tokenTruncated || origLen == newLen { tokenLen-- } seq.cache.Inputs = seq.cache.Inputs[:tokenLen] s.removeSequence(i, llm.DoneReasonStop) continue } if common.ContainsStopSuffix(sequence, seq.stop) { continue } if common.IncompleteUnicode(sequence) { continue } if !flushPending(seq) { s.removeSequence(i, llm.DoneReasonConnectionClosed) } } return nil } func (s *Server) completion(w http.ResponseWriter, r *http.Request) { var req llm.CompletionRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { http.Error(w, "Bad request", http.StatusBadRequest) return } if req.Options == nil { opts := api.DefaultOptions() req.Options = &opts } // Set the headers to indicate streaming w.Header().Set("Content-Type", 
"application/json") w.Header().Set("Transfer-Encoding", "chunked") flusher, ok := w.(http.Flusher) if !ok { http.Error(w, "Streaming not supported", http.StatusInternalServerError) return } // Extract options from the CompletionRequest samplingParams := llama.SamplingParams{ TopK: req.Options.TopK, TopP: req.Options.TopP, MinP: req.Options.MinP, TypicalP: req.Options.TypicalP, Temp: req.Options.Temperature, RepeatLastN: req.Options.RepeatLastN, PenaltyRepeat: req.Options.RepeatPenalty, PenaltyFreq: req.Options.FrequencyPenalty, PenaltyPresent: req.Options.PresencePenalty, Seed: uint32(req.Options.Seed), Grammar: req.Grammar, } seq, err := s.NewSequence(req.Prompt, req.Images, NewSequenceParams{ numPredict: req.Options.NumPredict, stop: req.Options.Stop, numKeep: req.Options.NumKeep, samplingParams: &samplingParams, embedding: false, shift: req.Shift, truncate: req.Truncate, logprobs: req.Logprobs, topLogprobs: req.TopLogprobs, }) if err != nil { if errors.Is(err, errorInputTooLong) { http.Error(w, err.Error(), http.StatusBadRequest) return } http.Error(w, fmt.Sprintf("Failed to create new sequence: %v", err), http.StatusInternalServerError) return } // Ensure there is a place to put the sequence, released when removed from s.seqs if err := s.seqsSem.Acquire(r.Context(), 1); err != nil { if errors.Is(err, context.Canceled) { slog.Info("aborting completion request due to client closing the connection") } else { http.Error(w, fmt.Sprintf("Failed to acquire semaphore: %v", err), http.StatusInternalServerError) } return } s.mu.Lock() found := false for i, sq := range s.seqs { if sq == nil { seq.cache, seq.inputs, err = s.cache.LoadCacheSlot(seq.inputs, true) if err != nil { s.mu.Unlock() s.seqsSem.Release(1) http.Error(w, fmt.Sprintf("Failed to load cache: %v", err), http.StatusInternalServerError) return } s.seqs[i] = seq s.cond.Signal() found = true break } } s.mu.Unlock() if !found { s.seqsSem.Release(1) http.Error(w, "could not find an available sequence", 
http.StatusInternalServerError) return } for { select { case <-r.Context().Done(): close(seq.quit) return case resp, ok := <-seq.responses: if ok { if err := json.NewEncoder(w).Encode(&llm.CompletionResponse{ Content: resp.content, Logprobs: resp.logprobs, }); err != nil { http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError) close(seq.quit) return } flusher.Flush() } else { if err := json.NewEncoder(w).Encode(&llm.CompletionResponse{ Done: true, DoneReason: seq.doneReason, PromptEvalCount: seq.numPromptInputs, PromptEvalDuration: seq.processingDuration, EvalCount: seq.numDecoded, EvalDuration: seq.generationDuration, }); err != nil { http.Error(w, fmt.Sprintf("failed to encode final response: %v", err), http.StatusInternalServerError) } return } } } } func (s *Server) embeddings(w http.ResponseWriter, r *http.Request) { var req llm.EmbeddingRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { http.Error(w, fmt.Sprintf("bad request: %s", err), http.StatusBadRequest) return } w.Header().Set("Content-Type", "application/json") seq, err := s.NewSequence(req.Content, nil, NewSequenceParams{ embedding: true, truncate: false, }) if err != nil { if errors.Is(err, errorInputTooLong) { http.Error(w, err.Error(), http.StatusBadRequest) return } http.Error(w, fmt.Sprintf("Failed to create new sequence: %v", err), http.StatusInternalServerError) return } // Ensure there is a place to put the sequence, released when removed from s.seqs if err := s.seqsSem.Acquire(r.Context(), 1); err != nil { if errors.Is(err, context.Canceled) { slog.Info("aborting embeddings request due to client closing the connection") } else { http.Error(w, fmt.Sprintf("Failed to acquire semaphore: %v", err), http.StatusInternalServerError) } return } s.mu.Lock() found := false for i, sq := range s.seqs { if sq == nil { seq.cache, seq.inputs, err = s.cache.LoadCacheSlot(seq.inputs, false) if err != nil { s.mu.Unlock() s.seqsSem.Release(1) 
http.Error(w, fmt.Sprintf("Failed to load cache: %v", err), http.StatusInternalServerError) return } s.seqs[i] = seq s.cond.Signal() found = true break } } s.mu.Unlock() if !found { s.seqsSem.Release(1) http.Error(w, "could not find an available sequence", http.StatusInternalServerError) return } embedding := <-seq.embedding if err := json.NewEncoder(w).Encode(&llm.EmbeddingResponse{ Embedding: embedding, PromptEvalCount: seq.numPromptInputs, }); err != nil { http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError) } } func (s *Server) health(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(w).Encode(&llm.ServerStatusResponse{ Status: s.status, Progress: s.progress, }); err != nil { http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError) } } // loadModel allocates memory based on the given parameters and loads the weights. The // memory allocated is worst case for text models but not for vision. 
// loadModel loads the model weights, creates the llama context, applies any
// LoRA adapters and an optional projector, and initializes the input cache.
// It runs on a goroutine started by load; a panic here is fatal by design
// since the runner process cannot recover from a failed model load.
func (s *Server) loadModel(
	params llama.ModelParams,
	mpath string,
	lpath []string,
	ppath string,
	kvSize int,
	kvCacheType string,
	flashAttention ml.FlashAttentionType,
	threads int,
	multiUserCache bool,
) {
	var err error
	s.model, err = llama.LoadModelFromFile(mpath, params)
	if err != nil {
		panic(err)
	}

	ctxParams := llama.NewContextParams(kvSize, s.batchSize, s.parallel, threads, flashAttention, kvCacheType)
	s.lc, err = llama.NewContextWithModel(s.model, ctxParams)
	if err != nil {
		panic(err)
	}

	// Apply each LoRA adapter at full (1.0) scale.
	for _, path := range lpath {
		err := s.model.ApplyLoraFromFile(s.lc, path, 1.0, threads)
		if err != nil {
			panic(err)
		}
	}

	// Optional multimodal projector (vision models).
	if ppath != "" {
		var err error
		s.image, err = NewImageContext(s.lc, ppath)
		if err != nil {
			panic(err)
		}
	}

	s.cache, err = NewInputCache(s.lc, kvSize, s.parallel, multiUserCache)
	if err != nil {
		panic(err)
	}

	// Mark the server ready so health checks and waiting goroutines proceed.
	s.status = llm.ServerStatusReady
	s.ready.Done()
}

// load is the handler called by the Ollama server to process different
// load operations
func (s *Server) load(w http.ResponseWriter, r *http.Request) {
	s.loadMu.Lock()
	defer s.loadMu.Unlock()

	w.Header().Set("Content-Type", "application/json")

	// Loading is one-shot: reject a second load attempt.
	if s.status != llm.ServerStatusLaunched {
		http.Error(w, "model already loaded", http.StatusInternalServerError)
		return
	}

	var req llm.LoadRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "bad request", http.StatusBadRequest)
		return
	}

	slog.Info("load", "request", req)

	switch req.Operation {
	// LoadOperationFit and LoadOperationAlloc have no meaning here - just return a successful response
	case llm.LoadOperationCommit:
		s.batchSize = req.BatchSize
		s.parallel = req.Parallel
		s.seqs = make([]*Sequence, s.parallel)
		s.seqsSem = semaphore.NewWeighted(int64(s.parallel))

		// Translate the requested per-GPU layer assignment into llama.cpp's
		// device list and tensor split proportions.
		numGPU := 0
		var tensorSplit []float32
		var llamaIDs []uint64
		gpuIDs := llama.EnumerateGPUs()

		sort.Sort(req.GPULayers)
		for _, layers := range req.GPULayers {
			for i := range gpuIDs {
				if gpuIDs[i].DeviceID == layers.DeviceID {
					numGPU += len(layers.Layers)
					tensorSplit = append(tensorSplit, float32(len(layers.Layers)))
					llamaIDs = append(llamaIDs, gpuIDs[i].LlamaID)
				}
			}
		}

		params := llama.ModelParams{
			Devices:      llamaIDs,
			NumGpuLayers: numGPU,
			MainGpu:      req.MainGPU,
			// mmap cannot be used together with LoRA adapters.
			UseMmap:     req.UseMmap && len(req.LoraPath) == 0,
			TensorSplit: tensorSplit,
			Progress: func(progress float32) {
				s.progress = progress
			},
		}

		// Loading happens asynchronously; clients poll /health for progress.
		s.status = llm.ServerStatusLoadingModel
		go s.loadModel(params, s.modelPath, req.LoraPath, req.ProjectorPath, req.KvSize, req.KvCacheType, req.FlashAttention, req.NumThreads, req.MultiUserCache)
	case llm.LoadOperationClose:
		// No-op for us
		if err := json.NewEncoder(w).Encode(&llm.LoadResponse{}); err != nil {
			http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError)
		}
		return
	}

	resp := llm.LoadResponse{Success: true}
	if err := json.NewEncoder(w).Encode(&resp); err != nil {
		http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError)
		return
	}
}

// Execute is the runner's entry point: it parses command-line flags, starts
// the background processing loop, and serves the HTTP API on localhost until
// the listener fails. It returns any listen/serve error.
func Execute(args []string) error {
	fs := flag.NewFlagSet("runner", flag.ExitOnError)
	mpath := fs.String("model", "", "Path to model binary file")
	port := fs.Int("port", 8080, "Port to expose the server on")
	// Accepted for CLI compatibility; verbosity is driven by envconfig below.
	_ = fs.Bool("verbose", false, "verbose output (default: disabled)")

	fs.Usage = func() {
		fmt.Fprintf(fs.Output(), "Runner usage\n")
		fs.PrintDefaults()
	}
	if err := fs.Parse(args); err != nil {
		return err
	}

	slog.SetDefault(logutil.NewLogger(os.Stderr, envconfig.LogLevel()))
	slog.Info("starting go runner")

	llama.BackendInit()

	server := &Server{
		modelPath: *mpath,
		status:    llm.ServerStatusLaunched,
	}

	// Released by loadModel once the weights are in memory.
	server.ready.Add(1)

	server.cond = sync.NewCond(&server.mu)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go server.run(ctx)

	// Bind to loopback only: the runner is a private subprocess of the
	// Ollama server, not a public endpoint.
	addr := "127.0.0.1:" + strconv.Itoa(*port)
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		fmt.Println("Listen error:", err)
		return err
	}
	defer listener.Close()

	mux := http.NewServeMux()
	mux.HandleFunc("POST /load", server.load)
	mux.HandleFunc("/embedding", server.embeddings)
	mux.HandleFunc("/completion", server.completion)
	mux.HandleFunc("/health", server.health)

	httpServer := http.Server{
		Handler: mux,
	}

	log.Println("Server listening on", addr)
	if err := httpServer.Serve(listener); err != nil {
		// log.Fatal exits the process; the return below is unreachable but
		// satisfies the signature.
		log.Fatal("server error:", err)
		return err
	}

	return nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/common/stop.go
runner/common/stop.go
package common import ( "strings" ) func FindStop(sequence string, stops []string) (bool, string) { for _, stop := range stops { if strings.Contains(sequence, stop) { return true, stop } } return false, "" } func ContainsStopSuffix(sequence string, stops []string) bool { for _, stop := range stops { for i := 1; i <= len(stop); i++ { if strings.HasSuffix(sequence, stop[:i]) { return true } } } return false } // TruncateStop removes the provided stop string from pieces, // returning the partial pieces with stop removed, including truncating // the last piece if required (and signalling if this was the case) func TruncateStop(pieces []string, stop string) ([]string, bool) { joined := strings.Join(pieces, "") index := strings.Index(joined, stop) if index == -1 { return pieces, false } joined = joined[:index] // Split truncated string back into pieces of original lengths lengths := make([]int, len(pieces)) for i, piece := range pieces { lengths[i] = len(piece) } var result []string tokenTruncated := false start := 0 for _, length := range lengths { if start >= len(joined) { break } end := start + length if end > len(joined) { end = len(joined) tokenTruncated = true } result = append(result, joined[start:end]) start = end } return result, tokenTruncated } func IncompleteUnicode(token string) bool { incomplete := false // check if there is incomplete UTF-8 character at the end for i := 1; i < 5 && i <= len(token); i++ { c := token[len(token)-i] if (c & 0xc0) == 0x80 { // continuation byte: 10xxxxxx continue } if (c & 0xe0) == 0xc0 { // 2-byte character: 110xxxxx ... incomplete = i < 2 } else if (c & 0xf0) == 0xe0 { // 3-byte character: 1110xxxx ... incomplete = i < 3 } else if (c & 0xf8) == 0xf0 { // 4-byte character: 11110xxx ... incomplete = i < 4 } // else 1-byte character or invalid byte break } return incomplete }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/common/stop_test.go
runner/common/stop_test.go
package common

import (
	"reflect"
	"testing"
)

// TestTruncateStop covers stop strings that align with piece boundaries,
// fall inside a piece, or span multiple pieces, and checks the
// partial-truncation flag in each case.
func TestTruncateStop(t *testing.T) {
	tests := []struct {
		name          string
		pieces        []string
		stop          string
		expected      []string
		expectedTrunc bool
	}{
		{
			name:          "Single word",
			pieces:        []string{"hello", "world"},
			stop:          "world",
			expected:      []string{"hello"},
			expectedTrunc: false,
		},
		{
			name:          "Partial",
			pieces:        []string{"hello", "wor"},
			stop:          "or",
			expected:      []string{"hello", "w"},
			expectedTrunc: true,
		},
		{
			name:          "Suffix",
			pieces:        []string{"Hello", " there", "!"},
			stop:          "!",
			expected:      []string{"Hello", " there"},
			expectedTrunc: false,
		},
		{
			name:          "Suffix partial",
			pieces:        []string{"Hello", " the", "re!"},
			stop:          "there!",
			expected:      []string{"Hello", " "},
			expectedTrunc: true,
		},
		{
			name:          "Middle",
			pieces:        []string{"hello", " wor"},
			stop:          "llo w",
			expected:      []string{"he"},
			expectedTrunc: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, resultTrunc := TruncateStop(tt.pieces, tt.stop)
			if !reflect.DeepEqual(result, tt.expected) || resultTrunc != tt.expectedTrunc {
				t.Errorf("truncateStop(%v, %s): have %v (%v); want %v (%v)", tt.pieces, tt.stop, result, resultTrunc, tt.expected, tt.expectedTrunc)
			}
		})
	}
}

// TestIncompleteUnicode exercises complete and truncated 2-, 3- and 4-byte
// UTF-8 sequences appended to an ASCII prefix.
func TestIncompleteUnicode(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected bool
	}{
		{
			name:     "Basic",
			input:    "hi",
			expected: false,
		},
		{
			name:     "Two byte",
			input:    "hi" + string([]byte{0xc2, 0xa3}),
			expected: false,
		},
		{
			name:     "Two byte - missing last",
			input:    "hi" + string([]byte{0xc2}),
			expected: true,
		},
		{
			name:     "Three byte",
			input:    "hi" + string([]byte{0xe0, 0xA0, 0x80}),
			expected: false,
		},
		{
			name:     "Three byte - missing last",
			input:    "hi" + string([]byte{0xe0, 0xA0}),
			expected: true,
		},
		{
			name:     "Three byte - missing last 2",
			input:    "hi" + string([]byte{0xe0}),
			expected: true,
		},
		{
			name:     "Four byte",
			input:    "hi" + string([]byte{0xf0, 0x92, 0x8a, 0xb7}),
			expected: false,
		},
		{
			name:     "Four byte - missing last",
			input:    "hi" + string([]byte{0xf0, 0x92, 0x8a}),
			expected: true,
		},
		{
			name:     "Four byte - missing last 2",
			input:    "hi" + string([]byte{0xf0, 0x92}),
			expected: true,
		},
		{
			name:     "Four byte - missing last 3",
			input:    "hi" + string([]byte{0xf0}),
			expected: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := IncompleteUnicode(tt.input)
			if result != tt.expected {
				t.Errorf("incompleteUnicode(%s): have %v; want %v", tt.input, result, tt.expected)
			}
		})
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/common/logprob_test.go
runner/common/logprob_test.go
package common

import (
	"math"
	"testing"

	"github.com/ollama/ollama/llm"
)

// TestCalculateLogprobs checks the basic contract: empty logits yield no
// results, and the selected token plus optional top-K list come back with
// the expected sizes and decoded text.
func TestCalculateLogprobs(t *testing.T) {
	tokens := map[int]string{
		0: "hello",
		1: "hi",
		2: "hey",
		3: "world",
	}
	decoder := func(tokenID int) string {
		if text, ok := tokens[tokenID]; ok {
			return text
		}
		return ""
	}

	tests := []struct {
		name          string
		logits        []float32
		selectedToken int
		topK          int
		wantLen       int
		wantToken     string
	}{
		{
			name:          "Empty logits",
			logits:        []float32{},
			selectedToken: 0,
			topK:          0,
			wantLen:       0,
		},
		{
			name:          "Single token without top logprobs",
			logits:        []float32{1.0, 0.5, 0.3, 0.1},
			selectedToken: 0,
			topK:          0,
			wantLen:       1,
			wantToken:     "hello",
		},
		{
			name:          "Single token with top logprobs",
			logits:        []float32{1.0, 0.5, 0.3, 0.1},
			selectedToken: 0,
			topK:          3,
			wantLen:       1,
			wantToken:     "hello",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := CalculateLogprobs(tt.logits, tt.selectedToken, tt.topK, decoder)
			if len(result) != tt.wantLen {
				t.Errorf("CalculateLogprobs() returned %d results, want %d", len(result), tt.wantLen)
			}
			if tt.wantLen > 0 && result[0].Token != tt.wantToken {
				t.Errorf("CalculateLogprobs() token = %s, want %s", result[0].Token, tt.wantToken)
			}
			if tt.topK > 0 && len(result) > 0 {
				if len(result[0].TopLogprobs) != tt.topK {
					t.Errorf("CalculateLogprobs() top logprobs count = %d, want %d", len(result[0].TopLogprobs), tt.topK)
				}
			}
		})
	}
}

// TestCalculateLogprobsNumericalStability feeds very large logits to verify
// the stable softmax never produces Inf/NaN and keeps top-K sorted.
func TestCalculateLogprobsNumericalStability(t *testing.T) {
	tokens := map[int]string{
		0: "a",
		1: "b",
		2: "c",
	}
	decoder := func(tokenID int) string {
		if text, ok := tokens[tokenID]; ok {
			return text
		}
		return ""
	}

	// Test with very large logits to ensure numerical stability
	logits := []float32{1000.0, 999.0, 998.0}
	result := CalculateLogprobs(logits, 0, 3, decoder)

	if len(result) != 1 {
		t.Fatalf("Expected 1 result, got %d", len(result))
	}

	// Check that log probabilities are finite and reasonable
	if math.IsInf(result[0].Logprob, 0) || math.IsNaN(result[0].Logprob) {
		t.Errorf("Selected token logprob is not finite: %f", result[0].Logprob)
	}

	for i, tlp := range result[0].TopLogprobs {
		if math.IsInf(tlp.Logprob, 0) || math.IsNaN(tlp.Logprob) {
			t.Errorf("Top logprob[%d] is not finite: %f", i, tlp.Logprob)
		}
	}

	// Top logprobs should be in descending order
	for i := 1; i < len(result[0].TopLogprobs); i++ {
		if result[0].TopLogprobs[i].Logprob > result[0].TopLogprobs[i-1].Logprob {
			t.Errorf("Top logprobs not in descending order: %f > %f", result[0].TopLogprobs[i].Logprob, result[0].TopLogprobs[i-1].Logprob)
		}
	}
}

// TestCalculateLogprobsProbabilityCorrectness validates probabilistic
// invariants: logprobs <= 0, descending top-K, the selected token present in
// top-K with a matching value, and exact probability for uniform logits.
func TestCalculateLogprobsProbabilityCorrectness(t *testing.T) {
	tokens := map[int]string{
		0: "hello",
		1: "world",
		2: "foo",
		3: "bar",
	}
	decoder := func(tokenID int) string {
		if text, ok := tokens[tokenID]; ok {
			return text
		}
		return ""
	}

	tests := []struct {
		name          string
		logits        []float32
		selectedToken int
		topK          int
	}{
		{
			name:          "Uniform logits",
			logits:        []float32{1.0, 1.0, 1.0, 1.0},
			selectedToken: 0,
			topK:          4,
		},
		{
			name:          "Different logits",
			logits:        []float32{2.0, 1.0, 0.5, 0.1},
			selectedToken: 0,
			topK:          4,
		},
		{
			name:          "Negative logits",
			logits:        []float32{-1.0, -2.0, -3.0, -4.0},
			selectedToken: 0,
			topK:          4,
		},
		{
			name:          "Mixed logits",
			logits:        []float32{5.0, -5.0, 0.0, 2.5},
			selectedToken: 0,
			topK:          4,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := CalculateLogprobs(tt.logits, tt.selectedToken, tt.topK, decoder)

			if len(result) != 1 {
				t.Fatalf("Expected 1 result, got %d", len(result))
			}

			// Verify all probabilities are non-positive (log probabilities should be <= 0)
			if result[0].Logprob > 0 {
				t.Errorf("Selected token logprob should be <= 0, got %f", result[0].Logprob)
			}

			for i, tlp := range result[0].TopLogprobs {
				if tlp.Logprob > 0 {
					t.Errorf("Top logprob[%d] should be <= 0, got %f", i, tlp.Logprob)
				}
			}

			// Verify that probabilities sum to approximately 1
			// Sum of exp(logprob) for all tokens should equal 1
			var probSum float64
			for _, lp := range result[0].TopLogprobs {
				probSum += math.Exp(lp.Logprob)
			}

			// For uniform logits, each probability should be 1/n
			if tt.name == "Uniform logits" {
				expectedProb := 1.0 / float64(len(tt.logits))
				actualProb := math.Exp(result[0].Logprob)
				if math.Abs(actualProb-expectedProb) > 1e-6 {
					t.Errorf("For uniform logits, expected probability %f, got %f", expectedProb, actualProb)
				}
			}

			// Verify top logprobs are sorted in descending order
			for i := 1; i < len(result[0].TopLogprobs); i++ {
				if result[0].TopLogprobs[i].Logprob > result[0].TopLogprobs[i-1].Logprob {
					t.Errorf("Top logprobs not sorted: position %d (%f) > position %d (%f)",
						i, result[0].TopLogprobs[i].Logprob, i-1, result[0].TopLogprobs[i-1].Logprob)
				}
			}

			// Verify the selected token appears in top logprobs
			selectedText := decoder(tt.selectedToken)
			found := false
			for _, tlp := range result[0].TopLogprobs {
				if tlp.Token == selectedText {
					found = true
					// The logprob in top logprobs should match the selected token's logprob
					if math.Abs(tlp.Logprob-result[0].Logprob) > 1e-6 {
						t.Errorf("Selected token logprob mismatch: main=%f, in top=%f", result[0].Logprob, tlp.Logprob)
					}
					break
				}
			}
			if !found {
				t.Errorf("Selected token %q not found in top logprobs", selectedText)
			}
		})
	}
}

// TestCalculateLogprobsSoftmaxCorrectness sums exp(logprob) over every token
// for several logit distributions and requires the total to be ~1.0.
func TestCalculateLogprobsSoftmaxCorrectness(t *testing.T) {
	// Test that softmax calculation is correct by verifying probabilities sum to 1
	decoder := func(tokenID int) string {
		return string(rune('A' + tokenID))
	}

	tests := []struct {
		name   string
		logits []float32
	}{
		{
			name:   "Small vocabulary",
			logits: []float32{1.0, 2.0, 3.0},
		},
		{
			name:   "Large differences",
			logits: []float32{10.0, 0.0, -10.0},
		},
		{
			name:   "All equal",
			logits: []float32{5.0, 5.0, 5.0, 5.0, 5.0},
		},
		{
			name:   "Very large values",
			logits: []float32{500.0, 499.0, 498.0},
		},
		{
			name:   "Very small values",
			logits: []float32{-500.0, -499.0, -498.0},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Calculate logprobs for all tokens
			var totalProb float64
			for i := range tt.logits {
				result := CalculateLogprobs(tt.logits, i, 0, decoder)
				if len(result) != 1 {
					t.Fatalf("Expected 1 result, got %d", len(result))
				}
				prob := math.Exp(result[0].Logprob)
				totalProb += prob

				// Verify each probability is between 0 and 1
				if prob < 0 || prob > 1 {
					t.Errorf("Token %d probability %f is out of range [0, 1]", i, prob)
				}
			}

			// Total probability should be very close to 1.0 (allowing for floating point errors)
			if math.Abs(totalProb-1.0) > 1e-5 {
				t.Errorf("Total probability sum is %f, expected 1.0", totalProb)
			}
		})
	}
}

// TestCalculateLogprobsSelectedTokenCorrectness verifies the decoded token
// text and that the highest logit maps to the highest probability.
func TestCalculateLogprobsSelectedTokenCorrectness(t *testing.T) {
	decoder := func(tokenID int) string {
		return string(rune('A' + tokenID))
	}

	logits := []float32{3.0, 1.0, 2.0, 0.5}

	// Test that selecting different tokens gives the correct probabilities
	// and that the highest logit has the highest probability
	maxLogitIndex := 0
	maxLogitValue := logits[0]
	for i, logit := range logits[1:] {
		if logit > maxLogitValue {
			maxLogitValue = logit
			maxLogitIndex = i + 1
		}
	}

	var maxProb float64
	var maxProbIndex int
	for i := range logits {
		result := CalculateLogprobs(logits, i, 0, decoder)
		prob := math.Exp(result[0].Logprob)
		if prob > maxProb {
			maxProb = prob
			maxProbIndex = i
		}

		// Verify the token matches
		expectedToken := decoder(i)
		if result[0].Token != expectedToken {
			t.Errorf("Token %d: expected token %q, got %q", i, expectedToken, result[0].Token)
		}
	}

	// The token with the highest logit should have the highest probability
	if maxProbIndex != maxLogitIndex {
		t.Errorf("Token with highest probability (%d) doesn't match token with highest logit (%d)", maxProbIndex, maxLogitIndex)
	}
}

// TestCalculateLogprobsTopKOrdering feeds logits in scrambled order and
// checks top-K comes back ranked by probability.
func TestCalculateLogprobsTopKOrdering(t *testing.T) {
	tokens := map[int]string{
		0: "first",
		1: "second",
		2: "third",
		3: "fourth",
		4: "fifth",
	}
	decoder := func(tokenID int) string {
		return tokens[tokenID]
	}

	// Logits in non-sorted order
	logits := []float32{2.0, 5.0, 1.0, 4.0, 3.0}
	// Expected order by probability: 1 (5.0), 3 (4.0), 4 (3.0), 0 (2.0), 2 (1.0)
	expectedOrder := []string{"second", "fourth", "fifth", "first", "third"}

	result := CalculateLogprobs(logits, 0, 5, decoder)
	if len(result) != 1 {
		t.Fatalf("Expected 1 result, got %d", len(result))
	}

	if len(result[0].TopLogprobs) != 5 {
		t.Fatalf("Expected 5 top logprobs, got %d", len(result[0].TopLogprobs))
	}

	// Verify ordering matches expected
	for i, tlp := range result[0].TopLogprobs {
		if tlp.Token != expectedOrder[i] {
			t.Errorf("Position %d: expected token %q, got %q", i, expectedOrder[i], tlp.Token)
		}
	}

	// Verify probabilities are in descending order
	for i := 1; i < len(result[0].TopLogprobs); i++ {
		if result[0].TopLogprobs[i].Logprob > result[0].TopLogprobs[i-1].Logprob {
			t.Errorf("Probabilities not in descending order at position %d: %f > %f",
				i, result[0].TopLogprobs[i].Logprob, result[0].TopLogprobs[i-1].Logprob)
		}
	}
}

// TestLogprobsWithStopSequences simulates the runner's stop-sequence
// handling: TruncateStop trims the response pieces and the logprobs slice
// must be trimmed to match, even when the last token is partially cut.
func TestLogprobsWithStopSequences(t *testing.T) {
	tests := []struct {
		name              string
		pendingResponses  []string
		pendingLogprobs   []llm.Logprob
		stop              string
		expectedResponses []string
		expectedLogprobs  int
	}{
		{
			name:             "Single token stop",
			pendingResponses: []string{"Hello", " world", "!"},
			pendingLogprobs: []llm.Logprob{
				{TokenLogprob: llm.TokenLogprob{Token: "Hello", Logprob: -0.1}},
				{TokenLogprob: llm.TokenLogprob{Token: " world", Logprob: -0.2}},
				{TokenLogprob: llm.TokenLogprob{Token: "!", Logprob: -0.3}},
			},
			stop:              "!",
			expectedResponses: []string{"Hello", " world"},
			expectedLogprobs:  2,
		},
		{
			name:             "Multi-token stop sequence",
			pendingResponses: []string{"Hello", " ", "there", "STOP"},
			pendingLogprobs: []llm.Logprob{
				{TokenLogprob: llm.TokenLogprob{Token: "Hello", Logprob: -0.1}},
				{TokenLogprob: llm.TokenLogprob{Token: " ", Logprob: -0.2}},
				{TokenLogprob: llm.TokenLogprob{Token: "there", Logprob: -0.3}},
				{TokenLogprob: llm.TokenLogprob{Token: "STOP", Logprob: -0.4}},
			},
			stop:              "STOP",
			expectedResponses: []string{"Hello", " ", "there"},
			expectedLogprobs:  3,
		},
		{
			name:             "Partial token stop",
			pendingResponses: []string{"Hello", " the", "re!"},
			pendingLogprobs: []llm.Logprob{
				{TokenLogprob: llm.TokenLogprob{Token: "Hello", Logprob: -0.1}},
				{TokenLogprob: llm.TokenLogprob{Token: " the", Logprob: -0.2}},
				{TokenLogprob: llm.TokenLogprob{Token: "re!", Logprob: -0.3}},
			},
			stop:              "there!",
			expectedResponses: []string{"Hello", " "},
			expectedLogprobs:  2,
		},
		{
			name:             "Stop at beginning of last token",
			pendingResponses: []string{"Hello", " world", "END"},
			pendingLogprobs: []llm.Logprob{
				{TokenLogprob: llm.TokenLogprob{Token: "Hello", Logprob: -0.1}},
				{TokenLogprob: llm.TokenLogprob{Token: " world", Logprob: -0.2}},
				{TokenLogprob: llm.TokenLogprob{Token: "END", Logprob: -0.3}},
			},
			stop:              "END",
			expectedResponses: []string{"Hello", " world"},
			expectedLogprobs:  2,
		},
		{
			name:             "Multi-token stop across tokens",
			pendingResponses: []string{"Text", " ", "with", " ", "stop", " ", "word"},
			pendingLogprobs: []llm.Logprob{
				{TokenLogprob: llm.TokenLogprob{Token: "Text", Logprob: -0.1}},
				{TokenLogprob: llm.TokenLogprob{Token: " ", Logprob: -0.2}},
				{TokenLogprob: llm.TokenLogprob{Token: "with", Logprob: -0.3}},
				{TokenLogprob: llm.TokenLogprob{Token: " ", Logprob: -0.4}},
				{TokenLogprob: llm.TokenLogprob{Token: "stop", Logprob: -0.5}},
				{TokenLogprob: llm.TokenLogprob{Token: " ", Logprob: -0.6}},
				{TokenLogprob: llm.TokenLogprob{Token: "word", Logprob: -0.7}},
			},
			stop:              "stop word",
			expectedResponses: []string{"Text", " ", "with", " "},
			expectedLogprobs:  4,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Simulate the stop sequence detection and truncation
			origLen := len(tt.pendingResponses)
			responses, tokenTruncated := TruncateStop(tt.pendingResponses, tt.stop)
			newLen := len(responses)

			// Simulate logprobs truncation
			logprobs := make([]llm.Logprob, len(tt.pendingLogprobs))
			copy(logprobs, tt.pendingLogprobs)
			origLogprobsLen := len(logprobs)
			numTokensRemoved := origLen - newLen
			newLogprobsLen := origLogprobsLen - numTokensRemoved
			if newLogprobsLen < 0 {
				newLogprobsLen = 0
			}
			logprobs = logprobs[:newLogprobsLen]

			// Verify responses were truncated correctly
			if len(responses) != len(tt.expectedResponses) {
				t.Errorf("Expected %d responses, got %d", len(tt.expectedResponses), len(responses))
			}

			// Verify logprobs count matches truncated responses
			if len(logprobs) != tt.expectedLogprobs {
				t.Errorf("Expected %d logprobs after truncation, got %d", tt.expectedLogprobs, len(logprobs))
			}

			// Verify logprobs count matches response count
			if len(logprobs) != len(responses) {
				t.Errorf("Logprobs count (%d) doesn't match responses count (%d)", len(logprobs), len(responses))
			}

			// Verify the correct logprobs were kept (skip last token if it was truncated)
			// When tokenTruncated is true, the last response token may not match the logprob token
			checkLen := len(logprobs)
			if tokenTruncated && checkLen > 0 {
				checkLen-- // Skip checking the last token when it was partially truncated
			}
			for i := range checkLen {
				if i < len(responses) && logprobs[i].Token != responses[i] {
					t.Errorf("Logprob[%d] token %q doesn't match response[%d] %q", i, logprobs[i].Token, i, responses[i])
				}
			}
		})
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/common/logprob.go
runner/common/logprob.go
package common import ( "math" "sort" "github.com/ollama/ollama/llm" ) // TokenDecoderFunc is a function that converts token IDs to text. type TokenDecoderFunc func(tokenID int) string // CalculateLogprobs converts raw logits to log probabilities and finds top K tokens. // It uses numerically stable softmax to compute log probabilities. func CalculateLogprobs(logits []float32, selectedToken int, topK int, decoder TokenDecoderFunc) []llm.Logprob { if len(logits) == 0 { return nil } // Step 1: Convert logits to log probabilities using numerically stable softmax maxLogit := logits[0] for _, logit := range logits[1:] { if logit > maxLogit { maxLogit = logit } } var sumExp float64 for _, logit := range logits { sumExp += math.Exp(float64(logit - maxLogit)) } logSumExp := float32(math.Log(sumExp)) logProbs := make([]float32, len(logits)) for i, logit := range logits { logProbs[i] = (logit - maxLogit) - logSumExp } // Step 2: Get selected token's information selectedLogprob := logProbs[selectedToken] selectedText := decoder(selectedToken) result := llm.Logprob{ TokenLogprob: llm.TokenLogprob{ Token: selectedText, Logprob: float64(selectedLogprob), }, } // Step 3: If topK requested, find the top K tokens if topK > 0 { type tokenLogprobPair struct { tokenID int logprob float32 } pairs := make([]tokenLogprobPair, len(logProbs)) for i, lp := range logProbs { pairs[i] = tokenLogprobPair{tokenID: i, logprob: lp} } sort.Slice(pairs, func(i, j int) bool { return pairs[i].logprob > pairs[j].logprob }) k := min(topK, len(pairs)) topLogprobs := make([]llm.TokenLogprob, k) for i := range k { tokenText := decoder(pairs[i].tokenID) topLogprobs[i] = llm.TokenLogprob{ Token: tokenText, Logprob: float64(pairs[i].logprob), } } result.TopLogprobs = topLogprobs } return []llm.Logprob{result} }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/ollamarunner/multimodal.go
runner/ollamarunner/multimodal.go
package ollamarunner

import (
	"errors"

	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/model/input"
)

// Tensors can't be used across multiple compute graphs. This is a problem
// if a single embedding is split across batches using views since all of
// the views will have the same source tensor. We also don't want to
// recompute the entire embedding for each batch.
//
// To avoid this, we compute all of the tensors for the embedding on the
// first use and then store the result in system memory. When we need
// additional tensors, we recreate them from the stored data.

// multimodalEntry represents the embeddings of a single object (such
// as an image).
type multimodalEntry struct {
	// mm is the original set of tensors created by EncodeMultimodal
	mm []input.Multimodal

	// data is the computed result of mm. Nil if not yet computed
	data [][]float32
}

// multimodalStore maps from an individual tensor (of which there
// may be many in a single multimodal object) to its parent embedding
type multimodalStore map[ml.Tensor]*multimodalEntry

func newMultimodalStore() multimodalStore {
	return make(multimodalStore)
}

// addMultimodal stores an embedding for later use in a compute graph
func (m multimodalStore) addMultimodal(embedding []input.Multimodal) {
	entry := &multimodalEntry{mm: embedding}

	// Every tensor in the embedding points back at the same entry so any of
	// them can trigger (and reuse) the one-time computation.
	for _, e := range embedding {
		if e.Tensor != nil {
			m[e.Tensor] = entry
		}
	}
}

// getMultimodal takes a source set of tensors (which may contain a whole or
// parts of one or more images) and returns the equivalent that can be used in
// the current context
func (m multimodalStore) getMultimodal(backend ml.Backend, ctx ml.Context, in []input.Multimodal, reserve bool) ([]input.Multimodal, error) {
	out := make([]input.Multimodal, len(in))
	for i := range out {
		if in[i].Tensor != nil {
			var err error
			out[i].Tensor, err = m.getTensor(backend, ctx, in[i].Tensor, reserve)
			if err != nil {
				return nil, err
			}
		}

		// Non-tensor payloads pass through untouched.
		out[i].Data = in[i].Data
	}

	return out, nil
}

// getTensor returns a tensor usable in ctx that is equivalent to in. On the
// first call for an entry it computes all of the entry's tensors in a
// temporary context and caches the results in system memory; subsequent
// calls recreate tensors from that cached data. When reserve is true no
// actual computation happens and empty placeholder tensors are returned
// (used for worst-case memory reservation).
func (m multimodalStore) getTensor(backend ml.Backend, ctx ml.Context, in ml.Tensor, reserve bool) (ml.Tensor, error) {
	entry := m[in]

	if entry.data == nil {
		computeCtx := backend.NewContext()
		defer computeCtx.Close()

		var tensors []ml.Tensor
		for _, t := range entry.mm {
			if t.Tensor != nil {
				tensors = append(tensors, t.Tensor)
			}
		}

		if len(tensors) == 0 {
			return nil, nil
		}

		computeCtx.Forward(tensors...)
		entry.data = make([][]float32, len(entry.mm))

		// Multimodal processing is computationally intensive, so treat it similarly to a large batch
		computeCtx.SetBatchSize(512)

		if !reserve {
			computeCtx.Compute(tensors...)

			// Pull every computed tensor back into system memory so later
			// batches can rebuild views without recomputing.
			for i, t := range entry.mm {
				if t.Tensor != nil {
					entry.data[i] = t.Tensor.Floats()
				}
			}
		} else {
			computeCtx.Reserve()
		}
	}

	// Find the requested tensor within the entry and materialize it in the
	// caller's context.
	for i, t := range entry.mm {
		if in == t.Tensor {
			if !reserve {
				return ctx.Input().FromFloats(entry.data[i], t.Tensor.Shape()...), nil
			} else {
				return ctx.Input().Empty(t.Tensor.DType(), t.Tensor.Shape()...), nil
			}
		}
	}

	return nil, errors.New("multimodal tensor not found")
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/ollamarunner/cache.go
runner/ollamarunner/cache.go
package ollamarunner

import (
	"errors"
	"fmt"
	"log/slog"
	"math"
	"time"

	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

// InputCache tracks per-slot input history and the underlying KV cache so
// that prompts sharing a prefix with earlier requests can skip reprocessing.
type InputCache struct {
	// context window size (per slot)
	numCtx int32

	// does the cache store data or do we need to always send the full input?
	// note that when enabled is false the underlying cache may either be nil
	// or a non-nil dummy that doesn't actually store anything
	enabled bool

	// individual KV caches
	slots []InputCacheSlot

	// optimize cache eviction for multiple users
	multiUserCache bool

	cache kvcache.Cache
}

// NewInputCache divides kvSize evenly across numSlots slots and initializes
// the model's KV cache (if it has one). It fails when the per-slot context
// would be smaller than a batch.
func NewInputCache(model model.Model, kvCacheType string, kvSize int32, numSlots int, batchSize int, multiUserCache bool) (*InputCache, error) {
	numCtx := kvSize / int32(numSlots)

	if int(numCtx) < batchSize {
		return nil, fmt.Errorf("kv size must be at least as large as batch size * parallel (kv: %v batch: %v parallel: %v)", kvSize, batchSize, numSlots)
	}

	slots := make([]InputCacheSlot, numSlots)

	for i := range slots {
		slots[i] = InputCacheSlot{Id: i}
	}

	cache := model.Config().Cache
	if cache != nil {
		cache.Init(model.Backend(), kvCacheTypeFromStr(kvCacheType), numSlots, int(numCtx), batchSize)
	}

	return &InputCache{
		numCtx:         numCtx,
		enabled:        cache != nil,
		slots:          slots,
		multiUserCache: multiUserCache,
		cache:          cache,
	}, nil
}

// kvCacheTypeFromStr maps a cache-type string to its tensor dtype,
// defaulting to f16 for unrecognized values.
func kvCacheTypeFromStr(s string) ml.DType {
	switch s {
	case "q8_0":
		return ml.DTypeQ80
	case "q4_0":
		return ml.DTypeQ40
	default:
		return ml.DTypeF16
	}
}

// Close releases the underlying KV cache; safe on a nil receiver.
func (c *InputCache) Close() {
	if c != nil && c.cache != nil {
		c.cache.Close()
	}
}

// Locking: Operations on InputCacheSlot (including finding one
// through LoadCacheSlot) require a lock to be held that serializes
// these operations with each other and processBatch

// InputCacheSlot is one partition of the KV cache plus the inputs it holds.
type InputCacheSlot struct {
	// Index in the KV cache
	Id int

	// Inputs that are stored in the KV cache
	Inputs []*input.Input

	// is this cache actively being processed as part of a sequence?
	InUse bool

	// last time this cache was used (as of start of processing)
	lastUsed time.Time
}

// LoadCacheSlot picks a slot for prompt, reuses as much cached prefix as
// possible (when cachePrompt is true), trims the KV cache to the reused
// length, and returns the slot plus the remaining un-cached prompt inputs.
func (c *InputCache) LoadCacheSlot(prompt []*input.Input, cachePrompt bool) (*InputCacheSlot, []*input.Input, error) {
	var slot *InputCacheSlot
	var numPast int32
	var err error

	// In single-user scenarios, the longest cache slot works fine for getting good input
	// cache hit rates and it keeps the footprint of the cache small, which improves throughput.
	// For multiple users, the "best" cache slot produces better input cache hit rates
	// at the cost of worse performance when we miss the input cache.
	if !c.multiUserCache {
		slot, numPast, err = c.findLongestCacheSlot(prompt)
	} else {
		slot, numPast, err = c.findBestCacheSlot(prompt)
	}
	if err != nil {
		return nil, nil, err
	}

	if !cachePrompt {
		numPast = 0
	}

	slot.InUse = true
	slot.lastUsed = time.Now()

	if numPast == int32(len(prompt)) {
		// Leave one input to sample so we can get a response
		numPast--
	}

	if c.cache != nil {
		// If the cache cannot resume from the matched position, start over.
		if numPast > 0 && !c.cache.CanResume(slot.Id, numPast) {
			numPast = 0
		}

		err = c.cache.Remove(slot.Id, numPast, math.MaxInt32)
		if err != nil {
			// Some models don't support partial erasure
			err = c.cache.Remove(slot.Id, 0, math.MaxInt32)
			if err != nil {
				return nil, nil, err
			}
			numPast = 0
		}
	}

	slog.Debug("loading cache slot", "id", slot.Id, "cache", len(slot.Inputs), "prompt", len(prompt),
		"used", numPast, "remaining", int32(len(prompt))-numPast)

	slot.Inputs = prompt[:numPast]
	prompt = prompt[numPast:]

	return slot, prompt, nil
}

// findLongestCacheSlot returns the free slot whose stored inputs share the
// longest common prefix with prompt.
func (c *InputCache) findLongestCacheSlot(prompt []*input.Input) (*InputCacheSlot, int32, error) {
	longest := int32(-1)
	var longestSlot *InputCacheSlot

	for i, s := range c.slots {
		if s.InUse {
			continue
		}

		count := countCommonPrefix(s.Inputs, prompt)
		if count > longest {
			longest = count
			longestSlot = &c.slots[i]
		}
	}

	if longestSlot == nil {
		return nil, 0, errors.New("no available cache slots")
	}

	return longestSlot, longest, nil
}

// findBestCacheSlot prefers an exact reusable match; otherwise it evicts the
// least-recently-used free slot, forking the longest-matching prefix into it
// when that improves reuse.
func (c *InputCache) findBestCacheSlot(prompt []*input.Input) (*InputCacheSlot, int32, error) {
	oldest := time.Now()
	var oldestSlot *InputCacheSlot

	longest := int32(-1)
	var longestSlot *InputCacheSlot

	// Note: the longest match may be an in-use slot — its prefix can still
	// be copied into the evicted slot below.
	for i, s := range c.slots {
		count := countCommonPrefix(s.Inputs, prompt)
		if count > longest {
			longest = count
			longestSlot = &c.slots[i]
		}

		if s.lastUsed.Compare(oldest) < 0 && !s.InUse {
			oldest = s.lastUsed
			oldestSlot = &c.slots[i]
		}
	}

	if longest == int32(len(longestSlot.Inputs)) && !longestSlot.InUse {
		return longestSlot, longest, nil
	}

	if oldestSlot.InUse {
		return nil, 0, errors.New("no available cache slots")
	}

	if len(oldestSlot.Inputs) != 0 {
		slog.Debug("evicting cache slot", "id", oldestSlot.Id, "inputs", len(oldestSlot.Inputs),
			"used", oldestSlot.lastUsed)
	}

	if longest > 0 && longestSlot != oldestSlot {
		slog.Debug("forking cache slot", "src", longestSlot.Id, "dst", oldestSlot.Id, "inputs", longest, "total",
			len(longestSlot.Inputs))
		oldestSlot.Inputs = make([]*input.Input, longest)
		copy(oldestSlot.Inputs, longestSlot.Inputs[:longest])
		if c.cache != nil {
			c.cache.CopyPrefix(longestSlot.Id, oldestSlot.Id, longest)
		}
	}

	return oldestSlot, longest, nil
}

// countCommonPrefix returns how many leading inputs of a and b agree on both
// token and multimodal hash.
func countCommonPrefix(a []*input.Input, b []*input.Input) int32 {
	var count int32

	for i := range a {
		if i >= len(b) {
			break
		}

		if a[i].Token != b[i].Token || a[i].MultimodalHash != b[i].MultimodalHash {
			break
		}

		count++
	}

	return count
}

// ShiftDiscard computes how many inputs can be discarded from the cache. Inputs in the same batch
// are discarded together.
func (c *InputCache) ShiftDiscard(inputs []*input.Input, numKeep int32) int32 {
	// Aim to free roughly half of the post-numKeep context (at least 1).
	targetFree := max((c.numCtx-numKeep)/2, 1)
	currentFree := c.numCtx - int32(len(inputs))

	var discard, sameBatch int32
	for _, input := range inputs[numKeep:] {
		// Keep discarding while we are short of the target, and never split
		// a multi-input batch (sameBatch counts down the batch remainder).
		if sameBatch <= 0 && currentFree >= targetFree {
			break
		}
		sameBatch--
		currentFree++
		discard++

		if input.SameBatch > 0 {
			sameBatch = int32(input.SameBatch)
		}
	}

	return discard
}

// ErrReprocessInputs signals that the KV cache could not be shifted and the
// carried inputs must be fed through the model again.
type ErrReprocessInputs struct {
	Inputs []*input.Input
}

func (e *ErrReprocessInputs) Error() string {
	return fmt.Sprintf("kv cache shift not supported, inputs need reprocessing (input count: %v)", len(e.Inputs))
}

// Frees up space in the KV cache by deleting the oldest half of history and shifting
// the newest half into that space (saving numKeep inputs at the beginning).
//
// Assumes that at least 1 entry can be freed up by shifting (i.e. numKeep < numCtx)
func (c *InputCache) ShiftCacheSlot(slot *InputCacheSlot, numKeep int32) error {
	if numKeep >= c.numCtx {
		return fmt.Errorf("unable to shift context - keep exceeds context (keep: %v context: %v)", numKeep, c.numCtx)
	}

	inputLen := int32(len(slot.Inputs))
	discard := c.ShiftDiscard(slot.Inputs, numKeep)

	if discard <= 0 {
		return nil
	}

	slog.Debug("context limit hit - shifting", "id", slot.Id, "limit", c.numCtx, "input", len(slot.Inputs),
		"keep", numKeep, "discard", discard)

	if c.cache != nil {
		err := c.cache.Remove(slot.Id, numKeep, numKeep+discard)
		if err != nil {
			slog.Debug("kv cache removal unsupported, clearing cache and returning inputs for reprocessing",
				"id", slot.Id, "error", err)

			// Create new input slice with preserved tokens (numKeep + remaining tokens after discard)
			newInputs := make([]*input.Input, numKeep+inputLen-(numKeep+discard))
			copy(newInputs[:numKeep], slot.Inputs[:numKeep])
			copy(newInputs[numKeep:], slot.Inputs[numKeep+discard:])

			// Reset the cache
			_ = c.cache.Remove(slot.Id, 0, math.MaxInt32)
			slot.Inputs = []*input.Input{}

			// Return error with inputs that need to be reprocessed
			return &ErrReprocessInputs{Inputs: newInputs}
		}
	}

	// Compact the in-memory input history to mirror the KV cache shift.
	for i := numKeep + discard; i < inputLen; i++ {
		slot.Inputs[i-discard] = slot.Inputs[i]
	}
	slot.Inputs = slot.Inputs[:inputLen-discard]

	return nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/ollamarunner/cache_test.go
runner/ollamarunner/cache_test.go
package ollamarunner import ( "errors" "fmt" "slices" "testing" "time" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model/input" ) func TestCountCommon(t *testing.T) { tests := []struct { name string t1 []*input.Input t2 []*input.Input expected int32 }{ { name: "Equal", t1: []*input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, t2: []*input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, expected: 3, }, { name: "Prefix", t1: []*input.Input{{Token: 1}}, t2: []*input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, expected: 1, }, { name: "Image Prefix", t1: []*input.Input{{MultimodalHash: 1}}, t2: []*input.Input{{MultimodalHash: 1}, {MultimodalHash: 2}, {MultimodalHash: 3}}, expected: 1, }, { name: "Mixed", t1: []*input.Input{{Token: 1}, {MultimodalHash: 1}}, t2: []*input.Input{{Token: 1}, {MultimodalHash: 1}, {Token: 5}}, expected: 2, }, { name: "Mixed, Same Length", t1: []*input.Input{{Token: 1}, {MultimodalHash: 1}}, t2: []*input.Input{{Token: 1}, {MultimodalHash: 2}}, expected: 1, }, { name: "Empty", t1: []*input.Input{}, t2: []*input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, expected: 0, }, { name: "Both Empty", t1: []*input.Input{}, t2: []*input.Input{}, expected: 0, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := countCommonPrefix(tt.t1, tt.t2) if result != tt.expected { t.Errorf("countCommonPrefix(%v, %v): have %v; want %v", tt.t1, tt.t2, result, tt.expected) } }) } } func TestFindCacheSlot(t *testing.T) { type expected struct { result int len int32 } tests := []struct { name string cache InputCache prompt []*input.Input longest expected best expected }{ { name: "Empty", cache: InputCache{slots: []InputCacheSlot{ { Id: 0, Inputs: []*input.Input{}, InUse: false, lastUsed: time.Time{}, }, { Id: 1, Inputs: []*input.Input{}, InUse: false, lastUsed: time.Time{}, }, }}, prompt: []*input.Input{{Token: 1}}, longest: expected{result: 0, len: 0}, best: expected{result: 0, len: 0}, }, { name: "Extend", cache: InputCache{slots: 
[]InputCacheSlot{ { Id: 0, Inputs: []*input.Input{{Token: 1}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []*input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, prompt: []*input.Input{{Token: 1}, {Token: 2}}, longest: expected{result: 1, len: 2}, best: expected{result: 1, len: 2}, }, { name: "New", cache: InputCache{slots: []InputCacheSlot{ { Id: 0, Inputs: []*input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []*input.Input{}, InUse: false, lastUsed: time.Time{}, }, }}, prompt: []*input.Input{{Token: 2}}, longest: expected{result: 0, len: 0}, best: expected{result: 1, len: 0}, }, { name: "Fork", cache: InputCache{ slots: []InputCacheSlot{ { Id: 0, Inputs: []*input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []*input.Input{}, InUse: false, lastUsed: time.Time{}, }, }, }, prompt: []*input.Input{{Token: 1}}, longest: expected{result: 0, len: 1}, best: expected{result: 1, len: 1}, }, { name: "Evict", cache: InputCache{slots: []InputCacheSlot{ { Id: 0, Inputs: []*input.Input{{Token: 1}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []*input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, prompt: []*input.Input{{Token: 2}, {Token: 3}}, longest: expected{result: 0, len: 0}, best: expected{result: 1, len: 0}, }, { name: "In use", cache: InputCache{slots: []InputCacheSlot{ { Id: 0, Inputs: []*input.Input{{Token: 1}, {Token: 2}}, InUse: true, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []*input.Input{{Token: 1}}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }}, prompt: []*input.Input{{Token: 1}, {Token: 2}}, longest: expected{result: 1, len: 1}, best: expected{result: 1, len: 2}, }, } for _, tt := range tests { t.Run("Longest-"+tt.name, func(t *testing.T) { result, 
resultLen, err := tt.cache.findLongestCacheSlot(tt.prompt) if err != nil { t.Errorf("findLongestCacheSlot: err %v", err) } else if result.Id != tt.longest.result || resultLen != tt.longest.len { t.Errorf("findLongestCacheSlot: slot have %v, want %v len have %v, want %v", result.Id, tt.longest.result, resultLen, tt.longest.len) } }) } for _, tt := range tests { t.Run("Best-"+tt.name, func(t *testing.T) { result, resultLen, err := tt.cache.findBestCacheSlot(tt.prompt) if err != nil { t.Errorf("findBestCacheSlot: err %v", err) } else if result.Id != tt.best.result || resultLen != tt.best.len { t.Errorf("findBestCacheSlot: slot have %v, want %v len have %v, want %v", result.Id, tt.best.result, resultLen, tt.best.len) } }) } } func TestShiftDiscard(t *testing.T) { tests := []struct { name string numCtx int32 numKeep int32 inputs []*input.Input expected int32 }{ { name: "Shift", numCtx: 2048, numKeep: 5, inputs: slices.Repeat([]*input.Input{{}}, 2048), expected: 1021, }, { name: "Max Keep", numCtx: 2048, numKeep: 2047, inputs: slices.Repeat([]*input.Input{{}}, 2048), expected: 1, }, { name: "No Keep", numCtx: 2048, numKeep: 0, inputs: slices.Repeat([]*input.Input{{}}, 2048), expected: 1024, }, { name: "Truncate", numCtx: 2048, numKeep: 5, inputs: slices.Repeat([]*input.Input{{}}, 5000), expected: 3973, }, { name: "Truncate Keep", numCtx: 2048, numKeep: 2047, inputs: slices.Repeat([]*input.Input{{}}, 5000), expected: 2953, }, { name: "No Op", numCtx: 2048, numKeep: 5, inputs: slices.Repeat([]*input.Input{{}}, 512), expected: 0, }, { name: "Same Batch", numCtx: 2048, numKeep: 5, inputs: slices.Collect(func(yield func(*input.Input) bool) { for range 1024 { if !yield(&input.Input{}) { return } } if !yield(&input.Input{SameBatch: 512 - 1}) { return } for range 2048 - 1024 - 1 { if !yield(&input.Input{}) { return } } }), expected: 1531, }, { name: "Same Batch Near Start", numCtx: 2048, numKeep: 5, inputs: slices.Collect(func(yield func(*input.Input) bool) { for range 10 { if 
!yield(&input.Input{}) { return } } if !yield(&input.Input{SameBatch: 512 - 1}) { return } for range 2048 - 10 - 1 { if !yield(&input.Input{}) { return } } }), expected: 1021, }, { name: "Consecutive Same Batch", numCtx: 32, inputs: slices.Collect(func(yield func(*input.Input) bool) { for i := range 32 { input := input.Input{} if i%10 == 0 { input.SameBatch = 10 - 1 } if !yield(&input) { return } } }), expected: 20, }, { name: "Overlapping Same Batch", numCtx: 32, inputs: slices.Collect(func(yield func(*input.Input) bool) { for i := range 32 { input := input.Input{} if slices.Contains([]int{4, 8, 14}, i) { input.SameBatch = 10 - 1 } if !yield(&input) { return } } }), expected: 24, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := InputCache{numCtx: tt.numCtx} result := c.ShiftDiscard(tt.inputs, tt.numKeep) if result != tt.expected { t.Errorf("shiftDiscard(ctx: %v, keep: %v inputs: %v): have %v; want %v", tt.numCtx, tt.numKeep, len(tt.inputs), result, tt.expected) } }) } } func TestLoadCacheSlot(t *testing.T) { tests := []struct { name string cache InputCache prompt []*input.Input wantErr bool expectedSlotId int expectedPrompt int // expected length of remaining prompt }{ { name: "Basic cache hit - single user", cache: InputCache{ multiUserCache: false, slots: []InputCacheSlot{ { Id: 0, Inputs: []*input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []*input.Input{}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }, }, prompt: []*input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, wantErr: false, expectedSlotId: 0, expectedPrompt: 1, // Only token 3 remains }, { name: "Basic cache hit - multi user", cache: InputCache{ multiUserCache: true, slots: []InputCacheSlot{ { Id: 0, Inputs: []*input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, { Id: 1, Inputs: []*input.Input{}, InUse: false, lastUsed: time.Now().Add(-2 * time.Second), }, }, }, 
prompt: []*input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, wantErr: false, expectedSlotId: 0, expectedPrompt: 1, // Only token 3 remains }, { name: "Exact match - leave one input", cache: InputCache{ multiUserCache: false, slots: []InputCacheSlot{ { Id: 0, Inputs: []*input.Input{{Token: 1}, {Token: 2}}, InUse: false, lastUsed: time.Now().Add(-time.Second), }, }, }, prompt: []*input.Input{{Token: 1}, {Token: 2}}, wantErr: false, expectedSlotId: 0, expectedPrompt: 1, // Should leave 1 token for sampling }, { name: "No available slots", cache: InputCache{ multiUserCache: false, slots: []InputCacheSlot{ { Id: 0, Inputs: []*input.Input{{Token: 1}, {Token: 2}}, InUse: true, lastUsed: time.Now().Add(-time.Second), }, }, }, prompt: []*input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, wantErr: true, expectedSlotId: -1, expectedPrompt: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { slot, remainingPrompt, err := tt.cache.LoadCacheSlot(tt.prompt, true) // Check error state if (err != nil) != tt.wantErr { t.Errorf("LoadCacheSlot() error = %v, wantErr %v", err, tt.wantErr) return } if tt.wantErr { return // Skip further checks if we expected an error } // Verify slot ID if slot.Id != tt.expectedSlotId { t.Errorf("LoadCacheSlot() slot ID = %v, expected %v", slot.Id, tt.expectedSlotId) } // Verify slot is now marked in use if !slot.InUse { t.Errorf("LoadCacheSlot() slot not marked InUse") } // Verify remaining prompt length if len(remainingPrompt) != tt.expectedPrompt { t.Errorf("LoadCacheSlot() remaining prompt length = %v, expected %v", len(remainingPrompt), tt.expectedPrompt) } }) } } // Mock implementation of the Cache interface type mockCache struct { shouldFail bool } // Implement only the methods needed for the test func (m *mockCache) Remove(seq int, beginIndex, endIndex int32) error { if m.shouldFail { return fmt.Errorf("mock cache removal error") } return nil } // Stub implementations for other interface methods func (m *mockCache) SetLayer(layer 
int) {} func (m *mockCache) Get(ctx ml.Context) (ml.Tensor, ml.Tensor, ml.Tensor) { return nil, nil, nil } func (m *mockCache) Put(ctx ml.Context, key, value ml.Tensor) {} func (m *mockCache) Init(backend ml.Backend, dtype ml.DType, maxSequences, capacity, maxBatch int) {} func (m *mockCache) Close() {} func (m *mockCache) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error { return nil } func (m *mockCache) CopyPrefix(srcSeq, dstSeq int, len int32) {} func (m *mockCache) SetConfig(ml.CacheConfig) {} func (m *mockCache) CanResume(seq int, pos int32) bool { return true } func TestShiftCacheSlot(t *testing.T) { tests := []struct { name string numCtx int32 inputs []*input.Input numKeep int32 cacheErr bool wantErr any wantInputsLen int }{ { name: "Normal shift", numCtx: 10, inputs: []*input.Input{{Token: 1}, {Token: 2}, {Token: 3}, {Token: 4}, {Token: 5}, {Token: 6}, {Token: 7}, {Token: 8}, {Token: 9}, {Token: 10}}, numKeep: 2, cacheErr: false, // No error wantErr: nil, wantInputsLen: 6, // After discarding 4 tokens }, { name: "Cache removal fails", numCtx: 10, inputs: []*input.Input{{Token: 1}, {Token: 2}, {Token: 3}, {Token: 4}, {Token: 5}, {Token: 6}, {Token: 7}, {Token: 8}, {Token: 9}, {Token: 10}}, numKeep: 2, cacheErr: true, wantErr: &ErrReprocessInputs{}, wantInputsLen: 0, // Original inputs should be cleared }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mock := &mockCache{shouldFail: tt.cacheErr} c := InputCache{ numCtx: tt.numCtx, cache: mock, } slot := &InputCacheSlot{ Id: 123, Inputs: make([]*input.Input, len(tt.inputs)), } copy(slot.Inputs, tt.inputs) err := c.ShiftCacheSlot(slot, tt.numKeep) if tt.wantErr != nil { if err == nil { t.Errorf("Expected error but got nil") return } if !errors.As(err, &tt.wantErr) { t.Errorf("Expected error of type %T but got %T: %v", tt.wantErr, err, err) } } else if err != nil { t.Errorf("Unexpected error: %v", err) } if len(slot.Inputs) != tt.wantInputsLen { t.Errorf("Slot inputs length 
after operation: got %v, want %v", len(slot.Inputs), tt.wantInputsLen) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/runner/ollamarunner/runner.go
runner/ollamarunner/runner.go
package ollamarunner import ( "bytes" "context" "encoding/json" "errors" "flag" "fmt" "hash/maphash" "image" "log" "log/slog" "net" "net/http" "os" "reflect" "regexp" "runtime" "strconv" "strings" "sync" "time" "unicode/utf8" "golang.org/x/image/bmp" "golang.org/x/sync/semaphore" "github.com/ollama/ollama/api" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/logutil" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/ml/nn/pooling" "github.com/ollama/ollama/model" "github.com/ollama/ollama/model/input" "github.com/ollama/ollama/runner/common" "github.com/ollama/ollama/sample" _ "github.com/ollama/ollama/model/models" ) // response contains a piece of generated text along with optional logprobs type response struct { content string logprobs []llm.Logprob } type Sequence struct { // ctxs are used for allocating tensors that last the lifetime of the sequence, such as // multimodal embeddings ctxs []ml.Context // mmStore holds multimodal embeddings to mange memory and enable splitting across batches mmStore multimodalStore // batch index iBatch int // prompt inputs left to evaluate inputs []*input.Input // inputs that have been added to a batch but not yet submitted to Forward pendingInputs []*input.Input // tokens that have been generated but not returned yet (e.g. 
for stop sequences) pendingResponses []string // logprobs for tokens that haven't been returned yet pendingLogprobs []llm.Logprob // input cache being used by this sequence cache *InputCacheSlot // channel to send responses over responses chan response // channel to stop decoding (such as if the remote connection is closed) quit chan bool // number of tokens to predict numPredict int // sampler with transforms to run on generated logits sampler sample.Sampler // channel to send back the embedding if embedding only embedding chan []float32 // stop sequences stop []string // number of inputs to keep at the beginning when shifting context window numKeep int32 // true if an embedding are to be returned instead of text generation embeddingOnly bool // shift if context window is exceeded shift bool doneReason llm.DoneReason // logprobs configuration logprobs bool topLogprobs int // Metrics startedAt, lastUpdatedAt time.Time processingDuration time.Duration samplingDuration time.Duration numPredicted int numPromptInputs int } type NewSequenceParams struct { numPredict int stop []string numKeep int32 sampler sample.Sampler embedding bool shift bool truncate bool logprobs bool topLogprobs int } var errorInputTooLong = errors.New("the input length exceeds the context length") func (s *Server) NewSequence(prompt string, images []llm.ImageData, params NewSequenceParams) (*Sequence, error) { s.ready.Wait() inputs, ctxs, mmStore, err := s.inputs(prompt, images) if err != nil { return nil, fmt.Errorf("failed to process inputs: %w", err) } else if len(inputs) == 0 { return nil, errors.New("no input provided") } if params.numKeep < 0 { params.numKeep = int32(len(inputs)) } // Ensure that at least 1 input can be discarded during shift params.numKeep = min(params.numKeep, s.cache.numCtx-1) if int32(len(inputs)) > s.cache.numCtx { if !params.truncate { return nil, errorInputTooLong } discard := int32(len(inputs)) - s.cache.numCtx promptStart := params.numKeep + discard // If we need 
to truncate in the middle of a unbreakable batch, remove the entire batch sameBatch := 0 for i, inp := range inputs { if sameBatch > 0 { sameBatch-- if promptStart == int32(i) { promptStart++ } } else if promptStart == int32(i) { break } if inp.SameBatch != 0 { if int32(i) < params.numKeep { return nil, fmt.Errorf("SameBatch may not be specified within numKeep (index: %v numKeep: %v SameBatch: %v)", i, params.numKeep, inp.SameBatch) } sameBatch = inp.SameBatch } } if promptStart >= int32(len(inputs)) { return nil, errors.New("entire prompt removed by truncation") } newInputs := inputs[:params.numKeep] newInputs = append(newInputs, inputs[promptStart:]...) slog.Warn("truncating input prompt", "limit", s.cache.numCtx, "prompt", len(inputs), "keep", params.numKeep, "new", len(newInputs)) inputs = newInputs } // TODO(jessegross): Ingest cached history for grammar return &Sequence{ ctxs: ctxs, mmStore: mmStore, inputs: inputs, numPromptInputs: len(inputs), numPredict: params.numPredict, pendingResponses: make([]string, 0), responses: make(chan response, 100), quit: make(chan bool, 1), embedding: make(chan []float32, 1), sampler: params.sampler, embeddingOnly: params.embedding, stop: params.stop, numKeep: params.numKeep, shift: params.shift, logprobs: params.logprobs, topLogprobs: params.topLogprobs, }, nil } // calculateLogprobs converts raw logits to log probabilities and finds top K tokens func calculateLogprobs(logits []float32, selectedToken int32, topK int, textProcessor model.TextProcessor) []llm.Logprob { decoder := func(tokenID int) string { text, _ := textProcessor.Decode([]int32{int32(tokenID)}) return text } return common.CalculateLogprobs(logits, int(selectedToken), topK, decoder) } // inputs processes the prompt and images into a list of inputs // by splitting the prompt on [img-<n>] tags, tokenizing text and // decoding images func (s *Server) inputs(prompt string, images []llm.ImageData) ([]*input.Input, []ml.Context, multimodalStore, error) { var inputs 
[]*input.Input var ctxs []ml.Context var mmStore multimodalStore var parts []string var matches [][]string multimodalProcessor, visionModel := s.model.(model.MultimodalProcessor) if visionModel { re := regexp.MustCompile(`\[img-(\d+)\]`) parts = re.Split(prompt, -1) matches = re.FindAllStringSubmatch(prompt, -1) mmStore = newMultimodalStore() } else { parts = []string{prompt} } for i, part := range parts { // text - tokenize tokens, err := s.model.(model.TextProcessor).Encode(part, i == 0) if err != nil { return nil, nil, nil, err } for _, t := range tokens { inputs = append(inputs, &input.Input{Token: t}) } // image - decode and store if i < len(matches) { n, _ := strconv.Atoi(matches[i][1]) imageIndex := -1 for j := range images { if images[j].ID == n { imageIndex = j break } } if imageIndex < 0 { return nil, nil, nil, fmt.Errorf("invalid image index: %d", n) } ctx := s.model.Backend().NewContext() runtime.SetFinalizer(ctx, func(c ml.Context) { c.Close() }) ctxs = append(ctxs, ctx) imageEmbeddings, err := multimodalProcessor.EncodeMultimodal(ctx, images[imageIndex].Data) if err != nil { return nil, nil, nil, err } s.multimodalHash.Reset() _, _ = s.multimodalHash.Write(images[imageIndex].Data) imageHash := s.multimodalHash.Sum64() mmStore.addMultimodal(imageEmbeddings) inputs = append(inputs, &input.Input{Multimodal: imageEmbeddings, MultimodalHash: imageHash}) } } if visionModel { var err error inputs, err = multimodalProcessor.PostTokenize(inputs) if err != nil { return nil, nil, nil, err } } return inputs, ctxs, mmStore, nil } type batchState struct { // id provides a counter for trace logging batches id int // ctx holds the backend context used for this batch ctx ml.Context // modelOutput holds the outputs from this batch modelOutput ml.Tensor // batchInputs holds the input token pointers which may start as // placeholders later filled in before calling ctx.Compute batchInputs []*input.Input // batch contains the inputs for a model forward pass batch 
input.Batch // full set of seqs at the time this batch was initiated seqs []*Sequence // Signaled when this batches inputs are ready and compute can proceed inputsReadyCh chan struct{} // Signaling when Compute is about to begin on this batch, and // seqs have been updated to prepare for the next batch computeStartedCh chan struct{} // Signaled when this batches outputs are complete and the next batch can proceed outputsReadyCh chan struct{} } type Server struct { // modelPath is the location of the model to be loaded modelPath string // loadMu prevents more than one load attempt from occurring at a time loadMu sync.Mutex // lastLoad is the load request from the previous load attempt. Used to // detect if we can reuse an existing memory allocation. lastLoad llm.LoadRequest // is the server ready to process requests? // protects access to model and image ready sync.WaitGroup // loaded model model model.Model // status for external health reporting - loading, ready to serve, etc. status llm.ServerStatus // current progress on loading the model progress float32 // number of simultaneous requests to handle parallel int // maximum number of elements in a batch (per sequence) // TODO (jmorganca): make this n_batch batchSize int // Simple counter used only for trace logging batches batchID int // protects access to everything below this line // this is context state needed for decoding mu sync.Mutex // indicates that data is ready for processing cond *sync.Cond // the list of simultaneous sequences being evaluated seqs []*Sequence // seqs can have a maximum of parallel entries, which // is enfoced by seqSem seqsSem *semaphore.Weighted // KV cache cache *InputCache // next sequence for prompt processing to avoid starvation nextSeq int // multimodalHash generates hashes for comparing equality // of non-text data multimodalHash maphash.Hash } func (s *Server) allNil() bool { for _, item := range s.seqs { if item != nil { return false } } return true } func flushPending(seq 
*Sequence) bool { joined := strings.Join(seq.pendingResponses, "") logprobs := seq.pendingLogprobs seq.pendingResponses = []string{} seq.pendingLogprobs = []llm.Logprob{} // Check if there are any partial UTF-8 characters remaining. // We already check and queue as we are generating but some may // still make it here: // - Sequence is ending, e.g. generation limit has been hit // - Invalid characters in the middle of a string // This is a stricter check to ensure we never output invalid Unicode. for !utf8.ValidString(joined) { joined = joined[:len(joined)-1] } if len(joined) == 0 { return true } select { case seq.responses <- response{content: joined, logprobs: logprobs}: return true case <-seq.quit: return false } } func (s *Server) removeSequence(seqIndex int, reason llm.DoneReason) { seq := s.seqs[seqIndex] flushPending(seq) seq.doneReason = reason close(seq.responses) close(seq.embedding) seq.cache.InUse = false s.seqs[seqIndex] = nil s.seqsSem.Release(1) } // track batch state between forwardBatch, computeBatch and predictForwardBatch func (s *Server) run(ctx context.Context) { s.ready.Wait() supportsAsync := pooling.Type(s.model.Backend().Config().Uint("pooling_type")) == pooling.TypeNone var previousBatch batchState for { select { case <-ctx.Done(): return default: var err error nextBatch, err := s.forwardBatch(previousBatch) if err != nil { panic(err) } if supportsAsync { go s.computeBatch(nextBatch) } else { s.computeBatch(nextBatch) } previousBatch = nextBatch } } } // forwardBatch will calculate a batch. 
// forwardBatch builds the next batch of inputs from the active sequences and
// constructs (but does not execute) the forward-pass graph for it.
//
// It coordinates with the batch that is still being computed: nextBatch's
// inputsReadyCh is chained to pendingBatch's outputsReadyCh so that token
// values decoded by the pending batch can be injected before compute starts.
// On success it returns a batchState whose ctx must eventually be closed by
// computeBatch; if no inputs are available, nextBatch.ctx is nil.
func (s *Server) forwardBatch(pendingBatch batchState) (nextBatch batchState, err error) {
	// If we have a pending batch still processing, wait until Compute has started
	// before setting up the next batch so the seqs inputs are ready to receive their
	// token values and we get the correct input pointers for the batchInputs
	if pendingBatch.ctx != nil {
		logutil.Trace("forwardBatch waiting for compute to start", "pendingBatch.id", pendingBatch.id)
		<-pendingBatch.computeStartedCh
		logutil.Trace("forwardBatch compute started, setting up next batch", "pendingBatch.id", pendingBatch.id, "id", s.batchID)
		nextBatch.inputsReadyCh = pendingBatch.outputsReadyCh // Chain the outputs from the pending batch to the next inputs batch
	} else {
		logutil.Trace("forwardBatch no pending batch detected", "batchID", s.batchID)
		// No pendingBatch, so the inputs will be ready in the seqs immediately
		nextBatch.inputsReadyCh = make(chan struct{}, 1)
		nextBatch.inputsReadyCh <- struct{}{}
	}

	s.mu.Lock()
	for s.allNil() {
		s.cond.Wait() // Wait until an item is added
	}
	defer s.mu.Unlock()

	nextBatch.ctx = s.model.Backend().NewContext()
	// On any error below, release the context so the caller never sees a
	// half-initialized batch holding backend resources.
	defer func() {
		if err != nil {
			nextBatch.ctx.Close()
			nextBatch.ctx = nil
		}
	}()
	nextBatch.id = s.batchID
	// Snapshot the sequence slots so computeBatch can later detect slots that
	// were replaced by new sequences while this batch was in flight.
	nextBatch.seqs = append([]*Sequence{}, s.seqs...)
	nextBatch.computeStartedCh = make(chan struct{}, 1)
	nextBatch.outputsReadyCh = make(chan struct{}, 1)

	// Prepare the seqs and batch, but defer the input token values as we may not be ready yet
	var batchInputs []*input.Input
	var batchOutputs []int32
	var batch input.Batch

	resumeSeq := -1
	// Round-robin over sequences starting where the previous batch left off.
	seqIdx := s.nextSeq - 1
	for range s.seqs {
		seqIdx = (seqIdx + 1) % len(s.seqs)
		seq := s.seqs[seqIdx]
		if seq == nil {
			continue
		}

		// if past the num predict limit
		if seq.numPredict > 0 && seq.numPredicted >= seq.numPredict {
			s.removeSequence(seqIdx, llm.DoneReasonLength)
			nextBatch.seqs[seqIdx] = nil
			continue
		}

		if !s.cache.enabled {
			// Without a KV cache every previously processed input must be
			// replayed, so fold the cached inputs back into the queue.
			seq.inputs = append(seq.cache.Inputs, seq.inputs...)
			seq.cache.Inputs = []*input.Input{}
		}

		batchSize := s.batchSize
		for i, inp := range seq.inputs {
			// If we are required to put following inputs into a single batch then extend the
			// batch size. Since we are only extending the size the minimum amount possible, this
			// will cause a break if we have existing inputs.
			minBatch := 1 + inp.SameBatch
			if minBatch > batchSize {
				batchSize = minBatch
			}

			// Stop if the required batch would put us over the total batch size (including tokens
			// added by other sequences). If we haven't been able to add anything yet then pick up
			// here again for the next batch to avoid starvation, though we can opportunistically
			// check if other sequences can still squeeze something in.
			if len(batchInputs)+minBatch > batchSize {
				if len(seq.pendingInputs) == 0 && resumeSeq == -1 {
					resumeSeq = seqIdx
				}
				break
			}

			// If the sum of our working set (already processed tokens, tokens we added to this
			// batch, required following tokens) exceeds the context size, then trigger a shift
			// now so we don't have to do one later when we can't break the batch.
			if int32(len(seq.cache.Inputs)+len(seq.pendingInputs)+minBatch) > s.cache.numCtx {
				if len(seq.pendingInputs) != 0 {
					break
				}

				if !seq.shift {
					s.removeSequence(seqIdx, llm.DoneReasonLength)
					nextBatch.seqs[seqIdx] = nil
					break
				}

				err = s.cache.ShiftCacheSlot(seq.cache, seq.numKeep)
				if err != nil {
					var reprocess *ErrReprocessInputs
					if errors.As(err, &reprocess) {
						// Prepend these inputs to the sequence's inputs queue for reprocessing
						seq.inputs = append(reprocess.Inputs, seq.inputs...)
						// Skip this sequence but continue processing the rest
						nextBatch.seqs[seqIdx] = nil // clear this sequence for this batch
						err = nil
						continue
					} else {
						return
					}
				}
			}

			batchInputs = append(batchInputs, seq.inputs[i])
			if inp.Multimodal != nil {
				var mm []input.Multimodal
				mm, err = seq.mmStore.getMultimodal(s.model.Backend(), nextBatch.ctx, inp.Multimodal, false)
				if err != nil {
					return
				}
				batch.Multimodal = append(batch.Multimodal, input.MultimodalIndex{Index: len(batchInputs) - 1, Multimodal: mm})
			}

			batch.Positions = append(batch.Positions, int32(len(seq.cache.Inputs)+len(seq.pendingInputs)))
			batch.Sequences = append(batch.Sequences, seq.cache.Id)

			// iBatch records which output row this sequence's logits will land
			// in; only the final input of a sequence (or every input for
			// embedding-only sequences) produces an output.
			seq.iBatch = len(batchOutputs)
			if i+1 == len(seq.inputs) || seq.embeddingOnly {
				batchOutputs = append(batchOutputs, int32(len(batchInputs)-1))
			}
			logutil.Trace("forwardBatch iBatch", "batchID", s.batchID, "seqIdx", seqIdx, "seq.iBatch", seq.iBatch, "i+1", i+1, "len(seq.inputs)", len(seq.inputs))
			seq.pendingInputs = append(seq.pendingInputs, inp)
		}

		seq.inputs = seq.inputs[len(seq.pendingInputs):]
	}

	startedAt := time.Now()
	for i := range nextBatch.seqs {
		if nextBatch.seqs[i] != nil && nextBatch.seqs[i].startedAt.IsZero() {
			nextBatch.seqs[i].startedAt = startedAt
		}
	}

	// Resume from a starved sequence next time, otherwise continue round-robin.
	if resumeSeq != -1 {
		s.nextSeq = resumeSeq
	} else {
		s.nextSeq = seqIdx + 1
	}

	if len(batchInputs) == 0 {
		logutil.Trace("forwardBatch no batchInputs, going idle", "batchID", s.batchID)
		nextBatch.ctx.Close()
		nextBatch.ctx = nil
		return
	}
	s.batchID++

	// Actual batchInputs values will be injected into the batch.Inputs tensor before calling Compute
	batch.Inputs = nextBatch.ctx.Input().Empty(ml.DTypeI32, len(batchInputs))
	batch.Outputs = nextBatch.ctx.Input().FromInts(batchOutputs, len(batchOutputs))
	nextBatch.ctx.SetBatchSize(len(batchInputs))
	nextBatch.modelOutput, err = model.Forward(nextBatch.ctx, s.model, batch)
	if err != nil {
		err = fmt.Errorf("failed to build graph: %w", err)
		return
	}

	nextBatch.batchInputs = batchInputs
	nextBatch.batch = batch

	return
}

// Async processing
// computeBatch executes the forward pass built by forwardBatch, samples one
// token per finished sequence, and streams the decoded pieces to clients.
// It closes activeBatch.ctx when done and signals outputsReadyCh so the
// next batch (or an idle forwardBatch) can proceed.
func (s *Server) computeBatch(activeBatch batchState) {
	if activeBatch.ctx == nil {
		// Nothing to compute
		return
	}
	defer activeBatch.ctx.Close()

	// Wait until inputs are ready
	logutil.Trace("computeBatch: waiting for inputs to be ready", "batchID", activeBatch.id)
	<-activeBatch.inputsReadyCh
	logutil.Trace("computeBatch: inputs are ready", "batchID", activeBatch.id)

	// Once we complete, signal the next batch of inputs are ready
	// This will unblock the next computeBatch, or forwardBatch if new seqs come in
	defer func() {
		logutil.Trace("computeBatch: outputs are ready", "batchID", activeBatch.id)
		activeBatch.outputsReadyCh <- struct{}{}
	}()

	s.mu.Lock()

	// Gather the actual input token values now that they're ready
	batchInputs := make([]int32, len(activeBatch.batchInputs))
	for i := range batchInputs {
		batchInputs[i] = activeBatch.batchInputs[i].Token
	}

	// Now we run part of the decoding algorithm to adjust the seq.inputs with placeholder tokens
	// so that forwardBatch can build a batchInputs set which will eventually contain the actual
	// decoded tokens.
	nextBatchTokens := make([]*input.Input, len(s.seqs))
	iBatches := make([]int, len(s.seqs)) // Record the iBatch values before releasing the lock
	for i, seq := range s.seqs {
		iBatches[i] = -1
		if seq == nil {
			continue
		}
		// Skip over any newly added or skipped sequences
		if activeBatch.seqs[i] == nil {
			continue
		}
		// Detect if the sequence we're processing has already been completed and replaced
		// with a new sequence
		if seq != activeBatch.seqs[i] {
			logutil.Trace("computeBatch: sequence replaced, discarding its results", "batchID", activeBatch.id, "seqIdx", i)
			continue
		}

		// Pending inputs will actually be in the cache after we call Compute.
		// However, we have already resolved any placeholder tokens.
		//
		// It's possible for incoming sequences to look at the values that we've
		// added to the cache here and start relying on them before we've done
		// the computation. This is OK as long as we ensure that this batch's
		// computation happens before any future batch's and we never fail
		// (unless we take down the whole runner).
		if len(seq.pendingInputs) > 0 {
			seq.cache.Inputs = append(seq.cache.Inputs, seq.pendingInputs...)
			seq.pendingInputs = []*input.Input{}
		}

		// don't sample prompt processing
		if len(seq.inputs) != 0 {
			if !s.cache.enabled {
				panic("caching disabled but unable to fit entire input in a batch")
			}
			continue
		}

		seq.numPredicted++
		nextToken := &input.Input{Token: 0} // placeholder we'll fill in after Compute/Floats
		seq.inputs = []*input.Input{nextToken}
		nextBatchTokens[i] = nextToken
		iBatches[i] = seq.iBatch
	}

	// At this point the seqs are ready for forwardBatch to move forward so unblock
	s.mu.Unlock()

	activeBatch.batch.Inputs.FromInts(batchInputs)
	activeBatch.ctx.ComputeWithNotify(
		func() {
			logutil.Trace("computeBatch: signaling computeStartedCh", "batchID", activeBatch.id)
			activeBatch.computeStartedCh <- struct{}{}
		},
		activeBatch.modelOutput)

	outputs := activeBatch.modelOutput.Floats()
	t := time.Now()
	logutil.Trace("computeBatch: logits ready", "batchID", activeBatch.id)

	s.mu.Lock()
	defer s.mu.Unlock()

	logutil.Trace("computeBatch: decoding", "batchID", activeBatch.id)

	for i, seq := range s.seqs {
		if seq == nil || nextBatchTokens[i] == nil {
			continue
		}
		seq.lastUpdatedAt = t
		if seq.numPredicted == 1 {
			// First generated token: everything before this point was prompt
			// processing; reset startedAt so eval timing covers generation only.
			seq.processingDuration = seq.lastUpdatedAt.Sub(seq.startedAt)
			seq.startedAt = seq.lastUpdatedAt
		}

		// if done processing the prompt, generate an embedding and return
		if seq.embeddingOnly {
			seq.embedding <- outputs
			s.removeSequence(i, llm.DoneReasonStop)
			continue
		}

		// sample a token
		vocabSize := len(outputs) / activeBatch.batch.Outputs.Dim(0)
		logutil.Trace("computeBatch: vocab details", "batchID", activeBatch.id, "seqIdx", i, "len(logits)", len(outputs), "len(activeBatch.batch.Outputs)", activeBatch.batch.Outputs.Dim(0), "vocabSize", vocabSize, "iBatches", iBatches)
		logits := outputs[iBatches[i]*vocabSize : (iBatches[i]+1)*vocabSize]
		token, err := seq.sampler.Sample(logits)
		if err != nil {
			panic("failed to sample token")
		}

		// Fill in the placeholder created above so the already-prepared next
		// batch sees the real token value.
		nextBatchTokens[i].Token = token

		// if it's an end of sequence token, break
		if s.model.(model.TextProcessor).Is(token, model.SpecialEOS) {
			// TODO (jmorganca): we should send this back
			// as it's important for the /api/generate context
			// seq.responses <- piece
			logutil.Trace("computeBatch: EOS", "batchID", activeBatch.id, "seqIdx", i)
			s.removeSequence(i, llm.DoneReasonStop)
			continue
		}

		piece, err := s.model.(model.TextProcessor).Decode([]int32{token})
		if err != nil {
			panic("failed to decode token")
		}

		// Calculate logprobs if requested (after EOS check to avoid logprobs for EOS tokens)
		if seq.logprobs {
			logprobs := calculateLogprobs(logits, token, seq.topLogprobs, s.model.(model.TextProcessor))
			seq.pendingLogprobs = append(seq.pendingLogprobs, logprobs...)
		}

		seq.pendingResponses = append(seq.pendingResponses, piece)
		sequence := strings.Join(seq.pendingResponses, "")

		if ok, stop := common.FindStop(sequence, seq.stop); ok {
			slog.Debug("hit stop token", "pending", seq.pendingResponses, "stop", stop)

			var tokenTruncated bool
			origLen := len(seq.pendingResponses)
			seq.pendingResponses, tokenTruncated = common.TruncateStop(seq.pendingResponses, stop)
			newLen := len(seq.pendingResponses)

			// Truncate logprobs to match the truncated responses
			if seq.logprobs {
				origLogprobsLen := len(seq.pendingLogprobs)
				numTokensRemoved := origLen - newLen
				newLogprobsLen := origLogprobsLen - numTokensRemoved
				if newLogprobsLen < 0 {
					newLogprobsLen = 0
				}
				seq.pendingLogprobs = seq.pendingLogprobs[:newLogprobsLen]
			}

			// Update the cache based on the tokens that will be returned:
			// - We have 1 token more than is currently in the cache because
			//   the last one generated wasn't submitted to Decode
			// - Remove any stop sequences that we stripped out
			// - If truncateStop removed a portion of a token, drop that
			// - As defense-in-depth, if truncatedToken didn't find a stop token
			//   remove the extra one that we added to the cache len
			tokenLen := len(seq.cache.Inputs) + 1
			tokenLen -= origLen - newLen
			if tokenTruncated || origLen == newLen {
				tokenLen--
			}
			seq.cache.Inputs = seq.cache.Inputs[:tokenLen]

			s.removeSequence(i, llm.DoneReasonStop)
			continue
		}

		if common.ContainsStopSuffix(sequence, seq.stop) {
			continue
		}

		if common.IncompleteUnicode(sequence) {
			continue
		}

		if !flushPending(seq) {
			s.removeSequence(i, llm.DoneReasonConnectionClosed)
		}
	}

	samplingDuration := time.Since(t)
	for i, seq := range s.seqs {
		if seq != nil && nextBatchTokens[i] != nil {
			s.seqs[i].samplingDuration += samplingDuration
		}
	}
}

// completion handles a streaming completion request: it builds a sampler and
// a new sequence, claims a free sequence slot, then streams decoded pieces
// back to the client as chunked JSON until the sequence finishes or the
// client disconnects.
func (s *Server) completion(w http.ResponseWriter, r *http.Request) {
	var req llm.CompletionRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Bad request", http.StatusBadRequest)
		return
	}

	if req.Options == nil {
		opts := api.DefaultOptions()
		req.Options = &opts
	}

	// Set the headers to indicate streaming
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Transfer-Encoding", "chunked")

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming not supported", http.StatusInternalServerError)
		return
	}

	var grammar *sample.GrammarSampler
	var err error
	if req.Grammar != "" {
		grammar, err = sample.NewGrammarSampler(s.model.(model.TextProcessor), req.Grammar)
		if err != nil {
			http.Error(w, "failed to load model vocabulary required for format", http.StatusInternalServerError)
			return
		}
		defer grammar.Free()
	}

	sampler := sample.NewSampler(
		req.Options.Temperature,
		req.Options.TopK,
		req.Options.TopP,
		req.Options.MinP,
		req.Options.Seed,
		grammar,
	)

	seq, err := s.NewSequence(req.Prompt, req.Images, NewSequenceParams{
		numPredict:  req.Options.NumPredict,
		stop:        req.Options.Stop,
		numKeep:     int32(req.Options.NumKeep),
		sampler:     sampler,
		embedding:   false,
		shift:       req.Shift,
		truncate:    req.Truncate,
		logprobs:    req.Logprobs,
		topLogprobs: req.TopLogprobs,
	})
	if err != nil {
		if errors.Is(err, errorInputTooLong) {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		http.Error(w, fmt.Sprintf("Failed to create new sequence: %v", err), http.StatusInternalServerError)
		return
	}

	// Ensure there is a place to put the sequence, released when removed from s.seqs
	if err := s.seqsSem.Acquire(r.Context(), 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			http.Error(w, fmt.Sprintf("Failed to acquire semaphore: %v", err), http.StatusInternalServerError)
		}
		return
	}

	s.mu.Lock()
	found := false
	for i, sq := range s.seqs {
		if sq == nil {
			seq.cache, seq.inputs, err = s.cache.LoadCacheSlot(seq.inputs, true)
			if err != nil {
				s.mu.Unlock()
				s.seqsSem.Release(1)
				http.Error(w, fmt.Sprintf("Failed to load cache: %v", err), http.StatusInternalServerError)
				return
			}

			s.seqs[i] = seq
			s.cond.Signal()
			found = true
			break
		}
	}
	s.mu.Unlock()

	if !found {
		s.seqsSem.Release(1)
		http.Error(w, "could not find an available sequence", http.StatusInternalServerError)
		return
	}

	// Stream responses until the sequence channel closes (done) or the
	// client goes away (seq.quit tells the decode loop to stop).
	for {
		select {
		case <-r.Context().Done():
			close(seq.quit)
			return
		case resp, ok := <-seq.responses:
			if ok {
				if err := json.NewEncoder(w).Encode(&llm.CompletionResponse{
					Content:  resp.content,
					Logprobs: resp.logprobs,
				}); err != nil {
					http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError)
					close(seq.quit)
					return
				}

				flusher.Flush()
			} else {
				// Channel closed: emit the final response carrying timing
				// and token-count statistics.
				if err := json.NewEncoder(w).Encode(&llm.CompletionResponse{
					Done:               true,
					DoneReason:         seq.doneReason,
					PromptEvalCount:    seq.numPromptInputs,
					PromptEvalDuration: seq.processingDuration,
					EvalCount:          seq.numPredicted,
					EvalDuration:       seq.lastUpdatedAt.Sub(seq.startedAt) - seq.samplingDuration,
				}); err != nil {
					http.Error(w, fmt.Sprintf("failed to encode final response: %v", err), http.StatusInternalServerError)
				}

				return
			}
		}
	}
}

// embeddings handles an embedding request: it validates that the model has a
// pooling type configured, runs the prompt through a sequence in embedding
// mode, and returns the resulting vector as JSON.
func (s *Server) embeddings(w http.ResponseWriter, r *http.Request) {
	if pooling.Type(s.model.Backend().Config().Uint("pooling_type")) == pooling.TypeNone {
		http.Error(w, "this model does not support embeddings", http.StatusNotImplemented)
		return
	}

	var req llm.EmbeddingRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, fmt.Sprintf("bad request: %s", err), http.StatusBadRequest)
		return
	}

	w.Header().Set("Content-Type", "application/json")

	seq, err := s.NewSequence(req.Content, nil, NewSequenceParams{
		embedding: true,
		truncate:  false,
	})
	if err != nil {
		if errors.Is(err, errorInputTooLong) {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		http.Error(w, fmt.Sprintf("failed to create new sequence: %v", err), http.StatusInternalServerError)
		return
	}

	// Reserve a sequence slot; released when the sequence is removed from s.seqs.
	if err := s.seqsSem.Acquire(r.Context(), 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			http.Error(w, fmt.Sprintf("failed to acquire semaphore: %v", err), http.StatusInternalServerError)
		}
		return
	}

	s.mu.Lock()
	found := false
	for i, sq := range s.seqs {
		if sq == nil {
			seq.cache, seq.inputs, err = s.cache.LoadCacheSlot(seq.inputs, false)
			if err != nil {
				s.mu.Unlock()
				s.seqsSem.Release(1)
				http.Error(w, fmt.Sprintf("failed to load cache: %v", err), http.StatusInternalServerError)
				return
			}
			s.seqs[i] = seq
			s.cond.Signal()
			found = true
			break
		}
	}
	s.mu.Unlock()

	if !found {
		s.seqsSem.Release(1)
		http.Error(w, "could not find an available sequence", http.StatusInternalServerError)
		return
	}

	// Block until the decode loop delivers the pooled embedding.
	if err := json.NewEncoder(w).Encode(&llm.EmbeddingResponse{
		Embedding:       <-seq.embedding,
		PromptEvalCount: seq.numPromptInputs,
	}); err != nil {
		http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError)
	}
}

// health reports the server's current load status and progress as JSON.
func (s *Server) health(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(&llm.ServerStatusResponse{
		Status:   s.status,
		Progress: s.progress,
	}); err != nil {
		http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError)
	}
}

func (s *Server)
reserveWorstCaseGraph(prompt bool) error { ctx := s.model.Backend().NewContext() defer ctx.Close() var err error batchSize := 1 if prompt { batchSize = s.batchSize } inputs := make([]*input.Input, batchSize) for i := range inputs { inputs[i] = &input.Input{} } mmStore := newMultimodalStore() // Multimodal strategy: // - Encode a 2048x2048 image. This assumes that a single image of this // size is sufficient to trigger the worst case. This is currently true // because for existing models, only a single image fits in a batch. // - Add the embedding to a full batch of tokens - this is necessary because // the model may be looking for non-image data, such as <image> tags. // - Run PostTokenize to execute any transformations between generated // embeddings and what the forward pass expects. // - The result may now be larger than a batch (images may not fit in a // single batch), so trim based on what will fit and must be grouped together. // - Fill out the rest of the space with text tokens. 
if multimodalProcessor, ok := s.model.(model.MultimodalProcessor); prompt && ok { mmCtx := s.model.Backend().NewContext() defer mmCtx.Close() img := image.NewGray(image.Rect(0, 0, 2048, 2048)) var buf bytes.Buffer bmp.Encode(&buf, img) if inputs[0].Multimodal, err = multimodalProcessor.EncodeMultimodal(mmCtx, buf.Bytes()); err == nil { mmStore.addMultimodal(inputs[0].Multimodal) inputs, err = multimodalProcessor.PostTokenize(inputs) if err != nil { return err } for i, inp := range inputs { minBatch := 1 + inp.SameBatch if minBatch > s.batchSize { inputs = inputs[i:min(i+minBatch, len(inputs))] break } else if i+minBatch > s.batchSize { inputs = inputs[:i] break } } if len(inputs) < batchSize { newInputs := make([]*input.Input, batchSize) copy(newInputs, inputs) for i := len(inputs); i < batchSize; i++ { newInputs[i] = &input.Input{} } inputs = newInputs } } } var batch input.Batch batchInputs := make([]int32, len(inputs)) batch.Positions = make([]int32, len(inputs)) batch.Sequences = make([]int, len(inputs)) for i, inp := range inputs { batchInputs[i] = inp.Token if inp.Multimodal != nil {
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
true
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/types/model/capability.go
types/model/capability.go
package model type Capability string const ( CapabilityCompletion = Capability("completion") CapabilityTools = Capability("tools") CapabilityInsert = Capability("insert") CapabilityVision = Capability("vision") CapabilityEmbedding = Capability("embedding") CapabilityThinking = Capability("thinking") ) func (c Capability) String() string { return string(c) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/types/model/config.go
types/model/config.go
package model // ConfigV2 represents the configuration metadata for a model. type ConfigV2 struct { ModelFormat string `json:"model_format"` ModelFamily string `json:"model_family"` ModelFamilies []string `json:"model_families"` ModelType string `json:"model_type"` // shown as Parameter Size FileType string `json:"file_type"` // shown as Quantization Level Renderer string `json:"renderer,omitempty"` Parser string `json:"parser,omitempty"` Requires string `json:"requires,omitempty"` RemoteHost string `json:"remote_host,omitempty"` RemoteModel string `json:"remote_model,omitempty"` // used for remotes Capabilities []string `json:"capabilities,omitempty"` ContextLen int `json:"context_length,omitempty"` EmbedLen int `json:"embedding_length,omitempty"` BaseName string `json:"base_name,omitempty"` // required by spec Architecture string `json:"architecture"` OS string `json:"os"` RootFS RootFS `json:"rootfs"` } // RootFS represents the root filesystem configuration for a model. type RootFS struct { Type string `json:"type"` DiffIDs []string `json:"diff_ids"` }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/types/model/name.go
types/model/name.go
// Package model contains types and utilities for parsing, validating, and
// working with model names and digests.
package model

import (
	"cmp"
	"errors"
	"fmt"
	"log/slog"
	"path/filepath"
	"strings"
)

// Errors
var (
	// ErrUnqualifiedName represents an error where a name is not fully
	// qualified. It is not used directly in this package, but is here
	// to avoid other packages inventing their own error type.
	// Additionally, it can be conveniently used via [Unqualified].
	ErrUnqualifiedName = errors.New("unqualified name")
)

// Unqualified is a helper function that returns an error with
// ErrUnqualifiedName as the cause and the name as the message.
func Unqualified(n Name) error {
	return fmt.Errorf("%w: %s", ErrUnqualifiedName, n)
}

// MissingPart is used to indicate any part of a name that was "promised" by
// the presence of a separator, but is missing.
//
// The value was chosen because it is deemed unlikely to be set by a user,
// not a valid part name when checked by [Name.IsValid], and easy to
// spot in logs.
const MissingPart = "!MISSING!"

// Default values merged into bare names by [ParseName].
const (
	defaultHost      = "registry.ollama.ai"
	defaultNamespace = "library"
	defaultTag       = "latest"
)

// DefaultName returns a name with the default values for the host, namespace,
// and tag parts. The model and digest parts are empty.
//
//   - The default host is ("registry.ollama.ai")
//   - The default namespace is ("library")
//   - The default tag is ("latest")
func DefaultName() Name {
	return Name{
		Host:      defaultHost,
		Namespace: defaultNamespace,
		Tag:       defaultTag,
	}
}

// partKind enumerates the positional parts of a model name; the ordinal
// values match the field order used by [Name.IsFullyQualified].
type partKind int

const (
	kindHost partKind = iota
	kindNamespace
	kindModel
	kindTag
	kindDigest
)

// String returns a human-readable label for the part kind, used in
// diagnostics.
func (k partKind) String() string {
	switch k {
	case kindHost:
		return "host"
	case kindNamespace:
		return "namespace"
	case kindModel:
		return "model"
	case kindTag:
		return "tag"
	case kindDigest:
		return "digest"
	default:
		return "unknown"
	}
}

// Name is a structured representation of a model name string, as defined by
// [ParseNameNoDefaults].
//
// It is not guaranteed to be valid. Use [Name.IsValid] to check if the name
// is valid.
type Name struct {
	Host      string
	Namespace string
	Model     string
	Tag       string
}

// ParseName parses and assembles a Name from a name string. The
// format of a valid name string is:
//
//	s:
//		{ host } "/" { namespace } "/" { model } ":" { tag } "@" { digest }
//		{ host } "/" { namespace } "/" { model } ":" { tag }
//		{ host } "/" { namespace } "/" { model } "@" { digest }
//		{ host } "/" { namespace } "/" { model }
//		{ namespace } "/" { model } ":" { tag } "@" { digest }
//		{ namespace } "/" { model } ":" { tag }
//		{ namespace } "/" { model } "@" { digest }
//		{ namespace } "/" { model }
//		{ model } ":" { tag } "@" { digest }
//		{ model } ":" { tag }
//		{ model } "@" { digest }
//		{ model }
//		"@" { digest }
//	host:
//		pattern: { alphanum | "_" } { alphanum | "-" | "_" | "." | ":" }*
//		length:  [1, 350]
//	namespace:
//		pattern: { alphanum | "_" } { alphanum | "-" | "_" }*
//		length:  [1, 80]
//	model:
//		pattern: { alphanum | "_" } { alphanum | "-" | "_" | "." }*
//		length:  [1, 80]
//	tag:
//		pattern: { alphanum | "_" } { alphanum | "-" | "_" | "." }*
//		length:  [1, 80]
//	digest:
//		pattern: { alphanum | "_" } { alphanum | "-" | ":" }*
//		length:  [1, 80]
//
// Most users should use [ParseName] instead, unless need to support
// different defaults than DefaultName.
//
// The name returned is not guaranteed to be valid. If it is not valid, the
// field values are left in an undefined state. Use [Name.IsValid] to check
// if the name is valid.
func ParseName(s string) Name {
	return Merge(ParseNameBare(s), DefaultName())
}

// ParseNameBare parses s as a name string and returns a Name. No merge with
// [DefaultName] is performed.
func ParseNameBare(s string) Name { var n Name var promised bool // "/" is an illegal tag character, so we can use it to split the host if strings.LastIndex(s, ":") > strings.LastIndex(s, "/") { s, n.Tag, _ = cutPromised(s, ":") } s, n.Model, promised = cutPromised(s, "/") if !promised { n.Model = s return n } s, n.Namespace, promised = cutPromised(s, "/") if !promised { n.Namespace = s return n } scheme, host, ok := strings.Cut(s, "://") if !ok { host = scheme } n.Host = host return n } // ParseNameFromFilepath parses a 4-part filepath as a Name. The parts are // expected to be in the form: // // { host } "/" { namespace } "/" { model } "/" { tag } func ParseNameFromFilepath(s string) (n Name) { parts := strings.Split(s, string(filepath.Separator)) if len(parts) != 4 { return Name{} } n.Host = parts[0] n.Namespace = parts[1] n.Model = parts[2] n.Tag = parts[3] if !n.IsFullyQualified() { return Name{} } return n } // Merge merges the host, namespace, and tag parts of the two names, // preferring the non-empty parts of a. func Merge(a, b Name) Name { a.Host = cmp.Or(a.Host, b.Host) a.Namespace = cmp.Or(a.Namespace, b.Namespace) a.Tag = cmp.Or(a.Tag, b.Tag) return a } // String returns the name string, in the format that [ParseNameNoDefaults] // accepts as valid, if [Name.IsValid] reports true; otherwise the empty // string is returned. func (n Name) String() string { var b strings.Builder if n.Host != "" { b.WriteString(n.Host) b.WriteByte('/') } if n.Namespace != "" { b.WriteString(n.Namespace) b.WriteByte('/') } b.WriteString(n.Model) if n.Tag != "" { b.WriteByte(':') b.WriteString(n.Tag) } return b.String() } // DisplayShortest returns a short string version of the name. 
func (n Name) DisplayShortest() string { var sb strings.Builder if !strings.EqualFold(n.Host, defaultHost) { sb.WriteString(n.Host) sb.WriteByte('/') sb.WriteString(n.Namespace) sb.WriteByte('/') } else if !strings.EqualFold(n.Namespace, defaultNamespace) { sb.WriteString(n.Namespace) sb.WriteByte('/') } // always include model and tag sb.WriteString(n.Model) sb.WriteString(":") sb.WriteString(n.Tag) return sb.String() } // IsValidNamespace reports whether the provided string is a valid // namespace. func IsValidNamespace(s string) bool { return isValidPart(kindNamespace, s) } // IsValid reports whether all parts of the name are present and valid. The // digest is a special case, and is checked for validity only if present. // // Note: The digest check has been removed as is planned to be added back in // at a later time. func (n Name) IsValid() bool { return n.IsFullyQualified() } // IsFullyQualified returns true if all parts of the name are present and // valid without the digest. func (n Name) IsFullyQualified() bool { parts := []string{ n.Host, n.Namespace, n.Model, n.Tag, } for i, part := range parts { if !isValidPart(partKind(i), part) { return false } } return true } // Filepath returns a canonical filepath that represents the name with each part from // host to tag as a directory in the form: // // {host}/{namespace}/{model}/{tag} // // It uses the system's filepath separator and ensures the path is clean. // // It panics if the name is not fully qualified. Use [Name.IsFullyQualified] // to check if the name is fully qualified. func (n Name) Filepath() string { if !n.IsFullyQualified() { panic("illegal attempt to get filepath of invalid name") } return filepath.Join( n.Host, n.Namespace, n.Model, n.Tag, ) } // LogValue returns a slog.Value that represents the name as a string. 
func (n Name) LogValue() slog.Value { return slog.StringValue(n.String()) } func (n Name) EqualFold(o Name) bool { return strings.EqualFold(n.Host, o.Host) && strings.EqualFold(n.Namespace, o.Namespace) && strings.EqualFold(n.Model, o.Model) && strings.EqualFold(n.Tag, o.Tag) } func isValidLen(kind partKind, s string) bool { switch kind { case kindHost: return len(s) >= 1 && len(s) <= 350 case kindTag: return len(s) >= 1 && len(s) <= 80 default: return len(s) >= 1 && len(s) <= 80 } } func isValidPart(kind partKind, s string) bool { if !isValidLen(kind, s) { return false } for i := range s { if i == 0 { if !isAlphanumericOrUnderscore(s[i]) { return false } continue } switch s[i] { case '_', '-': case '.': if kind == kindNamespace { return false } case ':': if kind != kindHost && kind != kindDigest { return false } default: if !isAlphanumericOrUnderscore(s[i]) { return false } } } return true } func isAlphanumericOrUnderscore(c byte) bool { return c >= 'A' && c <= 'Z' || c >= 'a' && c <= 'z' || c >= '0' && c <= '9' || c == '_' } func cutLast(s, sep string) (before, after string, ok bool) { i := strings.LastIndex(s, sep) if i >= 0 { return s[:i], s[i+len(sep):], true } return s, "", false } // cutPromised cuts the last part of s at the last occurrence of sep. If sep is // found, the part before and after sep are returned as-is unless empty, in // which case they are returned as MissingPart, which will cause // [Name.IsValid] to return false. func cutPromised(s, sep string) (before, after string, ok bool) { before, after, ok = cutLast(s, sep) if !ok { return before, after, false } return cmp.Or(before, MissingPart), cmp.Or(after, MissingPart), true }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/types/model/name_test.go
types/model/name_test.go
package model import ( "path/filepath" "reflect" "runtime" "testing" ) const ( part80 = "88888888888888888888888888888888888888888888888888888888888888888888888888888888" part350 = "33333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333" ) func TestParseNameParts(t *testing.T) { cases := []struct { in string want Name wantFilepath string wantValidDigest bool }{ { in: "registry.ollama.ai/library/dolphin-mistral:7b-v2.6-dpo-laser-q6_K", want: Name{ Host: "registry.ollama.ai", Namespace: "library", Model: "dolphin-mistral", Tag: "7b-v2.6-dpo-laser-q6_K", }, wantFilepath: filepath.Join("registry.ollama.ai", "library", "dolphin-mistral", "7b-v2.6-dpo-laser-q6_K"), }, { in: "scheme://host:port/namespace/model:tag", want: Name{ Host: "host:port", Namespace: "namespace", Model: "model", Tag: "tag", }, wantFilepath: filepath.Join("host:port", "namespace", "model", "tag"), }, { in: "host/namespace/model:tag", want: Name{ Host: "host", Namespace: "namespace", Model: "model", Tag: "tag", }, wantFilepath: filepath.Join("host", "namespace", "model", "tag"), }, { in: "host:port/namespace/model:tag", want: Name{ Host: "host:port", Namespace: "namespace", Model: "model", Tag: "tag", }, wantFilepath: filepath.Join("host:port", "namespace", "model", "tag"), }, { in: "host/namespace/model", want: Name{ Host: "host", Namespace: "namespace", Model: "model", }, wantFilepath: filepath.Join("host", "namespace", "model", "latest"), }, { in: "host:port/namespace/model", want: Name{ Host: "host:port", Namespace: "namespace", Model: "model", }, wantFilepath: filepath.Join("host:port", "namespace", "model", "latest"), }, { in: "namespace/model", want: Name{ Namespace: "namespace", Model: "model", }, 
wantFilepath: filepath.Join("registry.ollama.ai", "namespace", "model", "latest"), }, { in: "model", want: Name{ Model: "model", }, wantFilepath: filepath.Join("registry.ollama.ai", "library", "model", "latest"), }, { in: "h/nn/mm:t", want: Name{ Host: "h", Namespace: "nn", Model: "mm", Tag: "t", }, wantFilepath: filepath.Join("h", "nn", "mm", "t"), }, { in: part80 + "/" + part80 + "/" + part80 + ":" + part80, want: Name{ Host: part80, Namespace: part80, Model: part80, Tag: part80, }, wantFilepath: filepath.Join(part80, part80, part80, part80), }, { in: part350 + "/" + part80 + "/" + part80 + ":" + part80, want: Name{ Host: part350, Namespace: part80, Model: part80, Tag: part80, }, wantFilepath: filepath.Join(part350, part80, part80, part80), }, } for _, tt := range cases { t.Run(tt.in, func(t *testing.T) { got := ParseNameBare(tt.in) if !reflect.DeepEqual(got, tt.want) { t.Errorf("parseName(%q) = %v; want %v", tt.in, got, tt.want) } got = ParseName(tt.in) if tt.wantFilepath != "" && got.Filepath() != tt.wantFilepath { t.Errorf("parseName(%q).Filepath() = %q; want %q", tt.in, got.Filepath(), tt.wantFilepath) } }) } } var testCases = map[string]bool{ // name -> valid "": false, "_why/_the/_lucky:_stiff": true, // minimal "h/n/m:t": true, "host/namespace/model:tag": true, "host/namespace/model": false, "namespace/model": false, "model": false, // long (but valid) part80 + "/" + part80 + "/" + part80 + ":" + part80: true, part350 + "/" + part80 + "/" + part80 + ":" + part80: true, "h/nn/mm:t": true, // bare minimum part sizes // unqualified "m": false, "n/m:": false, "h/n/m": false, "@t": false, "m@d": false, // invalids "^": false, "mm:": false, "/nn/mm": false, "//": false, "//mm": false, "hh//": false, "//mm:@": false, "00@": false, "@": false, // not starting with alphanum "-hh/nn/mm:tt": false, "hh/-nn/mm:tt": false, "hh/nn/-mm:tt": false, "hh/nn/mm:-tt": false, // hosts "host:https/namespace/model:tag": true, // colon in non-host part before tag 
"host/name:space/model:tag": false, } func TestNameparseNameDefault(t *testing.T) { const name = "xx" n := ParseName(name) got := n.String() want := "registry.ollama.ai/library/xx:latest" if got != want { t.Errorf("parseName(%q).String() = %q; want %q", name, got, want) } } func TestNameIsValid(t *testing.T) { var numStringTests int for s, want := range testCases { n := ParseNameBare(s) got := n.IsValid() if got != want { t.Errorf("parseName(%q).IsValid() = %v; want %v", s, got, want) } // Test roundtrip with String if got { got := ParseNameBare(s).String() if got != s { t.Errorf("parseName(%q).String() = %q; want %q", s, got, s) } numStringTests++ } } if numStringTests == 0 { t.Errorf("no tests for Name.String") } } func TestNameIsValidPart(t *testing.T) { cases := []struct { kind partKind s string want bool }{ {kind: kindHost, s: "", want: false}, {kind: kindHost, s: "a", want: true}, {kind: kindHost, s: "a.", want: true}, {kind: kindHost, s: "a.b", want: true}, {kind: kindHost, s: "a:123", want: true}, {kind: kindHost, s: "a:123/aa/bb", want: false}, {kind: kindNamespace, s: "bb", want: true}, {kind: kindNamespace, s: "a.", want: false}, {kind: kindModel, s: "-h", want: false}, {kind: kindDigest, s: "sha256-1000000000000000000000000000000000000000000000000000000000000000", want: true}, } for _, tt := range cases { t.Run(tt.s, func(t *testing.T) { got := isValidPart(tt.kind, tt.s) if got != tt.want { t.Errorf("isValidPart(%s, %q) = %v; want %v", tt.kind, tt.s, got, tt.want) } }) } } func TestFilepathAllocs(t *testing.T) { n := ParseNameBare("HOST/NAMESPACE/MODEL:TAG") allocs := testing.AllocsPerRun(1000, func() { n.Filepath() }) var allowedAllocs float64 = 1 if runtime.GOOS == "windows" { allowedAllocs = 3 } if allocs > allowedAllocs { t.Errorf("allocs = %v; allowed %v", allocs, allowedAllocs) } } func TestParseNameFromFilepath(t *testing.T) { cases := map[string]Name{ filepath.Join("host", "namespace", "model", "tag"): {Host: "host", Namespace: "namespace", 
Model: "model", Tag: "tag"}, filepath.Join("host:port", "namespace", "model", "tag"): {Host: "host:port", Namespace: "namespace", Model: "model", Tag: "tag"}, filepath.Join("namespace", "model", "tag"): {}, filepath.Join("model", "tag"): {}, "model": {}, filepath.Join("..", "..", "model", "tag"): {}, filepath.Join("", "namespace", ".", "tag"): {}, filepath.Join(".", ".", ".", "."): {}, filepath.Join("/", "path", "to", "random", "file"): {}, } for in, want := range cases { t.Run(in, func(t *testing.T) { got := ParseNameFromFilepath(in) if !reflect.DeepEqual(got, want) { t.Errorf("parseNameFromFilepath(%q) = %v; want %v", in, got, want) } }) } } func TestDisplayShortest(t *testing.T) { cases := map[string]string{ "registry.ollama.ai/library/model:latest": "model:latest", "registry.ollama.ai/library/model:tag": "model:tag", "registry.ollama.ai/namespace/model:tag": "namespace/model:tag", "host/namespace/model:tag": "host/namespace/model:tag", "host/library/model:tag": "host/library/model:tag", } for in, want := range cases { t.Run(in, func(t *testing.T) { got := ParseNameBare(in).DisplayShortest() if got != want { t.Errorf("parseName(%q).DisplayShortest() = %q; want %q", in, got, want) } }) } } func FuzzName(f *testing.F) { for s := range testCases { f.Add(s) } f.Fuzz(func(t *testing.T, s string) { n := ParseNameBare(s) if n.IsValid() { parts := [...]string{n.Host, n.Namespace, n.Model, n.Tag} for _, part := range parts { if part == ".." { t.Errorf("unexpected .. 
as valid part") } if len(part) > 350 { t.Errorf("part too long: %q", part) } } if n.String() != s { t.Errorf("String() = %q; want %q", n.String(), s) } } }) } func TestIsValidNamespace(t *testing.T) { cases := []struct { username string expected bool }{ {"", false}, {"a", true}, {"a:b", false}, {"a/b", false}, {"a:b/c", false}, {"a/b:c", false}, {"a/b:c", false}, {"a/b:c/d", false}, {"a/b:c/d@e", false}, {"a/b:c/d@sha256-100", false}, {"himynameisjoe", true}, {"himynameisreallyreallyreallyreallylongbutitshouldstillbevalid", true}, } for _, tt := range cases { t.Run(tt.username, func(t *testing.T) { if got := IsValidNamespace(tt.username); got != tt.expected { t.Errorf("IsValidName(%q) = %v; want %v", tt.username, got, tt.expected) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/types/syncmap/syncmap.go
types/syncmap/syncmap.go
// Package syncmap provides a minimal, generic map guarded by a
// read-write mutex.
package syncmap

import (
	"maps"
	"sync"
)

// SyncMap is a simple, generic thread-safe map implementation.
// The zero value is not usable; construct one with NewSyncMap.
type SyncMap[K comparable, V any] struct {
	mu sync.RWMutex
	m  map[K]V
}

// NewSyncMap returns an empty, ready-to-use SyncMap.
func NewSyncMap[K comparable, V any]() *SyncMap[K, V] {
	return &SyncMap[K, V]{
		m: map[K]V{},
	}
}

// Load returns the value stored under key and whether it was present.
func (sm *SyncMap[K, V]) Load(key K) (V, bool) {
	sm.mu.RLock()
	defer sm.mu.RUnlock()
	v, ok := sm.m[key]
	return v, ok
}

// Store sets the value for key, replacing any existing entry.
func (sm *SyncMap[K, V]) Store(key K, value V) {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	sm.m[key] = value
}

// Items returns a shallow copy of the map's current contents; mutating
// the returned map does not affect the SyncMap.
func (sm *SyncMap[K, V]) Items() map[K]V {
	sm.mu.RLock()
	defer sm.mu.RUnlock()
	// shallow copy map items
	return maps.Clone(sm.m)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/types/errtypes/errtypes.go
types/errtypes/errtypes.go
// Package errtypes contains custom error types package errtypes import ( "fmt" "strings" ) const ( UnknownOllamaKeyErrMsg = "unknown ollama key" InvalidModelNameErrMsg = "invalid model name" ) // TODO: This should have a structured response from the API type UnknownOllamaKey struct { Key string } func (e *UnknownOllamaKey) Error() string { return fmt.Sprintf("unauthorized: %s %q", UnknownOllamaKeyErrMsg, strings.TrimSpace(e.Key)) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/llm/status.go
llm/status.go
package llm

import (
	"bytes"
	"os"
)

// StatusWriter is a writer that captures error messages from the llama
// runner process while passing every write through to the underlying file.
type StatusWriter struct {
	// LastErrMsg holds the most recently captured error text, if any.
	LastErrMsg string
	out        *os.File
}

// NewStatusWriter returns a StatusWriter that forwards all writes to out.
func NewStatusWriter(out *os.File) *StatusWriter {
	return &StatusWriter{out: out}
}

// TODO - regex matching to detect errors like
// libcublasLt.so.11: cannot open shared object file: No such file or directory
var errorPrefixes = []string{
	"error:",
	"CUDA error",
	"ROCm error",
	"cudaMalloc failed",
	"\"ERR\"",
	"error loading model",
	"GGML_ASSERT",
	"Deepseek2 does not support K-shift",
}

// Write scans b for known error markers, remembering the marker plus the
// trimmed text that follows it, then forwards b unchanged to the wrapped
// file. When several markers match, the one latest in errorPrefixes wins.
func (w *StatusWriter) Write(b []byte) (int, error) {
	var msg string
	for _, prefix := range errorPrefixes {
		if _, rest, found := bytes.Cut(b, []byte(prefix)); found {
			msg = prefix + string(bytes.TrimSpace(rest))
		}
	}
	if msg != "" {
		w.LastErrMsg = msg
	}
	return w.out.Write(b)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/llm/server_test.go
llm/server_test.go
package llm import ( "context" "errors" "fmt" "strings" "testing" "github.com/ollama/ollama/api" "github.com/ollama/ollama/format" "github.com/ollama/ollama/ml" "golang.org/x/sync/semaphore" ) func TestLLMServerFitGPU(t *testing.T) { minMemory := 457 * format.MebiByte tests := []struct { name string gpus []ml.DeviceInfo layers []int numGPU int requireFull bool expected ml.GPULayersList expectedErr error }{ { name: "No GPU", layers: []int{50 * format.MebiByte, 50 * format.MebiByte, 50 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{}, requireFull: true, // Should not try to evict even though we can't load any layers }, { name: "Full single GPU", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{50 * format.MebiByte, 50 * format.MebiByte, 50 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{0, 1, 2}}}, }, { name: "Partial single GPU", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{1, 2}}}, }, { name: "Single GPU with numGPU 1", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{50 * format.MebiByte, 50 * format.MebiByte, 50 * format.MebiByte}, numGPU: 1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{1}}}, }, { name: "Single GPU with numGPU 0", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{50 * format.MebiByte, 50 * format.MebiByte, 50 * format.MebiByte}, numGPU: 0, expected: ml.GPULayersList{}, }, { name: "Single GPU with numGPU 999", gpus: []ml.DeviceInfo{{DeviceID: 
ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte}, numGPU: 999, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{0, 1, 2, 3}}}, }, { name: "Multi GPU fits on one", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{50 * format.MebiByte, 50 * format.MebiByte, 50 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu1"}, Layers: []int{0, 1, 2}}}, }, { name: "Multi GPU split", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{256 * format.MebiByte, 50 * format.MebiByte, 50 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu1"}, Layers: []int{0}}, {DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{1, 2}}}, }, { name: "Multi GPU partial", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{256 * format.MebiByte, 256 * format.MebiByte, 50 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu1"}, Layers: []int{1}}}, }, { name: "Multi GPU numGPU 1", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{50 * format.MebiByte, 50 * format.MebiByte, 50 * format.MebiByte}, numGPU: 1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu1"}, Layers: 
[]int{1}}}, }, { name: "Multi GPU numGPU 2", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{256 * format.MebiByte, 50 * format.MebiByte, 50 * format.MebiByte}, numGPU: 2, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu1"}, Layers: []int{0}}, {DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{1}}}, }, { name: "Multi GPU numGPU 999", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{256 * format.MebiByte, 256 * format.MebiByte, 50 * format.MebiByte}, numGPU: 999, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu1"}, Layers: []int{0, 1}}, {DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{2}}}, }, { name: "Multi GPU different libraries", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{Library: "CUDA", ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{Library: "ROCm", ID: "gpu1"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{128 * format.MebiByte, 128 * format.MebiByte, 50 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu1", Library: "ROCm"}, Layers: []int{0, 1}}}, }, { name: "requireFull", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte}, numGPU: -1, requireFull: true, expectedErr: ErrLoadRequiredFull, }, { name: "requireFull numGPU", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(256 * format.MebiByte)}}, layers: []int{100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte}, numGPU: 4, 
requireFull: true, expectedErr: ErrLoadRequiredFull, }, { name: "iGPU", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, Integrated: true, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{50 * format.MebiByte, 50 * format.MebiByte, 50 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{0, 1, 2}}}, }, { name: "iGPU + dGPU", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, Integrated: true, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{50 * format.MebiByte, 50 * format.MebiByte, 50 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu1"}, Layers: []int{0}}, {DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{1, 2}}}, }, { name: "iGPU + dGPU fits on one", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, Integrated: true, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{50 * format.MebiByte, 50 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{0, 1}}}, }, { name: "iGPU + dGPU partial", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, Integrated: true, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte}, numGPU: -1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu1"}, Layers: []int{0, 1}}, {DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{2}}}, }, { name: "iGPU + dGPU numGPU 1", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, Integrated: true, 
FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte}, numGPU: 1, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{2}}}, }, { name: "iGPU + dGPU numGPU 999", gpus: []ml.DeviceInfo{{DeviceID: ml.DeviceID{ID: "gpu0"}, FreeMemory: uint64(128*format.MebiByte + minMemory)}, {DeviceID: ml.DeviceID{ID: "gpu1"}, Integrated: true, FreeMemory: uint64(256*format.MebiByte + minMemory)}}, layers: []int{100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte, 100 * format.MebiByte}, numGPU: 999, expected: ml.GPULayersList{{DeviceID: ml.DeviceID{ID: "gpu0"}, Layers: []int{0}}, {DeviceID: ml.DeviceID{ID: "gpu1"}, Layers: []int{1, 2, 3}}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var systemInfo ml.SystemInfo systemInfo.TotalMemory = format.GibiByte systemInfo.FreeMemory = 512 * format.MebiByte systemInfo.FreeSwap = 256 * format.MebiByte s := &ollamaServer{ llmServer: llmServer{ totalLayers: uint64(len(tt.layers)), options: api.Options{ Runner: api.Runner{ NumGPU: tt.numGPU, }, }, }, } s.mem = &ml.BackendMemory{CPU: ml.DeviceMemory{ Weights: make([]uint64, s.totalLayers), Cache: make([]uint64, s.totalLayers), }, GPUs: make([]ml.DeviceMemory, len(tt.gpus))} for i := range tt.layers { s.mem.CPU.Weights[i] = uint64(tt.layers[i]) } for i := range s.mem.GPUs { s.mem.GPUs[i].DeviceID = tt.gpus[i].DeviceID s.mem.GPUs[i].Weights = make([]uint64, s.totalLayers) s.mem.GPUs[i].Cache = make([]uint64, s.totalLayers) } gpuLayers, err := s.createLayout(systemInfo, tt.gpus, s.mem, tt.requireFull, 0) if err != tt.expectedErr { t.Fatalf("fitGPU returned error: %v", err) } if gpuLayers.Hash() != tt.expected.Hash() { t.Errorf("fitGPU assigned %v, want %v", gpuLayers, tt.expected) } }) } } func TestLLMServerCompletionFormat(t *testing.T) { // This test was written to fix an already deployed issue. 
It is a bit // of a mess, and but it's good enough, until we can refactoring the // Completion method to be more testable. ctx, cancel := context.WithCancel(t.Context()) s := &llmServer{ sem: semaphore.NewWeighted(1), // required to prevent nil panic } checkInvalid := func(format string) { t.Helper() err := s.Completion(ctx, CompletionRequest{ Options: new(api.Options), Format: []byte(format), }, nil) want := fmt.Sprintf("invalid format: %q; expected \"json\" or a valid JSON Schema", format) if err == nil || !strings.Contains(err.Error(), want) { t.Fatalf("err = %v; want %q", err, want) } } checkInvalid("X") // invalid format checkInvalid(`"X"`) // invalid JSON Schema cancel() // prevent further processing if request makes it past the format check checkValid := func(err error) { t.Helper() if !errors.Is(err, context.Canceled) { t.Fatalf("Completion: err = %v; expected context.Canceled", err) } } valids := []string{ // "missing" ``, `""`, `null`, // JSON `"json"`, `{"type":"object"}`, } for _, valid := range valids { err := s.Completion(ctx, CompletionRequest{ Options: new(api.Options), Format: []byte(valid), }, nil) checkValid(err) } err := s.Completion(ctx, CompletionRequest{ Options: new(api.Options), Format: nil, // missing format }, nil) checkValid(err) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/llm/server.go
llm/server.go
package llm import ( "bufio" "bytes" "context" "encoding/json" "errors" "fmt" "io" "log" "log/slog" "math/rand" "net" "net/http" "os" "os/exec" "path/filepath" "runtime" "slices" "sort" "strconv" "strings" "sync" "time" "golang.org/x/sync/semaphore" "github.com/ollama/ollama/api" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/format" "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/llama" "github.com/ollama/ollama/logutil" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model" ) type filteredEnv []string func (e filteredEnv) LogValue() slog.Value { var attrs []slog.Attr for _, env := range e { if key, value, ok := strings.Cut(env, "="); ok { switch { case strings.HasPrefix(key, "OLLAMA_"), strings.HasPrefix(key, "CUDA_"), strings.HasPrefix(key, "ROCR_"), strings.HasPrefix(key, "ROCM_"), strings.HasPrefix(key, "HIP_"), strings.HasPrefix(key, "GPU_"), strings.HasPrefix(key, "HSA_"), strings.HasPrefix(key, "GGML_"), slices.Contains([]string{ "PATH", "LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH", }, key): attrs = append(attrs, slog.String(key, value)) } } } return slog.GroupValue(attrs...) 
} type LlamaServer interface { ModelPath() string Load(ctx context.Context, systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, requireFull bool) ([]ml.DeviceID, error) Ping(ctx context.Context) error WaitUntilRunning(ctx context.Context) error Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error Embedding(ctx context.Context, input string) ([]float32, int, error) Tokenize(ctx context.Context, content string) ([]int, error) Detokenize(ctx context.Context, tokens []int) (string, error) Close() error VRAMSize() uint64 // Total VRAM across all GPUs TotalSize() uint64 VRAMByGPU(id ml.DeviceID) uint64 Pid() int GetPort() int GetDeviceInfos(ctx context.Context) []ml.DeviceInfo HasExited() bool } // llmServer is an instance of a runner hosting a single model type llmServer struct { port int cmd *exec.Cmd done chan error // Channel to signal when the process exits status *StatusWriter options api.Options modelPath string loadRequest LoadRequest // Parameters used to initialize the runner mem *ml.BackendMemory // Memory allocations for this model // llamaModel is an instance of the cgo llama.cpp model definition // nil if this server is running the new engine llamaModel *llama.Model llamaModelLock *sync.Mutex totalLayers uint64 loadStart time.Time // Record how long it took the model to load loadProgress float32 sem *semaphore.Weighted } type llamaServer struct { llmServer ggml *ggml.GGML } type ollamaServer struct { llmServer textProcessor model.TextProcessor // textProcessor handles text encoding/decoding } // LoadModel will load a model from disk. The model must be in the GGML format. // // It collects array values for arrays with a size less than or equal to // maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If // the maxArraySize is negative, all arrays are collected. 
func LoadModel(model string, maxArraySize int) (*ggml.GGML, error) { if _, err := os.Stat(model); err != nil { return nil, err } f, err := os.Open(model) if err != nil { return nil, err } defer f.Close() ggml, err := ggml.Decode(f, maxArraySize) return ggml, err } // NewLlamaServer will run a server for the given GPUs func NewLlamaServer(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, modelPath string, f *ggml.GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) { var llamaModel *llama.Model var textProcessor model.TextProcessor var err error if envconfig.NewEngine() || f.KV().OllamaEngineRequired() { if len(projectors) == 0 { textProcessor, err = model.NewTextProcessor(modelPath) } else { err = errors.New("split vision models aren't supported") } if err != nil { // To prepare for opt-out mode, instead of treating this as an error, we fallback to the old runner slog.Debug("model not yet supported by Ollama engine, switching to compatibility mode", "model", modelPath, "error", err) } } if textProcessor == nil { llamaModel, err = llama.LoadModelFromFile(modelPath, llama.ModelParams{VocabOnly: true}) if err != nil { return nil, err } } // Verify the requested context size is <= the model training size trainCtx := f.KV().ContextLength() if opts.NumCtx > int(trainCtx) && trainCtx > 0 { slog.Warn("requested context size too large for model", "num_ctx", opts.NumCtx, "n_ctx_train", trainCtx) opts.NumCtx = int(trainCtx) } opts.NumBatch = min(opts.NumBatch, opts.NumCtx) loadRequest := LoadRequest{LoraPath: adapters, KvSize: opts.NumCtx * numParallel, BatchSize: opts.NumBatch, Parallel: numParallel, MultiUserCache: envconfig.MultiUserCache()} defaultThreads := systemInfo.ThreadCount if opts.NumThread > 0 { loadRequest.NumThreads = opts.NumThread } else if defaultThreads > 0 { loadRequest.NumThreads = defaultThreads } // TODO - NUMA support currently doesn't work properly if opts.MainGPU > 0 { loadRequest.MainGPU = opts.MainGPU } if 
len(projectors) > 0 && llamaModel != nil { loadRequest.ProjectorPath = projectors[0] } // Determine if the user has forced FA on or off faUserSet := false if envconfig.FlashAttention(true) == envconfig.FlashAttention(false) { faUserSet = true } fa := envconfig.FlashAttention(f.FlashAttention()) // This will disable flash attention unless all GPUs on the system support it, even if we end up selecting a subset // that can handle it. if fa && !ml.FlashAttentionSupported(gpus) { slog.Warn("flash attention enabled but not supported by gpu") fa = false } if fa && !f.SupportsFlashAttention() { slog.Warn("flash attention enabled but not supported by model") fa = false } kvct := strings.ToLower(envconfig.KvCacheType()) if textProcessor == nil { flashAttention := ml.FlashAttentionAuto if faUserSet { if fa { flashAttention = ml.FlashAttentionEnabled } else { flashAttention = ml.FlashAttentionDisabled } } if kvct != "" { if f.KVCacheTypeIsQuantized(kvct) { if flashAttention != ml.FlashAttentionEnabled { slog.Warn("OLLAMA_FLASH_ATTENTION must be enabled to use a quantized OLLAMA_KV_CACHE_TYPE", "type", kvct) loadRequest.KvCacheType = "" } else if f.SupportsKVCacheType(kvct) { loadRequest.KvCacheType = kvct } else { slog.Warn("unsupported OLLAMA_KV_CACHE_TYPE", "type", kvct) } } else { if f.SupportsKVCacheType(kvct) { loadRequest.KvCacheType = kvct } else { slog.Warn("unsupported OLLAMA_KV_CACHE_TYPE", "type", kvct) } } } loadRequest.FlashAttention = flashAttention } else { // For Ollama engine, use our SupportsFlashAttention logic if fa { slog.Info("enabling flash attention") loadRequest.FlashAttention = ml.FlashAttentionEnabled // Flash Attention also supports kv cache quantization // Enable if the requested and kv cache type is supported by the model if f.SupportsKVCacheType(kvct) { loadRequest.KvCacheType = kvct } else { slog.Warn("kv cache type not supported by model", "type", kvct) } } else if kvct != "" && kvct != "f16" { slog.Warn("quantized kv cache requested but flash 
attention disabled", "type", kvct) } } gpuLibs := ml.LibraryPaths(gpus) status := NewStatusWriter(os.Stderr) cmd, port, err := StartRunner( textProcessor != nil, modelPath, gpuLibs, status, ml.GetVisibleDevicesEnv(gpus, false), ) s := llmServer{ port: port, cmd: cmd, status: status, options: opts, modelPath: modelPath, loadRequest: loadRequest, llamaModel: llamaModel, llamaModelLock: &sync.Mutex{}, sem: semaphore.NewWeighted(int64(numParallel)), totalLayers: f.KV().BlockCount() + 1, loadStart: time.Now(), done: make(chan error, 1), } if err != nil { var msg string if s.status != nil && s.status.LastErrMsg != "" { msg = s.status.LastErrMsg } err := fmt.Errorf("error starting runner: %v %s", err, msg) if llamaModel != nil { llama.FreeModel(llamaModel) } return nil, err } // reap subprocess when it exits go func() { err := s.cmd.Wait() // Favor a more detailed message over the process exit status if err != nil && s.status != nil && s.status.LastErrMsg != "" { slog.Error("llama runner terminated", "error", err) if strings.Contains(s.status.LastErrMsg, "unknown model") { s.status.LastErrMsg = "this model is not supported by your version of Ollama. 
You may need to upgrade" } s.done <- errors.New(s.status.LastErrMsg) } else { s.done <- err } }() if textProcessor != nil { return &ollamaServer{llmServer: s, textProcessor: textProcessor}, nil } else { return &llamaServer{llmServer: s, ggml: f}, nil } } func StartRunner(ollamaEngine bool, modelPath string, gpuLibs []string, out io.Writer, extraEnvs map[string]string) (cmd *exec.Cmd, port int, err error) { var exe string exe, err = os.Executable() if err != nil { return nil, 0, fmt.Errorf("unable to lookup executable path: %w", err) } if eval, err := filepath.EvalSymlinks(exe); err == nil { exe = eval } port = 0 if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil { var l *net.TCPListener if l, err = net.ListenTCP("tcp", a); err == nil { port = l.Addr().(*net.TCPAddr).Port l.Close() } } if port == 0 { slog.Debug("ResolveTCPAddr failed, using random port") port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range } params := []string{"runner"} if ollamaEngine { params = append(params, "--ollama-engine") } if modelPath != "" { params = append(params, "--model", modelPath) } params = append(params, "--port", strconv.Itoa(port)) var pathEnv string switch runtime.GOOS { case "windows": pathEnv = "PATH" case "darwin": pathEnv = "DYLD_LIBRARY_PATH" default: pathEnv = "LD_LIBRARY_PATH" } // Note: we always put our dependency paths first // since these are the exact version we compiled/linked against libraryPaths := append([]string{}, gpuLibs...) if libraryPath, ok := os.LookupEnv(pathEnv); ok { libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...) } cmd = exec.Command(exe, params...) 
cmd.Env = os.Environ() if out != nil { stdout, err := cmd.StdoutPipe() if err != nil { return nil, 0, fmt.Errorf("failed to spawn server stdout pipe: %w", err) } stderr, err := cmd.StderrPipe() if err != nil { return nil, 0, fmt.Errorf("failed to spawn server stderr pipe: %w", err) } go func() { io.Copy(out, stdout) //nolint:errcheck }() go func() { io.Copy(out, stderr) //nolint:errcheck }() } cmd.SysProcAttr = LlamaServerSysProcAttr // Always filter down the set of GPUs in case there are any unsupported devices that might crash pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator)) // Update or add the path variable with our adjusted version pathNeeded := true ollamaPathNeeded := true extraEnvsDone := map[string]bool{} for k := range extraEnvs { extraEnvsDone[k] = false } for i := range cmd.Env { cmp := strings.SplitN(cmd.Env[i], "=", 2) if strings.EqualFold(cmp[0], pathEnv) { cmd.Env[i] = pathEnv + "=" + pathEnvVal pathNeeded = false } else if strings.EqualFold(cmp[0], "OLLAMA_LIBRARY_PATH") { cmd.Env[i] = "OLLAMA_LIBRARY_PATH=" + strings.Join(gpuLibs, string(filepath.ListSeparator)) ollamaPathNeeded = false } else if len(extraEnvs) != 0 { for k, v := range extraEnvs { if strings.EqualFold(cmp[0], k) { cmd.Env[i] = k + "=" + v extraEnvsDone[k] = true } } } } if pathNeeded { cmd.Env = append(cmd.Env, pathEnv+"="+pathEnvVal) } if ollamaPathNeeded { cmd.Env = append(cmd.Env, "OLLAMA_LIBRARY_PATH="+strings.Join(gpuLibs, string(filepath.ListSeparator))) } for k, done := range extraEnvsDone { if !done { cmd.Env = append(cmd.Env, k+"="+extraEnvs[k]) } } slog.Info("starting runner", "cmd", cmd) slog.Debug("subprocess", "", filteredEnv(cmd.Env)) if err = cmd.Start(); err != nil { return nil, 0, err } err = nil return } func (s *llmServer) ModelPath() string { return s.modelPath } type LoadOperation int // The order of these constants are significant because we iterate over the operations. They // should be in order of increasingly loading the model. 
const ( LoadOperationFit LoadOperation = iota // Return memory requirements but do not allocate LoadOperationAlloc // Allocate memory but do not load the weights LoadOperationCommit // Load weights - further changes cannot be made after this LoadOperationClose // Close model and free memory ) func (o LoadOperation) String() string { switch o { case LoadOperationFit: return "fit" case LoadOperationAlloc: return "alloc" case LoadOperationCommit: return "commit" case LoadOperationClose: return "close" default: return "unknown" } } type LoadRequest struct { Operation LoadOperation LoraPath []string Parallel int BatchSize int FlashAttention ml.FlashAttentionType KvSize int KvCacheType string NumThreads int GPULayers ml.GPULayersList MultiUserCache bool // Legacy fields - not used with the Ollama engine ProjectorPath string MainGPU int UseMmap bool } type LoadResponse struct { Success bool Memory ml.BackendMemory } var ErrLoadRequiredFull = errors.New("unable to load full model on GPU") func (s *llamaServer) Load(ctx context.Context, systemInfo ml.SystemInfo, systemGPUs []ml.DeviceInfo, requireFull bool) ([]ml.DeviceID, error) { slog.Info("loading model", "model layers", s.totalLayers, "requested", s.options.NumGPU) gpus := append(make([]ml.DeviceInfo, 0, len(systemGPUs)), systemGPUs...) 
// Synthesize memory allocation information based on our estimates s.mem = &ml.BackendMemory{CPU: ml.DeviceMemory{ Name: "CPU", Weights: make([]uint64, s.totalLayers), Cache: make([]uint64, s.totalLayers), }, GPUs: make([]ml.DeviceMemory, len(gpus))} for i := range s.mem.GPUs { s.mem.GPUs[i].Name = gpus[i].Name s.mem.GPUs[i].DeviceID = gpus[i].DeviceID s.mem.GPUs[i].Weights = make([]uint64, s.totalLayers) s.mem.GPUs[i].Cache = make([]uint64, s.totalLayers) } // Check if embedding model and adjust batch size accordingly _, isEmbedding := s.ggml.KV()[fmt.Sprintf("%s.pooling_type", s.ggml.KV().Architecture())] if isEmbedding && s.loadRequest.BatchSize < s.options.NumCtx { s.loadRequest.BatchSize = s.options.NumCtx slog.Info("embedding model detected, setting batch size to context length", "batch_size", s.loadRequest.BatchSize) } kv, graphPartialOffload, graphFullOffload := s.ggml.GraphSize(uint64(s.options.NumCtx), uint64(s.loadRequest.BatchSize), s.loadRequest.Parallel, s.loadRequest.KvCacheType, s.loadRequest.FlashAttention) // Use the size of one layer as a buffer layers := s.ggml.Tensors().GroupLayers() if blk0, ok := layers["blk.0"]; ok { buffer := blk0.Size() + kv[0] for i := range gpus { if gpus[i].FreeMemory > buffer { gpus[i].FreeMemory -= buffer } else { gpus[i].FreeMemory = 0 } } } else { slog.Warn("model missing blk.0 layer size") } // Assign all the layers to the CPU for now, they will get reassigned later for i := range s.ggml.KV().BlockCount() { if blk, ok := layers[fmt.Sprintf("blk.%d", i)]; ok { s.mem.CPU.Weights[i] = blk.Size() s.mem.CPU.Cache[i] += kv[i] } } // We historically haven't included InputWeights in the model size var outputWeights uint64 if layer, ok := layers["output_norm"]; ok { outputWeights += layer.Size() } if layer, ok := layers["output"]; ok { outputWeights += layer.Size() } else if layer, ok := layers["token_embd"]; ok { outputWeights += layer.Size() } s.mem.CPU.Weights[s.totalLayers-1] = outputWeights // The vision projector is 
always loaded on the first GPU if available. // This can't be assigned by us, so just subtract it from free space projectorGPU := -1 var projectorWeights uint64 if len(gpus) > 0 { for _, projector := range s.loadRequest.LoraPath { projectorWeights += projectorMemoryRequirements(projector) } // llama.cpp uses the first discrete GPU if available, otherwise the first iGPU firstIntegrated := -1 for i := range gpus { if !gpus[i].Integrated { projectorGPU = i break } if firstIntegrated == -1 { firstIntegrated = i } } if projectorGPU == -1 { projectorGPU = firstIntegrated } if gpus[projectorGPU].FreeMemory > projectorWeights { gpus[projectorGPU].FreeMemory -= projectorWeights } else { gpus[projectorGPU].FreeMemory = 0 } } var kvTotal uint64 for _, kvLayer := range kv { kvTotal += kvLayer } if graphPartialOffload == 0 { headsKV := s.ggml.KV().HeadCountKVMin() if headsKV == 0 { headsKV = 1 } gqa := s.ggml.KV().HeadCountMax() / headsKV graphPartialOffload = gqa * kvTotal / 6 } if graphFullOffload == 0 { graphFullOffload = graphPartialOffload } // On Metal there's no partial offload overhead if len(gpus) > 0 && gpus[0].Library == "Metal" { graphPartialOffload = graphFullOffload } // Create a layout based on the memory data that we've built. The compute graph // for GPUs is iteratively assigned based on the number of GPUs that are required. 
var gpuLayers ml.GPULayersList for { prevGPULayers := gpuLayers var err error gpuLayers, err = s.createLayout(systemInfo, gpus, s.mem, requireFull, 0) if err != nil { return nil, err } if len(gpuLayers) > len(prevGPULayers) { for _, gl := range gpuLayers { for i := range s.mem.GPUs { if gl.DeviceID == s.mem.GPUs[i].DeviceID { s.mem.GPUs[i].Graph = max(graphPartialOffload, graphFullOffload) break } } } } else { break } } // This maintains the historical assignment of graph sizes, though it isn't fully accurate graphSize := graphFullOffload if gpuLayers.Sum() < int(s.totalLayers) { graphSize = graphPartialOffload } // For all layers that we have assigned to GPUs, move them in the memory data so // that it is reported accurately for _, gl := range gpuLayers { for i := range s.mem.GPUs { if gl.DeviceID == s.mem.GPUs[i].DeviceID { for _, l := range gl.Layers { s.mem.GPUs[i].Weights[l] = s.mem.CPU.Weights[l] s.mem.GPUs[i].Cache[l] = s.mem.CPU.Cache[l] s.mem.CPU.Weights[l] = 0 s.mem.CPU.Cache[l] = 0 } s.mem.GPUs[i].Graph = graphSize break } } } if projectorGPU > 0 && len(s.mem.GPUs[projectorGPU].Weights) > 0 { s.mem.GPUs[projectorGPU].Weights[s.totalLayers-1] += projectorWeights } slog.Debug("memory", "estimate", s.mem) s.mem.Log(slog.LevelInfo) // The llama engine uses mmap by default s.loadRequest.UseMmap = true // mmap has issues with partial offloading on metal for _, g := range gpus { if g.Library == "Metal" && uint64(s.options.NumGPU) > 0 && uint64(s.options.NumGPU) < s.totalLayers { s.options.UseMMap = new(bool) *s.options.UseMMap = false } } // Windows CUDA should not use mmap for best performance // Linux with a model larger than free space, mmap leads to thrashing // For CPU loads we want the memory to be allocated, not FS cache if (runtime.GOOS == "windows" && len(gpus) > 0 && gpus[0].Library == "CUDA" && s.options.UseMMap == nil) || (runtime.GOOS == "linux" && systemInfo.FreeMemory < s.TotalSize() && s.options.UseMMap == nil) || (len(gpus) == 0 && 
s.options.UseMMap == nil) || (len(gpus) > 0 && gpus[0].Library == "Vulkan" && s.options.UseMMap == nil) || (s.options.UseMMap != nil && !*s.options.UseMMap) { s.loadRequest.UseMmap = false } if err := s.waitUntilRunnerLaunched(ctx); err != nil { return nil, err } s.loadRequest.GPULayers = gpuLayers resp, err := s.initModel(ctx, s.loadRequest, LoadOperationCommit) if err != nil { return nil, err } if !resp.Success { return nil, errors.New("failed to allocate memory for model") } // The llama engine does its memory allocations together with model loading, so we // need to wait until it is done to ensure that we have accurate memory data before // loading the next model return uniqueDeviceIDs(s.loadRequest.GPULayers), s.WaitUntilRunning(ctx) } func projectorMemoryRequirements(filename string) (weights uint64) { file, err := os.Open(filename) if err != nil { return 0 } defer file.Close() ggml, err := ggml.Decode(file, 1024) if err != nil { return 0 } for _, layer := range ggml.Tensors().GroupLayers() { weights += layer.Size() } return weights } // Load finds the optimal layout of layers to offload on GPUs based on no initial information about the size of the model // It does this by: // 1. Assigning the full model to the GPU with the largest available free memory // 2. Attempting to allocate the layout and receiving the memory requirements in response // 3. Creating a new layout based on the updated memory information // 4. Going back to step 2 and looping until we either stabilize on a particular layout or discover that we have entered a cycle // // This process is repeated for higher levels of loading the model (fit, allocate, commit). The earlier levels are quicker, // allowing for faster iteration, but may return less information. 
// // Returns the list of GPU IDs that were used in the final allocation on success func (s *ollamaServer) Load(ctx context.Context, systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, requireFull bool) ([]ml.DeviceID, error) { var success bool defer func() { if !success { s.initModel(ctx, LoadRequest{}, LoadOperationClose) } if s.mem != nil { s.mem.Log(slog.LevelInfo) } }() slog.Info("loading model", "model layers", s.totalLayers, "requested", s.options.NumGPU) pastAllocations := make(map[uint64]struct{}) var backoff float32 gpuLayers, err := s.createLayout(systemInfo, gpus, s.mem, requireFull, backoff) if err != nil { return nil, err } if err := s.waitUntilRunnerLaunched(ctx); err != nil { return nil, err } nextOperation: for operation := LoadOperationFit; operation < LoadOperationCommit; operation++ { nextLoad: for { s.loadRequest.GPULayers = gpuLayers resp, err := s.initModel(ctx, s.loadRequest, operation) if err != nil { return nil, err } resp.Memory.Log(slog.LevelDebug) slog.Debug("memory", "success", resp.Success, "required", resp.Memory) pastAllocations[gpuLayers.Hash()] = struct{}{} s.mem = &resp.Memory for { newGPULayers, err := s.createLayout(systemInfo, gpus, s.mem, requireFull, backoff) if err != nil { return nil, err } slog.Debug("new layout created", "layers", newGPULayers) // We get additional memory information over time, which will reduce the number of // layers that can fit, so fewer layers is actually better. As long as we haven't seen // this layout before and it doesn't have more layers than the last one, we can keep // trying to see if we can do better. if _, ok := pastAllocations[newGPULayers.Hash()]; !ok && newGPULayers.Sum() <= gpuLayers.Sum() { gpuLayers = newGPULayers continue nextLoad } // If we are looping around a few different layouts due to graphs moving off and on // GPUs, make sure that we try out the intermediate states. For example, if we are // looping between offloading 39 and 41 layers, we should also check 40. 
// // This switches strategies to force an incremental number of layers to be offloaded // and checking the memory layout. If the allocation succeeds and creating a new layout // without forcing offload yields the same or greater number of layers offloaded, then // the trial is successful. // // This alternate strategy does not introduce the possibility of loops with the overall // state machine, as it exits this code block either with a successful result, moving // to the next operation or the original number of layers offloaded. if s.options.NumGPU < 0 && newGPULayers.Sum()-gpuLayers.Sum() > 1 { for i := newGPULayers.Sum() - 1; i >= gpuLayers.Sum(); i-- { slog.Debug("exploring intermediate layers", "layer", i) s.options.NumGPU = i newGPULayers, err = s.createLayout(systemInfo, gpus, s.mem, requireFull, backoff) s.options.NumGPU = -1 if err != nil { return nil, err } slog.Debug("new layout created", "layers", newGPULayers) s.loadRequest.GPULayers = newGPULayers resp, err = s.initModel(ctx, s.loadRequest, operation) if err != nil { return nil, err } resp.Memory.Log(slog.LevelDebug) slog.Debug("memory", "success", resp.Success, "required", resp.Memory) if resp.Success { verifyGPULayers, err := s.createLayout(systemInfo, gpus, &resp.Memory, requireFull, backoff) if err != nil { return nil, err } slog.Debug("verifying layout", "layers", verifyGPULayers) if newGPULayers.Sum() <= verifyGPULayers.Sum() { gpuLayers = newGPULayers // Since we are going backwards (increasing the number of layers), ensure that // we can come back down if needed clear(pastAllocations) continue nextOperation } } } } // If we generated a layout a second time or go backwards, then we've converged. Use the last // layout before the repeat, which is already allocated. 
if resp.Success { continue nextOperation } if s.options.NumGPU >= 0 { return nil, fmt.Errorf("memory layout cannot be allocated with num_gpu = %v", s.options.NumGPU) } // Memory allocation failed even though we created a layout that we thought should // fit in available memory. This could happen if either our free memory reports // are incorrect or if available memory is changing between layout and allocation // time. Apply a backoff to try to find the real amount of available space. if backoff > 1 { slog.Warn("memory layout cannot be allocated", "memory", resp.Memory) return nil, errors.New("memory layout cannot be allocated") } else { backoff += 0.1 } slog.Info("model layout did not fit, applying backoff", "backoff", fmt.Sprintf("%.2f", backoff)) } } } s.loadRequest.GPULayers = gpuLayers resp, err := s.initModel(ctx, s.loadRequest, LoadOperationCommit) if err != nil { return nil, err } success = resp.Success s.mem = &resp.Memory if !success { slog.Warn("failed to commit memory for model", "memory", resp.Memory) return nil, errors.New("failed to commit memory for model") } return uniqueDeviceIDs(gpuLayers), nil } func uniqueDeviceIDs(gpuLayers ml.GPULayersList) []ml.DeviceID { devices := []ml.DeviceID{} for _, layer := range gpuLayers { new := true for _, ID := range devices { if layer.DeviceID == ID { new = false break } } if new { devices = append(devices, layer.DeviceID) } } return devices } // createLayout uses the current best view of memory requirements and creates a layout of model layers on GPUs. 
// It does this by: // - Calculating how much space each layer requires // - Calculating how much space each GPU has available for layers, based on free memory and space occupied by the graph // - Assigning layers // - Ensuring that we don't exceed limits, such as requirements about partial offloading or system memory func (s *llmServer) createLayout(systemInfo ml.SystemInfo, systemGPUs []ml.DeviceInfo, memory *ml.BackendMemory, requireFull bool, backoff float32) (ml.GPULayersList, error) { if memory == nil { memory = &ml.BackendMemory{CPU: ml.DeviceMemory{ Weights: make([]uint64, s.totalLayers), Cache: make([]uint64, s.totalLayers), }} } gpuLayers, layers := s.buildLayout(systemGPUs, memory, requireFull, backoff) err := s.verifyLayout(systemInfo, systemGPUs, memory, requireFull, gpuLayers, layers) if err != nil { return nil, err } return gpuLayers, nil } func (s *llmServer) buildLayout(systemGPUs []ml.DeviceInfo, memory *ml.BackendMemory, requireFull bool, backoff float32) (ml.GPULayersList, []uint64) { gpus := append(make([]ml.DeviceInfo, 0, len(systemGPUs)), systemGPUs...) sort.Sort(sort.Reverse(ml.ByFreeMemory(gpus))) layers := make([]uint64, len(memory.CPU.Weights)) for i := range layers { for j := range memory.GPUs { layers[i] += memory.GPUs[j].Weights[i] layers[i] += memory.GPUs[j].Cache[i] } layers[i] += memory.CPU.Weights[i] layers[i] += memory.CPU.Cache[i] logutil.Trace("layer to assign", "layer", i, "size", format.HumanBytes2(layers[i])) } gpuLayers := ml.GPULayersList{} for _, gl := range ml.ByLibrary(gpus) { // If a GPU already has a graph allocated on it, then we should continue to use it. // Otherwise, we lose information that we got from previous allocations, which can // cause cycling. Plus, we get more information about required allocation from each // iteration, so it doesn't make sense that a later iteration would use fewer GPUs. 
lastUsedGPU := 0 for i := range gl { found := false for j := range memory.GPUs { if gl[i].DeviceID == memory.GPUs[j].DeviceID { if memory.GPUs[j].Graph != 0 { lastUsedGPU = i } reserved := uint64(float32(gl[i].FreeMemory)*backoff) + gl[i].MinimumMemory() + envconfig.GpuOverhead() + memory.GPUs[j].Graph if gl[i].FreeMemory > reserved { gl[i].FreeMemory -= reserved } else { gl[i].FreeMemory = 0 } slog.Debug("available gpu", "id", gl[i].ID, "library", gl[i].Library, "available layer vram", format.HumanBytes2(gl[i].FreeMemory), "backoff", fmt.Sprintf("%.2f", backoff), "minimum", format.HumanBytes2(gl[i].MinimumMemory()), "overhead", format.HumanBytes2(envconfig.GpuOverhead()), "graph", format.HumanBytes2(memory.GPUs[j].Graph)) found = true break } } if !found { // The runner doesn't report seeing this GPU gl[i].FreeMemory = 0 } } libraryGpuLayers := assignLayers(layers, gl, requireFull, s.options.NumGPU, lastUsedGPU) if libraryGpuLayers.Sum() > gpuLayers.Sum() { gpuLayers = libraryGpuLayers } } return gpuLayers, layers } // verifyLayout ensures that we don't exceed limits, such as requirements about partial offloading or system memory func (s *llmServer) verifyLayout(systemInfo ml.SystemInfo, systemGPUs []ml.DeviceInfo, memory *ml.BackendMemory, requireFull bool, gpuLayers ml.GPULayersList, layers []uint64) error { // These sizes will only increase as we go through additional iterations and get additional information. 
cpuSize := memory.InputWeights + memory.CPU.Graph var vramSize uint64 for _, gl := range gpuLayers { for _, gpu := range memory.GPUs { if gl.DeviceID == gpu.DeviceID { vramSize += gpu.Graph break } } } nextLayer: for i := range layers { for _, g := range gpuLayers { for _, gl := range g.Layers { if i == gl { vramSize += layers[i] continue nextLayer } } } cpuSize += layers[i] } if requireFull { if len(systemGPUs) > 0 && gpuLayers.Sum() < len(layers) && (s.options.NumGPU < 0 || gpuLayers.Sum() < s.options.NumGPU) { slog.Info("model requires more gpu memory than is currently available, evicting a model to make space", "loaded layers", gpuLayers.Sum()) return ErrLoadRequiredFull } if cpuSize > systemInfo.FreeMemory { slog.Info("model requires more system memory than is currently available, evicting a model to make space", "required", cpuSize, "free", systemInfo.FreeMemory) return fmt.Errorf("model requires more system memory than is currently available %w", ErrLoadRequiredFull) } } // On linux and windows, over-allocating CPU memory will almost always result in an error // Darwin has fully dynamic swap so has no direct concept of free swap space if runtime.GOOS != "darwin" { available := systemInfo.FreeMemory + systemInfo.FreeSwap if cpuSize > available { slog.Warn("model request too large for system", "requested", format.HumanBytes2(cpuSize), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemInfo.TotalMemory), "free", format.HumanBytes2(systemInfo.FreeMemory), "swap", format.HumanBytes2(systemInfo.FreeSwap)) return fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(cpuSize), format.HumanBytes2(available)) } } else {
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
true
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/llm/llm_darwin.go
llm/llm_darwin.go
package llm import ( "syscall" ) var LlamaServerSysProcAttr = &syscall.SysProcAttr{}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/llm/llm_windows.go
llm/llm_windows.go
package llm import ( "syscall" ) const ( CREATE_DEFAULT_ERROR_MODE = 0x04000000 ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000 CREATE_NO_WINDOW = 0x08000000 ) var LlamaServerSysProcAttr = &syscall.SysProcAttr{ // Wire up the default error handling logic If for some reason a DLL is // missing in the path this will pop up a GUI Dialog explaining the fault so // the user can either fix their PATH, or report a bug. Without this // setting, the process exits immediately with a generic exit status but no // way to (easily) figure out what the actual missing DLL was. // // Setting Above Normal priority class ensures when running as a "background service" // with "programs" given best priority, we aren't starved of cpu cycles CreationFlags: CREATE_DEFAULT_ERROR_MODE | ABOVE_NORMAL_PRIORITY_CLASS | CREATE_NO_WINDOW, }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/llm/llm_linux.go
llm/llm_linux.go
package llm import ( "syscall" ) var LlamaServerSysProcAttr = &syscall.SysProcAttr{}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/docs/tools/extract-examples/main.go
docs/tools/extract-examples/main.go
package main import ( "bufio" "fmt" "os" "path/filepath" "regexp" "strings" ) func main() { if len(os.Args) < 2 { fmt.Fprintln(os.Stderr, "Usage: go run extract-examples.go <mdx-file>") os.Exit(1) } mdxFile := os.Args[1] f, err := os.Open(mdxFile) if err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) os.Exit(1) } defer f.Close() // Create temp directory tempDir, err := os.MkdirTemp("", "mdx-examples-*") if err != nil { fmt.Fprintf(os.Stderr, "Error creating temp dir: %v\n", err) os.Exit(1) } fmt.Printf("Extracting code examples to: %s\n\n", tempDir) // Patterns codeBlockStart := regexp.MustCompile("^```([a-zA-Z0-9_-]+)\\s+([^\\s]+)$") codeGroupStart := regexp.MustCompile("^<CodeGroup") codeGroupEnd := regexp.MustCompile("^</CodeGroup>") scanner := bufio.NewScanner(f) inCodeBlock := false inCodeGroup := false var currentFile string var content strings.Builder count := 0 codeGroupNum := 0 for scanner.Scan() { line := scanner.Text() // Track CodeGroup boundaries if codeGroupStart.MatchString(line) { inCodeGroup = true codeGroupNum++ continue } if codeGroupEnd.MatchString(line) { inCodeGroup = false continue } if inCodeBlock { if line == "```" { // End of code block - write file if currentFile != "" { outPath := filepath.Join(tempDir, currentFile) if err := os.WriteFile(outPath, []byte(content.String()), 0o644); err != nil { fmt.Fprintf(os.Stderr, "Error writing %s: %v\n", currentFile, err) } else { fmt.Printf(" - %s\n", currentFile) count++ } } inCodeBlock = false currentFile = "" content.Reset() } else { content.WriteString(line) content.WriteString("\n") } } else { if matches := codeBlockStart.FindStringSubmatch(line); matches != nil { inCodeBlock = true filename := matches[2] // Prefix with CodeGroup number if inside a CodeGroup if inCodeGroup { currentFile = fmt.Sprintf("%02d_%s", codeGroupNum, filename) } else { currentFile = filename } content.Reset() } } } if err := scanner.Err(); err != nil { fmt.Fprintf(os.Stderr, "Error reading file: %v\n", err) 
os.Exit(1) } // Write package.json for JavaScript dependencies packageJSON := `{ "name": "mdx-examples", "type": "module", "dependencies": { "openai": "^4", "ollama": "^0.5" } } ` if err := os.WriteFile(filepath.Join(tempDir, "package.json"), []byte(packageJSON), 0o644); err != nil { fmt.Fprintf(os.Stderr, "Error writing package.json: %v\n", err) } // Write pyproject.toml for Python dependencies pyprojectTOML := `[project] name = "mdx-examples" version = "0.0.0" dependencies = [ "openai", "ollama", ] ` if err := os.WriteFile(filepath.Join(tempDir, "pyproject.toml"), []byte(pyprojectTOML), 0o644); err != nil { fmt.Fprintf(os.Stderr, "Error writing pyproject.toml: %v\n", err) } fmt.Printf("\n") fmt.Printf("Extracted %d file(s) to %s\n", count, tempDir) fmt.Printf("\n") fmt.Printf("To run examples:\n") fmt.Printf("\n") fmt.Printf(" cd %s\n npm install # for JS examples\n", tempDir) fmt.Printf("\n") fmt.Printf("then run individual files with `node file.js`, `python file.py`, `bash file.sh`\n") }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/internal/orderedmap/orderedmap_test.go
internal/orderedmap/orderedmap_test.go
package orderedmap import ( "encoding/json" "slices" "testing" ) func TestMap_BasicOperations(t *testing.T) { m := New[string, int]() // Test empty map if m.Len() != 0 { t.Errorf("expected Len() = 0, got %d", m.Len()) } v, ok := m.Get("a") if ok { t.Error("expected Get on empty map to return false") } if v != 0 { t.Errorf("expected zero value, got %d", v) } // Test Set and Get m.Set("a", 1) m.Set("b", 2) m.Set("c", 3) if m.Len() != 3 { t.Errorf("expected Len() = 3, got %d", m.Len()) } v, ok = m.Get("a") if !ok || v != 1 { t.Errorf("expected Get(a) = (1, true), got (%d, %v)", v, ok) } v, ok = m.Get("b") if !ok || v != 2 { t.Errorf("expected Get(b) = (2, true), got (%d, %v)", v, ok) } v, ok = m.Get("c") if !ok || v != 3 { t.Errorf("expected Get(c) = (3, true), got (%d, %v)", v, ok) } // Test updating existing key preserves position m.Set("a", 10) v, ok = m.Get("a") if !ok || v != 10 { t.Errorf("expected Get(a) = (10, true), got (%d, %v)", v, ok) } if m.Len() != 3 { t.Errorf("expected Len() = 3 after update, got %d", m.Len()) } } func TestMap_InsertionOrderPreserved(t *testing.T) { m := New[string, int]() // Insert in non-alphabetical order m.Set("z", 1) m.Set("a", 2) m.Set("m", 3) m.Set("b", 4) // Verify iteration order matches insertion order var keys []string var values []int for k, v := range m.All() { keys = append(keys, k) values = append(values, v) } expectedKeys := []string{"z", "a", "m", "b"} expectedValues := []int{1, 2, 3, 4} if !slices.Equal(keys, expectedKeys) { t.Errorf("expected keys %v, got %v", expectedKeys, keys) } if !slices.Equal(values, expectedValues) { t.Errorf("expected values %v, got %v", expectedValues, values) } } func TestMap_UpdatePreservesPosition(t *testing.T) { m := New[string, int]() m.Set("first", 1) m.Set("second", 2) m.Set("third", 3) // Update middle element m.Set("second", 20) var keys []string for k := range m.All() { keys = append(keys, k) } // Order should still be first, second, third expected := []string{"first", "second", 
"third"} if !slices.Equal(keys, expected) { t.Errorf("expected keys %v, got %v", expected, keys) } } func TestMap_MarshalJSON_PreservesOrder(t *testing.T) { m := New[string, int]() // Insert in non-alphabetical order m.Set("z", 1) m.Set("a", 2) m.Set("m", 3) data, err := json.Marshal(m) if err != nil { t.Fatalf("Marshal failed: %v", err) } // JSON should preserve insertion order, not alphabetical expected := `{"z":1,"a":2,"m":3}` if string(data) != expected { t.Errorf("expected %s, got %s", expected, string(data)) } } func TestMap_UnmarshalJSON_PreservesOrder(t *testing.T) { // JSON with non-alphabetical key order jsonData := `{"z":1,"a":2,"m":3}` m := New[string, int]() if err := json.Unmarshal([]byte(jsonData), m); err != nil { t.Fatalf("Unmarshal failed: %v", err) } // Verify iteration order matches JSON order var keys []string for k := range m.All() { keys = append(keys, k) } expected := []string{"z", "a", "m"} if !slices.Equal(keys, expected) { t.Errorf("expected keys %v, got %v", expected, keys) } } func TestMap_JSONRoundTrip(t *testing.T) { // Test that unmarshal -> marshal produces identical JSON original := `{"zebra":"z","apple":"a","mango":"m","banana":"b"}` m := New[string, string]() if err := json.Unmarshal([]byte(original), m); err != nil { t.Fatalf("Unmarshal failed: %v", err) } data, err := json.Marshal(m) if err != nil { t.Fatalf("Marshal failed: %v", err) } if string(data) != original { t.Errorf("round trip failed: expected %s, got %s", original, string(data)) } } func TestMap_ToMap(t *testing.T) { m := New[string, int]() m.Set("a", 1) m.Set("b", 2) regular := m.ToMap() if len(regular) != 2 { t.Errorf("expected len 2, got %d", len(regular)) } if regular["a"] != 1 { t.Errorf("expected regular[a] = 1, got %d", regular["a"]) } if regular["b"] != 2 { t.Errorf("expected regular[b] = 2, got %d", regular["b"]) } } func TestMap_NilSafety(t *testing.T) { var m *Map[string, int] // All operations should be safe on nil if m.Len() != 0 { t.Errorf("expected 
Len() = 0 on nil map, got %d", m.Len()) } v, ok := m.Get("a") if ok { t.Error("expected Get on nil map to return false") } if v != 0 { t.Errorf("expected zero value from nil map, got %d", v) } // Set on nil is a no-op m.Set("a", 1) if m.Len() != 0 { t.Errorf("expected Len() = 0 after Set on nil, got %d", m.Len()) } // All returns empty iterator var keys []string for k := range m.All() { keys = append(keys, k) } if len(keys) != 0 { t.Errorf("expected empty iteration on nil map, got %v", keys) } // ToMap returns nil if m.ToMap() != nil { t.Error("expected ToMap to return nil on nil map") } // MarshalJSON returns null data, err := json.Marshal(m) if err != nil { t.Fatalf("Marshal failed: %v", err) } if string(data) != "null" { t.Errorf("expected null, got %s", string(data)) } } func TestMap_EmptyMapMarshal(t *testing.T) { m := New[string, int]() data, err := json.Marshal(m) if err != nil { t.Fatalf("Marshal failed: %v", err) } if string(data) != "{}" { t.Errorf("expected {}, got %s", string(data)) } } func TestMap_NestedValues(t *testing.T) { m := New[string, any]() m.Set("string", "hello") m.Set("number", 42) m.Set("bool", true) m.Set("nested", map[string]int{"x": 1}) data, err := json.Marshal(m) if err != nil { t.Fatalf("Marshal failed: %v", err) } expected := `{"string":"hello","number":42,"bool":true,"nested":{"x":1}}` if string(data) != expected { t.Errorf("expected %s, got %s", expected, string(data)) } } func TestMap_AllIteratorEarlyExit(t *testing.T) { m := New[string, int]() m.Set("a", 1) m.Set("b", 2) m.Set("c", 3) m.Set("d", 4) // Collect only first 2 var keys []string for k := range m.All() { keys = append(keys, k) if len(keys) == 2 { break } } expected := []string{"a", "b"} if !slices.Equal(keys, expected) { t.Errorf("expected %v, got %v", expected, keys) } } func TestMap_IntegerKeys(t *testing.T) { m := New[int, string]() m.Set(3, "three") m.Set(1, "one") m.Set(2, "two") var keys []int for k := range m.All() { keys = append(keys, k) } // Should preserve 
insertion order, not numerical order expected := []int{3, 1, 2} if !slices.Equal(keys, expected) { t.Errorf("expected %v, got %v", expected, keys) } } func TestMap_UnmarshalIntoExisting(t *testing.T) { m := New[string, int]() m.Set("existing", 999) // Unmarshal should replace contents if err := json.Unmarshal([]byte(`{"new":1}`), m); err != nil { t.Fatalf("Unmarshal failed: %v", err) } _, ok := m.Get("existing") if ok { t.Error("existing key should be gone after unmarshal") } v, ok := m.Get("new") if !ok || v != 1 { t.Errorf("expected Get(new) = (1, true), got (%d, %v)", v, ok) } } func TestMap_LargeOrderPreservation(t *testing.T) { m := New[string, int]() // Create many keys in specific order keys := make([]string, 100) for i := range 100 { keys[i] = string(rune('a' + (99 - i))) // reverse order: 'd', 'c', 'b', 'a' (extended) if i >= 26 { keys[i] = string(rune('A'+i-26)) + string(rune('a'+i%26)) } } for i, k := range keys { m.Set(k, i) } // Verify order preserved var resultKeys []string for k := range m.All() { resultKeys = append(resultKeys, k) } if !slices.Equal(keys, resultKeys) { t.Error("large map should preserve insertion order") } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/internal/orderedmap/orderedmap.go
internal/orderedmap/orderedmap.go
// Package orderedmap provides a generic ordered map that maintains insertion order. // It wraps github.com/wk8/go-ordered-map/v2 to encapsulate the dependency. package orderedmap import ( "encoding/json" "iter" orderedmap "github.com/wk8/go-ordered-map/v2" ) // Map is a generic ordered map that maintains insertion order. type Map[K comparable, V any] struct { om *orderedmap.OrderedMap[K, V] } // New creates a new empty ordered map. func New[K comparable, V any]() *Map[K, V] { return &Map[K, V]{ om: orderedmap.New[K, V](), } } // Get retrieves a value by key. func (m *Map[K, V]) Get(key K) (V, bool) { if m == nil || m.om == nil { var zero V return zero, false } return m.om.Get(key) } // Set sets a key-value pair. If the key already exists, its value is updated // but its position in the iteration order is preserved. If the key is new, // it is appended to the end. func (m *Map[K, V]) Set(key K, value V) { if m == nil { return } if m.om == nil { m.om = orderedmap.New[K, V]() } m.om.Set(key, value) } // Len returns the number of entries. func (m *Map[K, V]) Len() int { if m == nil || m.om == nil { return 0 } return m.om.Len() } // All returns an iterator over all key-value pairs in insertion order. func (m *Map[K, V]) All() iter.Seq2[K, V] { return func(yield func(K, V) bool) { if m == nil || m.om == nil { return } for pair := m.om.Oldest(); pair != nil; pair = pair.Next() { if !yield(pair.Key, pair.Value) { return } } } } // ToMap converts to a regular Go map. // Note: The resulting map does not preserve order. func (m *Map[K, V]) ToMap() map[K]V { if m == nil || m.om == nil { return nil } result := make(map[K]V, m.om.Len()) for pair := m.om.Oldest(); pair != nil; pair = pair.Next() { result[pair.Key] = pair.Value } return result } // MarshalJSON implements json.Marshaler. The JSON output preserves key order. 
func (m *Map[K, V]) MarshalJSON() ([]byte, error) { if m == nil || m.om == nil { return []byte("null"), nil } return json.Marshal(m.om) } // UnmarshalJSON implements json.Unmarshaler. The insertion order matches the // order of keys in the JSON input. func (m *Map[K, V]) UnmarshalJSON(data []byte) error { m.om = orderedmap.New[K, V]() return json.Unmarshal(data, &m.om) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/template/template_test.go
template/template_test.go
package template import ( "bufio" "bytes" "encoding/json" "io" "os" "path/filepath" "slices" "strings" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" "github.com/ollama/ollama/fs/ggml" ) func TestNamed(t *testing.T) { f, err := os.Open(filepath.Join("testdata", "templates.jsonl")) if err != nil { t.Fatal(err) } defer f.Close() scanner := bufio.NewScanner(f) for scanner.Scan() { var ss map[string]string if err := json.Unmarshal(scanner.Bytes(), &ss); err != nil { t.Fatal(err) } for k, v := range ss { t.Run(k, func(t *testing.T) { kv := ggml.KV{"tokenizer.chat_template": v} s := kv.ChatTemplate() r, err := Named(s) if err != nil { t.Fatal(err) } if r.Name != k { t.Errorf("expected %q, got %q", k, r.Name) } var b bytes.Buffer if _, err := io.Copy(&b, r.Reader()); err != nil { t.Fatal(err) } tmpl, err := Parse(b.String()) if err != nil { t.Fatal(err) } if tmpl.Tree.Root.String() == "" { t.Errorf("empty %s template", k) } }) } } } func TestTemplate(t *testing.T) { cases := make(map[string][]api.Message) for _, mm := range [][]api.Message{ { {Role: "user", Content: "Hello, how are you?"}, }, { {Role: "user", Content: "Hello, how are you?"}, {Role: "assistant", Content: "I'm doing great. How can I help you today?"}, {Role: "user", Content: "I'd like to show off how chat templating works!"}, }, { {Role: "system", Content: "You are a helpful assistant."}, {Role: "user", Content: "Hello, how are you?"}, {Role: "assistant", Content: "I'm doing great. 
How can I help you today?"}, {Role: "user", Content: "I'd like to show off how chat templating works!"}, }, } { var roles []string for _, m := range mm { roles = append(roles, m.Role) } cases[strings.Join(roles, "-")] = mm } matches, err := filepath.Glob("*.gotmpl") if err != nil { t.Fatal(err) } for _, match := range matches { t.Run(match, func(t *testing.T) { bts, err := os.ReadFile(match) if err != nil { t.Fatal(err) } tmpl, err := Parse(string(bts)) if err != nil { t.Fatal(err) } for n, tt := range cases { var actual bytes.Buffer t.Run(n, func(t *testing.T) { if err := tmpl.Execute(&actual, Values{Messages: tt}); err != nil { t.Fatal(err) } expect, err := os.ReadFile(filepath.Join("testdata", match, n)) if err != nil { t.Fatal(err) } bts := actual.Bytes() if slices.Contains([]string{"chatqa.gotmpl", "llama2-chat.gotmpl", "mistral-instruct.gotmpl", "openchat.gotmpl", "vicuna.gotmpl"}, match) && bts[len(bts)-1] == ' ' { t.Log("removing trailing space from output") bts = bts[:len(bts)-1] } if diff := cmp.Diff(bts, expect); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) t.Run("legacy", func(t *testing.T) { t.Skip("legacy outputs are currently default outputs") var legacy bytes.Buffer if err := tmpl.Execute(&legacy, Values{Messages: tt, forceLegacy: true}); err != nil { t.Fatal(err) } legacyBytes := legacy.Bytes() if slices.Contains([]string{"chatqa.gotmpl", "openchat.gotmpl", "vicuna.gotmpl"}, match) && legacyBytes[len(legacyBytes)-1] == ' ' { t.Log("removing trailing space from legacy output") legacyBytes = legacyBytes[:len(legacyBytes)-1] } else if slices.Contains([]string{"codellama-70b-instruct.gotmpl", "llama2-chat.gotmpl", "mistral-instruct.gotmpl"}, match) { t.Skip("legacy outputs cannot be compared to messages outputs") } if diff := cmp.Diff(legacyBytes, actual.Bytes()); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } }) } } func TestParse(t *testing.T) { validCases := []struct { name string template string vars []string }{ 
{ name: "PromptOnly", template: "{{ .Prompt }}", vars: []string{"prompt", "response"}, }, { name: "SystemAndPrompt", template: "{{ .System }} {{ .Prompt }}", vars: []string{"prompt", "response", "system"}, }, { name: "PromptResponseSystem", template: "{{ .System }} {{ .Prompt }} {{ .Response }}", vars: []string{"prompt", "response", "system"}, }, { name: "ToolsBlock", template: "{{ with .Tools }}{{ . }}{{ end }} {{ .System }} {{ .Prompt }}", vars: []string{"prompt", "response", "system", "tools"}, }, { name: "MessagesRange", template: "{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}", vars: []string{"content", "messages", "role"}, }, { name: "ToolResultConditional", template: "{{ range .Messages }}{{ if eq .Role \"tool\" }}Tool Result: {{ .ToolName }} {{ .Content }}{{ end }}{{ end }}", vars: []string{"content", "messages", "role", "toolname"}, }, { name: "MultilineSystemUserAssistant", template: `{{- range .Messages }} {{- if eq .Role "system" }}SYSTEM: {{- else if eq .Role "user" }}USER: {{- else if eq .Role "assistant" }}ASSISTANT: {{- else if eq .Role "tool" }}TOOL: {{- end }} {{ .Content }} {{- end }}`, vars: []string{"content", "messages", "role"}, }, { name: "ChatMLLike", template: `{{- if .Messages }} {{- range .Messages }}<|im_start|>{{ .Role }} {{ .Content }}<|im_end|> {{ end }}<|im_start|>assistant {{ else -}} {{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant {{ .Response }}<|im_end|> {{- end -}}`, vars: []string{"content", "messages", "prompt", "response", "role", "system"}, }, } for _, tt := range validCases { t.Run(tt.name, func(t *testing.T) { t.Parallel() tmpl, err := Parse(tt.template) if err != nil { t.Fatalf("Parse returned unexpected error: %v", err) } gotVars, err := tmpl.Vars() if err != nil { t.Fatalf("Vars returned unexpected error: %v", err) } if diff := cmp.Diff(gotVars, tt.vars); diff != "" { t.Errorf("Vars mismatch (-got 
+want):\n%s", diff) } }) } } func TestParseError(t *testing.T) { invalidCases := []struct { name string template string errorStr string }{ { "TemplateNotClosed", "{{ .Prompt ", "unclosed action", }, { "Template", `{{define "x"}}{{template "x"}}{{end}}{{template "x"}}`, "undefined template specified", }, } for _, tt := range invalidCases { t.Run(tt.name, func(t *testing.T) { _, err := Parse(tt.template) if err == nil { t.Fatalf("expected Parse to return an error for an invalid template, got nil") } if !strings.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.errorStr)) { t.Errorf("unexpected error message.\n got: %q\n want substring (case‑insensitive): %q", err.Error(), tt.errorStr) } }) } } func TestExecuteWithMessages(t *testing.T) { type template struct { name string template string } cases := []struct { name string templates []template values Values expected string }{ { "mistral", []template{ {"no response", `[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] `}, {"response", `[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, {"messages", `[INST] {{ if .System }}{{ .System }} {{ end }} {{- range .Messages }} {{- if eq .Role "user" }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }}[INST] {{ end }} {{- end }}`}, }, Values{ Messages: []api.Message{ {Role: "user", Content: "Hello friend!"}, {Role: "assistant", Content: "Hello human!"}, {Role: "user", Content: "What is your name?"}, }, }, `[INST] Hello friend![/INST] Hello human![INST] What is your name?[/INST] `, }, { "mistral system", []template{ {"no response", `[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] `}, {"response", `[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, {"messages", `[INST] {{ if .System }}{{ .System }} {{ end }} {{- range .Messages }} {{- if eq .Role "user" }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }}[INST] {{ end }} {{- end 
}}`}, }, Values{ Messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant!"}, {Role: "user", Content: "Hello friend!"}, {Role: "assistant", Content: "Hello human!"}, {Role: "user", Content: "What is your name?"}, }, }, `[INST] You are a helpful assistant! Hello friend![/INST] Hello human![INST] What is your name?[/INST] `, }, { "mistral assistant", []template{ {"no response", `[INST] {{ .Prompt }}[/INST] `}, {"response", `[INST] {{ .Prompt }}[/INST] {{ .Response }}`}, {"messages", ` {{- range $i, $m := .Messages }} {{- if eq .Role "user" }}[INST] {{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }}{{ end }} {{- end }}`}, }, Values{ Messages: []api.Message{ {Role: "user", Content: "Hello friend!"}, {Role: "assistant", Content: "Hello human!"}, {Role: "user", Content: "What is your name?"}, {Role: "assistant", Content: "My name is Ollama and I"}, }, }, `[INST] Hello friend![/INST] Hello human![INST] What is your name?[/INST] My name is Ollama and I`, }, { "chatml", []template{ // this does not have a "no response" test because it's impossible to render the same output {"response", `{{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant {{ .Response }}<|im_end|> `}, {"messages", ` {{- range $index, $_ := .Messages }}<|im_start|>{{ .Role }} {{ .Content }}<|im_end|> {{ end }}<|im_start|>assistant `}, }, Values{ Messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant!"}, {Role: "user", Content: "Hello friend!"}, {Role: "assistant", Content: "Hello human!"}, {Role: "user", Content: "What is your name?"}, }, }, `<|im_start|>system You are a helpful assistant!<|im_end|> <|im_start|>user Hello friend!<|im_end|> <|im_start|>assistant Hello human!<|im_end|> <|im_start|>user What is your name?<|im_end|> <|im_start|>assistant `, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { for _, ttt := range 
tt.templates { t.Run(ttt.name, func(t *testing.T) { tmpl, err := Parse(ttt.template) if err != nil { t.Fatal(err) } var b bytes.Buffer if err := tmpl.Execute(&b, tt.values); err != nil { t.Fatal(err) } if diff := cmp.Diff(b.String(), tt.expected); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } }) } } func TestExecuteWithSuffix(t *testing.T) { tmpl, err := Parse(`{{- if .Suffix }}<PRE> {{ .Prompt }} <SUF>{{ .Suffix }} <MID> {{- else }}{{ .Prompt }} {{- end }}`) if err != nil { t.Fatal(err) } cases := []struct { name string values Values expect string }{ { "message", Values{Messages: []api.Message{{Role: "user", Content: "hello"}}}, "hello", }, { "prompt suffix", Values{Prompt: "def add(", Suffix: "return x"}, "<PRE> def add( <SUF>return x <MID>", }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { var b bytes.Buffer if err := tmpl.Execute(&b, tt.values); err != nil { t.Fatal(err) } if diff := cmp.Diff(b.String(), tt.expect); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } } func TestDateFunctions(t *testing.T) { t.Run("currentDate", func(t *testing.T) { tmpl, err := Parse("{{- range .Messages }}{{ .Content }}{{ end }} Today is {{ currentDate }}") if err != nil { t.Fatal(err) } var b bytes.Buffer if err := tmpl.Execute(&b, Values{Messages: []api.Message{{Role: "user", Content: "Hello"}}}); err != nil { t.Fatal(err) } expected := "Hello Today is " + time.Now().Format("2006-01-02") if b.String() != expected { t.Errorf("got %q, want %q", b.String(), expected) } }) t.Run("yesterdayDate", func(t *testing.T) { tmpl, err := Parse("{{- range .Messages }}{{ .Content }}{{ end }} Yesterday was {{ yesterdayDate }}") if err != nil { t.Fatal(err) } var b bytes.Buffer if err := tmpl.Execute(&b, Values{Messages: []api.Message{{Role: "user", Content: "Hello"}}}); err != nil { t.Fatal(err) } expected := "Hello Yesterday was " + time.Now().AddDate(0, 0, -1).Format("2006-01-02") if b.String() != expected { t.Errorf("got %q, want %q", 
b.String(), expected) } }) t.Run("yesterdayDate format", func(t *testing.T) { tmpl, err := Parse("{{- range .Messages }}{{ end }}{{ yesterdayDate }}") if err != nil { t.Fatal(err) } var b bytes.Buffer if err := tmpl.Execute(&b, Values{Messages: []api.Message{{Role: "user", Content: "Hello"}}}); err != nil { t.Fatal(err) } // Verify the format matches YYYY-MM-DD result := b.String() if len(result) != 10 { t.Errorf("expected date length 10, got %d: %q", len(result), result) } // Parse and verify it's a valid date parsed, err := time.Parse("2006-01-02", result) if err != nil { t.Errorf("failed to parse date %q: %v", result, err) } // Verify it's yesterday yesterday := time.Now().AddDate(0, 0, -1) if parsed.Year() != yesterday.Year() || parsed.Month() != yesterday.Month() || parsed.Day() != yesterday.Day() { t.Errorf("expected yesterday's date, got %v", parsed) } }) } func TestCollate(t *testing.T) { cases := []struct { name string msgs []api.Message expected []*api.Message system string }{ { name: "consecutive user messages are merged", msgs: []api.Message{ {Role: "user", Content: "Hello"}, {Role: "user", Content: "How are you?"}, }, expected: []*api.Message{ {Role: "user", Content: "Hello\n\nHow are you?"}, }, system: "", }, { name: "consecutive tool messages are NOT merged", msgs: []api.Message{ {Role: "tool", Content: "sunny", ToolName: "get_weather"}, {Role: "tool", Content: "72F", ToolName: "get_temperature"}, }, expected: []*api.Message{ {Role: "tool", Content: "sunny", ToolName: "get_weather"}, {Role: "tool", Content: "72F", ToolName: "get_temperature"}, }, system: "", }, { name: "tool messages preserve all fields", msgs: []api.Message{ {Role: "user", Content: "What's the weather?"}, {Role: "tool", Content: "sunny", ToolName: "get_conditions"}, {Role: "tool", Content: "72F", ToolName: "get_temperature"}, }, expected: []*api.Message{ {Role: "user", Content: "What's the weather?"}, {Role: "tool", Content: "sunny", ToolName: "get_conditions"}, {Role: "tool", 
Content: "72F", ToolName: "get_temperature"}, }, system: "", }, { name: "mixed messages with system", msgs: []api.Message{ {Role: "system", Content: "You are helpful"}, {Role: "user", Content: "Hello"}, {Role: "assistant", Content: "Hi there!"}, {Role: "user", Content: "What's the weather?"}, {Role: "tool", Content: "sunny", ToolName: "get_weather"}, {Role: "tool", Content: "72F", ToolName: "get_temperature"}, {Role: "user", Content: "Thanks"}, }, expected: []*api.Message{ {Role: "system", Content: "You are helpful"}, {Role: "user", Content: "Hello"}, {Role: "assistant", Content: "Hi there!"}, {Role: "user", Content: "What's the weather?"}, {Role: "tool", Content: "sunny", ToolName: "get_weather"}, {Role: "tool", Content: "72F", ToolName: "get_temperature"}, {Role: "user", Content: "Thanks"}, }, system: "You are helpful", }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { system, collated := collate(tt.msgs) if diff := cmp.Diff(system, tt.system); diff != "" { t.Errorf("system mismatch (-got +want):\n%s", diff) } // Compare the messages if len(collated) != len(tt.expected) { t.Errorf("expected %d messages, got %d", len(tt.expected), len(collated)) return } for i := range collated { if collated[i].Role != tt.expected[i].Role { t.Errorf("message %d role mismatch: got %q, want %q", i, collated[i].Role, tt.expected[i].Role) } if collated[i].Content != tt.expected[i].Content { t.Errorf("message %d content mismatch: got %q, want %q", i, collated[i].Content, tt.expected[i].Content) } if collated[i].ToolName != tt.expected[i].ToolName { t.Errorf("message %d tool name mismatch: got %q, want %q", i, collated[i].ToolName, tt.expected[i].ToolName) } } }) } } func TestTemplateArgumentsJSON(t *testing.T) { // Test that {{ .Function.Arguments }} outputs valid JSON, not map[key:value] tmpl := `{{- range .Messages }}{{- range .ToolCalls }}{{ .Function.Arguments }}{{- end }}{{- end }}` template, err := Parse(tmpl) if err != nil { t.Fatal(err) } args := 
api.NewToolCallFunctionArguments() args.Set("location", "Tokyo") args.Set("unit", "celsius") var buf bytes.Buffer err = template.Execute(&buf, Values{ Messages: []api.Message{{ Role: "assistant", ToolCalls: []api.ToolCall{{ Function: api.ToolCallFunction{ Name: "get_weather", Arguments: args, }, }}, }}, }) if err != nil { t.Fatal(err) } got := buf.String() // Should be valid JSON, not "map[location:Tokyo unit:celsius]" if strings.HasPrefix(got, "map[") { t.Errorf("Arguments output as Go map format: %s", got) } var parsed map[string]any if err := json.Unmarshal([]byte(got), &parsed); err != nil { t.Errorf("Arguments not valid JSON: %s, error: %v", got, err) } } func TestTemplatePropertiesJSON(t *testing.T) { // Test that {{ .Function.Parameters.Properties }} outputs valid JSON // Note: template must reference .Messages to trigger the modern code path that converts Tools tmpl := `{{- range .Messages }}{{- end }}{{- range .Tools }}{{ .Function.Parameters.Properties }}{{- end }}` template, err := Parse(tmpl) if err != nil { t.Fatal(err) } props := api.NewToolPropertiesMap() props.Set("location", api.ToolProperty{Type: api.PropertyType{"string"}, Description: "City name"}) var buf bytes.Buffer err = template.Execute(&buf, Values{ Messages: []api.Message{{Role: "user", Content: "test"}}, Tools: api.Tools{{ Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: props, }, }, }}, }) if err != nil { t.Fatal(err) } got := buf.String() // Should be valid JSON, not "map[location:{...}]" if strings.HasPrefix(got, "map[") { t.Errorf("Properties output as Go map format: %s", got) } var parsed map[string]any if err := json.Unmarshal([]byte(got), &parsed); err != nil { t.Errorf("Properties not valid JSON: %s, error: %v", got, err) } } func TestTemplateArgumentsRange(t *testing.T) { // Test that we can range over Arguments in templates tmpl := `{{- range .Messages }}{{- range 
.ToolCalls }}{{- range $k, $v := .Function.Arguments }}{{ $k }}={{ $v }};{{- end }}{{- end }}{{- end }}` template, err := Parse(tmpl) if err != nil { t.Fatal(err) } args := api.NewToolCallFunctionArguments() args.Set("city", "Tokyo") var buf bytes.Buffer err = template.Execute(&buf, Values{ Messages: []api.Message{{ Role: "assistant", ToolCalls: []api.ToolCall{{ Function: api.ToolCallFunction{ Name: "get_weather", Arguments: args, }, }}, }}, }) if err != nil { t.Fatal(err) } got := buf.String() if got != "city=Tokyo;" { t.Errorf("Range over Arguments failed, got: %s, want: city=Tokyo;", got) } } func TestTemplatePropertiesRange(t *testing.T) { // Test that we can range over Properties in templates // Note: template must reference .Messages to trigger the modern code path that converts Tools tmpl := `{{- range .Messages }}{{- end }}{{- range .Tools }}{{- range $name, $prop := .Function.Parameters.Properties }}{{ $name }}:{{ $prop.Type }};{{- end }}{{- end }}` template, err := Parse(tmpl) if err != nil { t.Fatal(err) } props := api.NewToolPropertiesMap() props.Set("location", api.ToolProperty{Type: api.PropertyType{"string"}}) var buf bytes.Buffer err = template.Execute(&buf, Values{ Messages: []api.Message{{Role: "user", Content: "test"}}, Tools: api.Tools{{ Type: "function", Function: api.ToolFunction{ Name: "get_weather", Parameters: api.ToolFunctionParameters{ Type: "object", Properties: props, }, }, }}, }) if err != nil { t.Fatal(err) } got := buf.String() if got != "location:string;" { t.Errorf("Range over Properties failed, got: %s, want: location:string;", got) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/template/template.go
template/template.go
package template import ( "bytes" "embed" "encoding/json" "errors" "io" "maps" "math" "slices" "strings" "sync" "text/template" "text/template/parse" "time" "github.com/agnivade/levenshtein" "github.com/ollama/ollama/api" ) //go:embed index.json var indexBytes []byte //go:embed *.gotmpl //go:embed *.json var templatesFS embed.FS var templatesOnce = sync.OnceValues(func() ([]*named, error) { var templates []*named if err := json.Unmarshal(indexBytes, &templates); err != nil { return nil, err } for _, t := range templates { bts, err := templatesFS.ReadFile(t.Name + ".gotmpl") if err != nil { return nil, err } // normalize line endings t.Bytes = bytes.ReplaceAll(bts, []byte("\r\n"), []byte("\n")) params, err := templatesFS.ReadFile(t.Name + ".json") if err != nil { continue } if err := json.Unmarshal(params, &t.Parameters); err != nil { return nil, err } } return templates, nil }) type named struct { Name string `json:"name"` Template string `json:"template"` Bytes []byte Parameters *struct { Stop []string `json:"stop"` } } func (t named) Reader() io.Reader { return bytes.NewReader(t.Bytes) } func Named(s string) (*named, error) { templates, err := templatesOnce() if err != nil { return nil, err } var template *named score := math.MaxInt for _, t := range templates { if s := levenshtein.ComputeDistance(s, t.Template); s < score { score = s template = t } } if score < 100 { return template, nil } return nil, errors.New("no matching template found") } var DefaultTemplate, _ = Parse("{{ .Prompt }}") type Template struct { *template.Template raw string } // response is a template node that can be added to templates that don't already have one var response = parse.ActionNode{ NodeType: parse.NodeAction, Pipe: &parse.PipeNode{ NodeType: parse.NodePipe, Cmds: []*parse.CommandNode{ { NodeType: parse.NodeCommand, Args: []parse.Node{ &parse.FieldNode{ NodeType: parse.NodeField, Ident: []string{"Response"}, }, }, }, }, }, } var funcs = template.FuncMap{ "json": func(v any) 
string { b, _ := json.Marshal(v) return string(b) }, "currentDate": func(args ...string) string { // Currently ignoring the format argument, but accepting it for future use // Default format is YYYY-MM-DD return time.Now().Format("2006-01-02") }, "yesterdayDate": func(args ...string) string { return time.Now().AddDate(0, 0, -1).Format("2006-01-02") }, "toTypeScriptType": func(v any) string { if param, ok := v.(api.ToolProperty); ok { return param.ToTypeScriptType() } // Handle pointer case if param, ok := v.(*api.ToolProperty); ok && param != nil { return param.ToTypeScriptType() } return "any" }, } func Parse(s string) (*Template, error) { tmpl := template.New("").Option("missingkey=zero").Funcs(funcs) tmpl, err := tmpl.Parse(s) if err != nil { return nil, err } t := Template{Template: tmpl, raw: s} vars, err := t.Vars() if err != nil { return nil, err } if !slices.Contains(vars, "messages") && !slices.Contains(vars, "response") { // touch up the template and append {{ .Response }} tmpl.Tree.Root.Nodes = append(tmpl.Tree.Root.Nodes, &response) } return &t, nil } func (t *Template) String() string { return t.raw } func (t *Template) Vars() ([]string, error) { var vars []string for _, tt := range t.Templates() { for _, n := range tt.Root.Nodes { v, err := Identifiers(n) if err != nil { return vars, err } vars = append(vars, v...) } } set := make(map[string]struct{}) for _, n := range vars { set[strings.ToLower(n)] = struct{}{} } return slices.Sorted(maps.Keys(set)), nil } func (t *Template) Contains(s string) bool { return strings.Contains(t.raw, s) } type Values struct { Messages []api.Message api.Tools Prompt string Suffix string Think bool // ThinkLevel contains the thinking level if Think is true and a string value was provided ThinkLevel string // whether or not the user explicitly set the thinking flag (vs. it being // implicitly false). 
Templates can't see whether `Think` is nil IsThinkSet bool // forceLegacy is a flag used to test compatibility with legacy templates forceLegacy bool } func (t *Template) Subtree(fn func(parse.Node) bool) *template.Template { var walk func(parse.Node) parse.Node walk = func(n parse.Node) parse.Node { if fn(n) { return n } switch t := n.(type) { case *parse.ListNode: for _, c := range t.Nodes { if n := walk(c); n != nil { return n } } case *parse.BranchNode: for _, n := range []*parse.ListNode{t.List, t.ElseList} { if n != nil { if n := walk(n); n != nil { return n } } } case *parse.IfNode: return walk(&t.BranchNode) case *parse.WithNode: return walk(&t.BranchNode) case *parse.RangeNode: return walk(&t.BranchNode) } return nil } if n := walk(t.Tree.Root); n != nil { return (&template.Template{ Tree: &parse.Tree{ Root: &parse.ListNode{ Nodes: []parse.Node{n}, }, }, }).Funcs(funcs) } return nil } func (t *Template) Execute(w io.Writer, v Values) error { system, messages := collate(v.Messages) vars, err := t.Vars() if err != nil { return err } if v.Prompt != "" && v.Suffix != "" { return t.Template.Execute(w, map[string]any{ "Prompt": v.Prompt, "Suffix": v.Suffix, "Response": "", "Think": v.Think, "ThinkLevel": v.ThinkLevel, "IsThinkSet": v.IsThinkSet, }) } else if !v.forceLegacy && slices.Contains(vars, "messages") { return t.Template.Execute(w, map[string]any{ "System": system, "Messages": convertMessagesForTemplate(messages), "Tools": convertToolsForTemplate(v.Tools), "Response": "", "Think": v.Think, "ThinkLevel": v.ThinkLevel, "IsThinkSet": v.IsThinkSet, }) } system = "" var b bytes.Buffer var prompt, response string for _, m := range messages { execute := func() error { if err := t.Template.Execute(&b, map[string]any{ "System": system, "Prompt": prompt, "Response": response, "Think": v.Think, "ThinkLevel": v.ThinkLevel, "IsThinkSet": v.IsThinkSet, }); err != nil { return err } system = "" prompt = "" response = "" return nil } switch m.Role { case "system": if 
prompt != "" || response != "" { if err := execute(); err != nil { return err } } system = m.Content case "user": if response != "" { if err := execute(); err != nil { return err } } prompt = m.Content case "assistant": response = m.Content } } var cut bool nodes := deleteNode(t.Template.Root.Copy(), func(n parse.Node) bool { if field, ok := n.(*parse.FieldNode); ok && slices.Contains(field.Ident, "Response") { cut = true return false } return cut }) tree := parse.Tree{Root: nodes.(*parse.ListNode)} if err := template.Must(template.New("").AddParseTree("", &tree)).Execute(&b, map[string]any{ "System": system, "Prompt": prompt, "Response": response, "Think": v.Think, "ThinkLevel": v.ThinkLevel, "IsThinkSet": v.IsThinkSet, }); err != nil { return err } _, err = io.Copy(w, &b) return err } // collate messages based on role. consecutive messages of the same role are merged // into a single message (except for tool messages which preserve individual metadata). // collate also collects and returns all system messages. // collate mutates message content adding image tags ([img-%d]) as needed // todo(parthsareen): revisit for contextual image support func collate(msgs []api.Message) (string, []*api.Message) { var system []string var collated []*api.Message for i := range msgs { if msgs[i].Role == "system" { system = append(system, msgs[i].Content) } // merges consecutive messages of the same role into a single message (except for tool messages) if len(collated) > 0 && collated[len(collated)-1].Role == msgs[i].Role && msgs[i].Role != "tool" { collated[len(collated)-1].Content += "\n\n" + msgs[i].Content } else { collated = append(collated, &msgs[i]) } } return strings.Join(system, "\n\n"), collated } // templateTools is a slice of templateTool that marshals to JSON. type templateTools []templateTool func (t templateTools) String() string { bts, _ := json.Marshal(t) return string(bts) } // templateArgs is a map type with JSON string output for templates. 
type templateArgs map[string]any func (t templateArgs) String() string { if t == nil { return "{}" } bts, _ := json.Marshal(t) return string(bts) } // templateProperties is a map type with JSON string output for templates. type templateProperties map[string]api.ToolProperty func (t templateProperties) String() string { if t == nil { return "{}" } bts, _ := json.Marshal(t) return string(bts) } // templateTool is a template-compatible representation of api.Tool // with Properties as a regular map for template ranging. type templateTool struct { Type string `json:"type"` Items any `json:"items,omitempty"` Function templateToolFunction `json:"function"` } type templateToolFunction struct { Name string `json:"name"` Description string `json:"description"` Parameters templateToolFunctionParameters `json:"parameters"` } type templateToolFunctionParameters struct { Type string `json:"type"` Defs any `json:"$defs,omitempty"` Items any `json:"items,omitempty"` Required []string `json:"required,omitempty"` Properties templateProperties `json:"properties"` } // templateToolCall is a template-compatible representation of api.ToolCall // with Arguments as a regular map for template ranging. type templateToolCall struct { ID string Function templateToolCallFunction } type templateToolCallFunction struct { Index int Name string Arguments templateArgs } // templateMessage is a template-compatible representation of api.Message // with ToolCalls converted for template use. type templateMessage struct { Role string Content string Thinking string Images []api.ImageData ToolCalls []templateToolCall ToolName string ToolCallID string } // convertToolsForTemplate converts Tools to template-compatible format. 
func convertToolsForTemplate(tools api.Tools) templateTools { if tools == nil { return nil } result := make(templateTools, len(tools)) for i, tool := range tools { result[i] = templateTool{ Type: tool.Type, Items: tool.Items, Function: templateToolFunction{ Name: tool.Function.Name, Description: tool.Function.Description, Parameters: templateToolFunctionParameters{ Type: tool.Function.Parameters.Type, Defs: tool.Function.Parameters.Defs, Items: tool.Function.Parameters.Items, Required: tool.Function.Parameters.Required, Properties: templateProperties(tool.Function.Parameters.Properties.ToMap()), }, }, } } return result } // convertMessagesForTemplate converts Messages to template-compatible format. func convertMessagesForTemplate(messages []*api.Message) []*templateMessage { if messages == nil { return nil } result := make([]*templateMessage, len(messages)) for i, msg := range messages { var toolCalls []templateToolCall for _, tc := range msg.ToolCalls { toolCalls = append(toolCalls, templateToolCall{ ID: tc.ID, Function: templateToolCallFunction{ Index: tc.Function.Index, Name: tc.Function.Name, Arguments: templateArgs(tc.Function.Arguments.ToMap()), }, }) } result[i] = &templateMessage{ Role: msg.Role, Content: msg.Content, Thinking: msg.Thinking, Images: msg.Images, ToolCalls: toolCalls, ToolName: msg.ToolName, ToolCallID: msg.ToolCallID, } } return result } // Identifiers walks the node tree returning any identifiers it finds along the way func Identifiers(n parse.Node) ([]string, error) { switch n := n.(type) { case *parse.ListNode: var names []string for _, n := range n.Nodes { i, err := Identifiers(n) if err != nil { return names, err } names = append(names, i...) 
} return names, nil case *parse.TemplateNode: if n.Pipe == nil { return nil, errors.New("undefined template specified") } return Identifiers(n.Pipe) case *parse.ActionNode: if n.Pipe == nil { return nil, errors.New("undefined action in template") } return Identifiers(n.Pipe) case *parse.BranchNode: if n.Pipe == nil { return nil, errors.New("undefined branch") } names, err := Identifiers(n.Pipe) if err != nil { return names, err } for _, n := range []*parse.ListNode{n.List, n.ElseList} { if n != nil { i, err := Identifiers(n) if err != nil { return names, err } names = append(names, i...) } } return names, nil case *parse.IfNode: return Identifiers(&n.BranchNode) case *parse.RangeNode: return Identifiers(&n.BranchNode) case *parse.WithNode: return Identifiers(&n.BranchNode) case *parse.PipeNode: var names []string for _, c := range n.Cmds { for _, a := range c.Args { i, err := Identifiers(a) if err != nil { return names, err } names = append(names, i...) } } return names, nil case *parse.FieldNode: return n.Ident, nil case *parse.VariableNode: return n.Ident, nil } return nil, nil } // deleteNode walks the node list and deletes nodes that match the predicate // this is currently to remove the {{ .Response }} node from templates func deleteNode(n parse.Node, fn func(parse.Node) bool) parse.Node { var walk func(n parse.Node) parse.Node walk = func(n parse.Node) parse.Node { if fn(n) { return nil } switch t := n.(type) { case *parse.ListNode: var nodes []parse.Node for _, c := range t.Nodes { if n := walk(c); n != nil { nodes = append(nodes, n) } } t.Nodes = nodes return t case *parse.IfNode: t.BranchNode = *(walk(&t.BranchNode).(*parse.BranchNode)) case *parse.WithNode: t.BranchNode = *(walk(&t.BranchNode).(*parse.BranchNode)) case *parse.RangeNode: t.BranchNode = *(walk(&t.BranchNode).(*parse.BranchNode)) case *parse.BranchNode: t.List = walk(t.List).(*parse.ListNode) if t.ElseList != nil { t.ElseList = walk(t.ElseList).(*parse.ListNode) } case *parse.ActionNode: n 
:= walk(t.Pipe) if n == nil { return nil } t.Pipe = n.(*parse.PipeNode) case *parse.PipeNode: var commands []*parse.CommandNode for _, c := range t.Cmds { var args []parse.Node for _, a := range c.Args { if n := walk(a); n != nil { args = append(args, n) } } if len(args) == 0 { return nil } c.Args = args commands = append(commands, c) } if len(commands) == 0 { return nil } t.Cmds = commands } return n } return walk(n) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/routes_test.go
server/routes_test.go
package server import ( "bytes" "context" "encoding/binary" "encoding/json" "fmt" "io" "io/fs" "math" "math/rand/v2" "net" "net/http" "net/http/httptest" "os" "path/filepath" "reflect" "slices" "sort" "strings" "testing" "unicode" "github.com/gin-gonic/gin" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/openai" "github.com/ollama/ollama/server/internal/client/ollama" "github.com/ollama/ollama/types/model" "github.com/ollama/ollama/version" ) func createTestFile(t *testing.T, name string) (string, string) { t.Helper() modelDir := os.Getenv("OLLAMA_MODELS") if modelDir == "" { t.Fatalf("OLLAMA_MODELS not specified") } f, err := os.CreateTemp(t.TempDir(), name) if err != nil { t.Fatalf("failed to create temp file: %v", err) } defer f.Close() err = binary.Write(f, binary.LittleEndian, []byte("GGUF")) if err != nil { t.Fatalf("failed to write to file: %v", err) } err = binary.Write(f, binary.LittleEndian, uint32(3)) if err != nil { t.Fatalf("failed to write to file: %v", err) } err = binary.Write(f, binary.LittleEndian, uint64(0)) if err != nil { t.Fatalf("failed to write to file: %v", err) } err = binary.Write(f, binary.LittleEndian, uint64(0)) if err != nil { t.Fatalf("failed to write to file: %v", err) } // Calculate sha256 sum of file if _, err := f.Seek(0, 0); err != nil { t.Fatal(err) } digest, _ := GetSHA256Digest(f) if err := f.Close(); err != nil { t.Fatal(err) } if err := createLink(f.Name(), filepath.Join(modelDir, "blobs", fmt.Sprintf("sha256-%s", strings.TrimPrefix(digest, "sha256:")))); err != nil { t.Fatal(err) } return f.Name(), digest } type panicTransport struct{} func (t *panicTransport) RoundTrip(r *http.Request) (*http.Response, error) { panic("unexpected RoundTrip call") } var panicOnRoundTrip = &http.Client{Transport: &panicTransport{}} func TestRoutes(t *testing.T) { type testCase struct { Name string Method string Path string Setup func(t *testing.T, req *http.Request) 
Expected func(t *testing.T, resp *http.Response) } createTestModel := func(t *testing.T, name string) { t.Helper() _, digest := createTestFile(t, "ollama-model") fn := func(resp api.ProgressResponse) { t.Logf("Status: %s", resp.Status) } r := api.CreateRequest{ Name: name, Files: map[string]string{"test.gguf": digest}, Parameters: map[string]any{ "seed": 42, "top_p": 0.9, "stop": []string{"foo", "bar"}, }, } modelName := model.ParseName(name) baseLayers, err := ggufLayers(digest, fn) if err != nil { t.Fatalf("failed to create model: %v", err) } config := &model.ConfigV2{ OS: "linux", Architecture: "amd64", RootFS: model.RootFS{ Type: "layers", }, } if err := createModel(r, modelName, baseLayers, config, fn); err != nil { t.Fatal(err) } } testCases := []testCase{ { Name: "Version Handler", Method: http.MethodGet, Path: "/api/version", Setup: func(t *testing.T, req *http.Request) { }, Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") if contentType != "application/json; charset=utf-8" { t.Errorf("expected content type application/json; charset=utf-8, got %s", contentType) } body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read response body: %v", err) } expectedBody := fmt.Sprintf(`{"version":"%s"}`, version.Version) if string(body) != expectedBody { t.Errorf("expected body %s, got %s", expectedBody, string(body)) } }, }, { Name: "Tags Handler (no tags)", Method: http.MethodGet, Path: "/api/tags", Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") if contentType != "application/json; charset=utf-8" { t.Errorf("expected content type application/json; charset=utf-8, got %s", contentType) } body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read response body: %v", err) } var modelList api.ListResponse err = json.Unmarshal(body, &modelList) if err != nil { t.Fatalf("failed to unmarshal response body: %v", err) } if modelList.Models 
== nil || len(modelList.Models) != 0 { t.Errorf("expected empty model list, got %v", modelList.Models) } }, }, { Name: "openai empty list", Method: http.MethodGet, Path: "/v1/models", Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") if contentType != "application/json" { t.Errorf("expected content type application/json, got %s", contentType) } body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read response body: %v", err) } var modelList openai.ListCompletion err = json.Unmarshal(body, &modelList) if err != nil { t.Fatalf("failed to unmarshal response body: %v", err) } if modelList.Object != "list" || len(modelList.Data) != 0 { t.Errorf("expected empty model list, got %v", modelList.Data) } }, }, { Name: "Tags Handler (yes tags)", Method: http.MethodGet, Path: "/api/tags", Setup: func(t *testing.T, req *http.Request) { createTestModel(t, "test-model") }, Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") if contentType != "application/json; charset=utf-8" { t.Errorf("expected content type application/json; charset=utf-8, got %s", contentType) } body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read response body: %v", err) } if strings.Contains(string(body), "expires_at") { t.Errorf("response body should not contain 'expires_at'") } var modelList api.ListResponse err = json.Unmarshal(body, &modelList) if err != nil { t.Fatalf("failed to unmarshal response body: %v", err) } if len(modelList.Models) != 1 || modelList.Models[0].Name != "test-model:latest" { t.Errorf("expected model 'test-model:latest', got %v", modelList.Models) } }, }, { Name: "Delete Model Handler", Method: http.MethodDelete, Path: "/api/delete", Setup: func(t *testing.T, req *http.Request) { createTestModel(t, "model_to_delete") deleteReq := api.DeleteRequest{ Name: "model_to_delete", } jsonData, err := json.Marshal(deleteReq) if err != nil { t.Fatalf("failed 
to marshal delete request: %v", err) } req.Body = io.NopCloser(bytes.NewReader(jsonData)) }, Expected: func(t *testing.T, resp *http.Response) { if resp.StatusCode != http.StatusOK { t.Errorf("expected status code 200, got %d", resp.StatusCode) } // Verify the model was deleted _, err := GetModel("model-to-delete") if err == nil || !os.IsNotExist(err) { t.Errorf("expected model to be deleted, got error %v", err) } }, }, { Name: "Delete Non-existent Model", Method: http.MethodDelete, Path: "/api/delete", Setup: func(t *testing.T, req *http.Request) { deleteReq := api.DeleteRequest{ Name: "non_existent_model", } jsonData, err := json.Marshal(deleteReq) if err != nil { t.Fatalf("failed to marshal delete request: %v", err) } req.Body = io.NopCloser(bytes.NewReader(jsonData)) }, Expected: func(t *testing.T, resp *http.Response) { if resp.StatusCode != http.StatusNotFound { t.Errorf("expected status code 404, got %d", resp.StatusCode) } body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read response body: %v", err) } var errorResp map[string]string err = json.Unmarshal(body, &errorResp) if err != nil { t.Fatalf("failed to unmarshal response body: %v", err) } if !strings.Contains(errorResp["error"], "not found") { t.Errorf("expected error message to contain 'not found', got %s", errorResp["error"]) } }, }, { Name: "openai list models with tags", Method: http.MethodGet, Path: "/v1/models", Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") if contentType != "application/json" { t.Errorf("expected content type application/json, got %s", contentType) } body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read response body: %v", err) } var modelList openai.ListCompletion err = json.Unmarshal(body, &modelList) if err != nil { t.Fatalf("failed to unmarshal response body: %v", err) } if len(modelList.Data) != 1 || modelList.Data[0].Id != "test-model:latest" || modelList.Data[0].OwnedBy != 
"library" { t.Errorf("expected model 'test-model:latest' owned by 'library', got %v", modelList.Data) } }, }, { Name: "Create Model Handler", Method: http.MethodPost, Path: "/api/create", Setup: func(t *testing.T, req *http.Request) { _, digest := createTestFile(t, "ollama-model") stream := false createReq := api.CreateRequest{ Name: "t-bone", Files: map[string]string{"test.gguf": digest}, Stream: &stream, } jsonData, err := json.Marshal(createReq) if err != nil { t.Fatalf("failed to marshal create request: %v", err) } req.Body = io.NopCloser(bytes.NewReader(jsonData)) }, Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") if contentType != "application/json" { t.Errorf("expected content type application/json, got %s", contentType) } _, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read response body: %v", err) } if resp.StatusCode != http.StatusOK { // Updated line t.Errorf("expected status code 200, got %d", resp.StatusCode) } model, err := GetModel("t-bone") if err != nil { t.Fatalf("failed to get model: %v", err) } if model.ShortName != "t-bone:latest" { t.Errorf("expected model name 't-bone:latest', got %s", model.ShortName) } }, }, { Name: "Copy Model Handler", Method: http.MethodPost, Path: "/api/copy", Setup: func(t *testing.T, req *http.Request) { createTestModel(t, "hamshank") copyReq := api.CopyRequest{ Source: "hamshank", Destination: "beefsteak", } jsonData, err := json.Marshal(copyReq) if err != nil { t.Fatalf("failed to marshal copy request: %v", err) } req.Body = io.NopCloser(bytes.NewReader(jsonData)) }, Expected: func(t *testing.T, resp *http.Response) { model, err := GetModel("beefsteak") if err != nil { t.Fatalf("failed to get model: %v", err) } if model.ShortName != "beefsteak:latest" { t.Errorf("expected model name 'beefsteak:latest', got %s", model.ShortName) } }, }, { Name: "Show Model Handler", Method: http.MethodPost, Path: "/api/show", Setup: func(t *testing.T, req 
*http.Request) { createTestModel(t, "show-model") showReq := api.ShowRequest{Model: "show-model"} jsonData, err := json.Marshal(showReq) if err != nil { t.Fatalf("failed to marshal show request: %v", err) } req.Body = io.NopCloser(bytes.NewReader(jsonData)) }, Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") if contentType != "application/json; charset=utf-8" { t.Errorf("expected content type application/json; charset=utf-8, got %s", contentType) } body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read response body: %v", err) } var showResp api.ShowResponse err = json.Unmarshal(body, &showResp) if err != nil { t.Fatalf("failed to unmarshal response body: %v", err) } var params []string paramsSplit := strings.Split(showResp.Parameters, "\n") for _, p := range paramsSplit { params = append(params, strings.Join(strings.Fields(p), " ")) } sort.Strings(params) expectedParams := []string{ "seed 42", "stop \"bar\"", "stop \"foo\"", "top_p 0.9", } if !slices.Equal(params, expectedParams) { t.Errorf("expected parameters %v, got %v", expectedParams, params) } paramCount, ok := showResp.ModelInfo["general.parameter_count"].(float64) if !ok { t.Fatalf("expected parameter count to be a float64, got %T", showResp.ModelInfo["general.parameter_count"]) } if math.Abs(paramCount) > 1e-9 { t.Errorf("expected parameter count to be 0, got %f", paramCount) } }, }, { Name: "openai retrieve model handler", Setup: func(t *testing.T, req *http.Request) { createTestModel(t, "show-model") }, Method: http.MethodGet, Path: "/v1/models/show-model", Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") if contentType != "application/json" { t.Errorf("expected content type application/json, got %s", contentType) } body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read response body: %v", err) } var m openai.Model err = json.Unmarshal(body, &m) if err != nil { 
t.Fatalf("failed to unmarshal response body: %v", err) } if m.Id != "show-model" || m.OwnedBy != "library" { t.Errorf("expected model 'show-model' owned by 'library', got %v", m) } }, }, { Name: "Method Not Allowed", Method: http.MethodGet, Path: "/api/show", Expected: func(t *testing.T, resp *http.Response) { if resp.StatusCode != 405 { t.Errorf("expected status code 405, got %d", resp.StatusCode) } }, }, } modelsDir := t.TempDir() t.Setenv("OLLAMA_MODELS", modelsDir) rc := &ollama.Registry{ // This is a temporary measure to allow us to move forward, // surfacing any code contacting ollama.com we do not intended // to. // // Currently, this only handles DELETE /api/delete, which // should not make any contact with the ollama.com registry, so // be clear about that. // // Tests that do need to contact the registry here, will be // consumed into our new server/api code packages and removed // from here. HTTPClient: panicOnRoundTrip, } s := &Server{} router, err := s.GenerateRoutes(rc) if err != nil { t.Fatalf("failed to generate routes: %v", err) } httpSrv := httptest.NewServer(router) t.Cleanup(httpSrv.Close) for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { u := httpSrv.URL + tc.Path req, err := http.NewRequestWithContext(t.Context(), tc.Method, u, nil) if err != nil { t.Fatalf("failed to create request: %v", err) } if tc.Setup != nil { tc.Setup(t, req) } resp, err := httpSrv.Client().Do(req) if err != nil { t.Fatalf("failed to do request: %v", err) } defer resp.Body.Close() if tc.Expected != nil { tc.Expected(t, resp) } }) } } func casingShuffle(s string) string { rr := []rune(s) for i := range rr { if rand.N(2) == 0 { rr[i] = unicode.ToUpper(rr[i]) } else { rr[i] = unicode.ToLower(rr[i]) } } return string(rr) } func TestManifestCaseSensitivity(t *testing.T) { t.Setenv("OLLAMA_MODELS", t.TempDir()) r := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) io.WriteString(w, `{}`) 
//nolint:errcheck })) defer r.Close() nameUsed := make(map[string]bool) name := func() string { const fqmn = "example/namespace/model:tag" for { v := casingShuffle(fqmn) if nameUsed[v] { continue } nameUsed[v] = true return v } } wantStableName := name() t.Logf("stable name: %s", wantStableName) // checkManifestList tests that there is strictly one manifest in the // models directory, and that the manifest is for the model under test. checkManifestList := func() { t.Helper() mandir := filepath.Join(os.Getenv("OLLAMA_MODELS"), "manifests/") var entries []string t.Logf("dir entries:") fsys := os.DirFS(mandir) err := fs.WalkDir(fsys, ".", func(path string, info fs.DirEntry, err error) error { if err != nil { return err } t.Logf(" %s", fs.FormatDirEntry(info)) if info.IsDir() { return nil } path = strings.TrimPrefix(path, mandir) entries = append(entries, path) return nil }) if err != nil { t.Fatalf("failed to walk directory: %v", err) } if len(entries) != 1 { t.Errorf("len(got) = %d, want 1", len(entries)) return // do not use Fatal so following steps run } g := entries[0] // raw path g = filepath.ToSlash(g) w := model.ParseName(wantStableName).Filepath() w = filepath.ToSlash(w) if g != w { t.Errorf("\ngot: %s\nwant: %s", g, w) } } checkOK := func(w *httptest.ResponseRecorder) { t.Helper() if w.Code != http.StatusOK { t.Errorf("code = %d, want 200", w.Code) t.Logf("body: %s", w.Body.String()) } } var s Server testMakeRequestDialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { var d net.Dialer return d.DialContext(ctx, "tcp", r.Listener.Addr().String()) } t.Cleanup(func() { testMakeRequestDialContext = nil }) t.Logf("creating") _, digest := createBinFile(t, nil, nil) checkOK(createRequest(t, s.CreateHandler, api.CreateRequest{ // Start with the stable name, and later use a case-shuffled // version. 
Name: wantStableName, Files: map[string]string{"test.gguf": digest}, Stream: &stream, })) checkManifestList() t.Logf("creating (again)") checkOK(createRequest(t, s.CreateHandler, api.CreateRequest{ Name: name(), Files: map[string]string{"test.gguf": digest}, Stream: &stream, })) checkManifestList() t.Logf("pulling") checkOK(createRequest(t, s.PullHandler, api.PullRequest{ Name: name(), Stream: &stream, Insecure: true, })) checkManifestList() t.Logf("copying") checkOK(createRequest(t, s.CopyHandler, api.CopyRequest{ Source: name(), Destination: name(), })) checkManifestList() t.Logf("pushing") rr := createRequest(t, s.PushHandler, api.PushRequest{ Model: name(), Insecure: true, Username: "alice", Password: "x", }) checkOK(rr) if !strings.Contains(rr.Body.String(), `"status":"success"`) { t.Errorf("got = %q, want success", rr.Body.String()) } } func TestShow(t *testing.T) { t.Setenv("OLLAMA_MODELS", t.TempDir()) var s Server _, digest1 := createBinFile(t, ggml.KV{"general.architecture": "test"}, nil) _, digest2 := createBinFile(t, ggml.KV{"general.type": "projector", "general.architecture": "clip"}, nil) createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "show-model", Files: map[string]string{"model.gguf": digest1, "projector.gguf": digest2}, }) w := createRequest(t, s.ShowHandler, api.ShowRequest{ Name: "show-model", }) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } var resp api.ShowResponse if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { t.Fatal(err) } if resp.ModelInfo["general.architecture"] != "test" { t.Fatal("Expected model architecture to be 'test', but got", resp.ModelInfo["general.architecture"]) } if resp.ProjectorInfo["general.architecture"] != "clip" { t.Fatal("Expected projector architecture to be 'clip', but got", resp.ProjectorInfo["general.architecture"]) } } func TestNormalize(t *testing.T) { type testCase struct { input []float32 expectError bool } testCases := []testCase{ {input: 
[]float32{1}, expectError: false}, {input: []float32{0, 1, 2, 3}, expectError: false}, {input: []float32{0.1, 0.2, 0.3}, expectError: false}, {input: []float32{-0.1, 0.2, 0.3, -0.4}, expectError: false}, {input: []float32{0, 0, 0}, expectError: false}, {input: []float32{float32(math.NaN()), 0.2, 0.3}, expectError: true}, {input: []float32{0.1, float32(math.NaN()), 0.3}, expectError: true}, {input: []float32{float32(math.Inf(1)), 0.2, 0.3}, expectError: true}, {input: []float32{float32(math.Inf(-1)), 0.2, 0.3}, expectError: true}, } isNormalized := func(vec []float32) (res bool) { sum := 0.0 for _, v := range vec { sum += float64(v * v) } if math.Abs(sum-1) > 1e-6 { return sum == 0 } else { return true } } for _, tc := range testCases { t.Run("", func(t *testing.T) { normalized, err := normalize(tc.input) if tc.expectError { if err == nil { t.Errorf("Expected error for input %v, but got none", tc.input) } } else { if err != nil { t.Errorf("Unexpected error for input %v: %v", tc.input, err) } if !isNormalized(normalized) { t.Errorf("Vector %v is not normalized", tc.input) } } }) } } func TestFilterThinkTags(t *testing.T) { type testCase struct { msgs []api.Message want []api.Message model *Model } testCases := []testCase{ { msgs: []api.Message{ {Role: "user", Content: "Hello, world!"}, {Role: "assistant", Content: "<think>Thinking... about the answer</think>abc"}, {Role: "user", Content: "What is the answer?"}, }, want: []api.Message{ {Role: "user", Content: "Hello, world!"}, {Role: "assistant", Content: "abc"}, {Role: "user", Content: "What is the answer?"}, }, model: &Model{ Config: model.ConfigV2{ ModelFamily: "qwen3", }, }, }, // with newlines inside the think tag aned newlines after { msgs: []api.Message{ {Role: "user", Content: "Hello, world!"}, {Role: "assistant", Content: "<think>Thinking... 
\n\nabout \nthe answer</think>\n\nabc\ndef"}, {Role: "user", Content: "What is the answer?"}, }, want: []api.Message{ {Role: "user", Content: "Hello, world!"}, {Role: "assistant", Content: "abc\ndef"}, {Role: "user", Content: "What is the answer?"}, }, model: &Model{ Config: model.ConfigV2{ ModelFamily: "qwen3", }, }, }, // should leave thinking tags if it's after the last user message { msgs: []api.Message{ {Role: "user", Content: "Hello, world!"}, {Role: "assistant", Content: "<think>Thinking...</think>after"}, {Role: "user", Content: "What is the answer?"}, {Role: "assistant", Content: "<think>thinking again</think>hjk"}, {Role: "assistant", Content: "<think>thinking yet again</think>hjk"}, }, want: []api.Message{ {Role: "user", Content: "Hello, world!"}, {Role: "assistant", Content: "after"}, {Role: "user", Content: "What is the answer?"}, {Role: "assistant", Content: "<think>thinking again</think>hjk"}, {Role: "assistant", Content: "<think>thinking yet again</think>hjk"}, }, model: &Model{ Config: model.ConfigV2{ ModelFamily: "qwen3", }, }, }, { // shouldn't strip anything because the model family isn't one of the hardcoded ones msgs: []api.Message{ {Role: "user", Content: "Hello, world!"}, {Role: "assistant", Content: "<think>Thinking... about the answer</think>abc"}, {Role: "user", Content: "What is the answer?"}, }, want: []api.Message{ {Role: "user", Content: "Hello, world!"}, {Role: "assistant", Content: "<think>Thinking... about the answer</think>abc"}, {Role: "user", Content: "What is the answer?"}, }, model: &Model{ Config: model.ConfigV2{ ModelFamily: "llama3", }, }, }, { // deepseek-r1:-prefixed model msgs: []api.Message{ {Role: "user", Content: "Hello, world!"}, {Role: "assistant", Content: "<think>Thinking... 
about the answer</think>abc"}, {Role: "user", Content: "What is the answer?"}, }, want: []api.Message{ {Role: "user", Content: "Hello, world!"}, {Role: "assistant", Content: "abc"}, {Role: "user", Content: "What is the answer?"}, }, model: &Model{ Name: "registry.ollama.ai/library/deepseek-r1:latest", ShortName: "deepseek-r1:7b", Config: model.ConfigV2{}, }, }, } for i, tc := range testCases { filtered := filterThinkTags(tc.msgs, tc.model) if !reflect.DeepEqual(filtered, tc.want) { t.Errorf("messages differ for case %d:", i) for i := range tc.want { if i >= len(filtered) { t.Errorf(" missing message %d: %+v", i, tc.want[i]) continue } if !reflect.DeepEqual(filtered[i], tc.want[i]) { t.Errorf(" message %d:\n want: %+v\n got: %+v", i, tc.want[i], filtered[i]) } } if len(filtered) > len(tc.want) { for i := len(tc.want); i < len(filtered); i++ { t.Errorf(" extra message %d: %+v", i, filtered[i]) } } } } } func TestWaitForStream(t *testing.T) { gin.SetMode(gin.TestMode) cases := []struct { name string messages []any expectCode int expectBody string }{ { name: "error", messages: []any{ gin.H{"error": "internal server error"}, }, expectCode: http.StatusInternalServerError, expectBody: `{"error":"internal server error"}`, }, { name: "error status", messages: []any{ gin.H{"status": http.StatusNotFound, "error": "not found"}, }, expectCode: http.StatusNotFound, expectBody: `{"error":"not found"}`, }, { name: "unknown error", messages: []any{ gin.H{"msg": "something else"}, }, expectCode: http.StatusInternalServerError, expectBody: `{"error":"unknown error"}`, }, { name: "unknown type", messages: []any{ struct{}{}, }, expectCode: http.StatusInternalServerError, expectBody: `{"error":"unknown message type"}`, }, { name: "progress success", messages: []any{ api.ProgressResponse{Status: "success"}, }, expectCode: http.StatusOK, expectBody: `{"status":"success"}`, }, { name: "progress more than success", messages: []any{ api.ProgressResponse{Status: "success"}, 
api.ProgressResponse{Status: "one more thing"}, }, expectCode: http.StatusOK, expectBody: `{"status":"one more thing"}`, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) ch := make(chan any, len(tt.messages)) for _, msg := range tt.messages { ch <- msg } close(ch) waitForStream(c, ch) if w.Code != tt.expectCode { t.Errorf("expected status %d, got %d", tt.expectCode, w.Code) } if diff := cmp.Diff(w.Body.String(), tt.expectBody); diff != "" { t.Errorf("body mismatch (-want +got):\n%s", diff) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/quantization.go
server/quantization.go
package server

import (
	"fmt"
	"io"
	"log/slog"
	"maps"
	"os"
	"strings"
	"unsafe"

	fsggml "github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/ml/backend/ggml"
)

// quantizer streams a single tensor out of an existing GGUF file and,
// when the source and destination kinds differ, re-quantizes it on the fly.
type quantizer struct {
	*os.File
	offset     uint64         // byte offset of this tensor's data within the source file
	from, to   *fsggml.Tensor // source and destination tensor descriptors
	progressFn func(n uint64) // invoked with the source tensor's size after the tensor is written
}

// WriteTo implements io.WriterTo. If the source and destination kinds match,
// the raw tensor bytes are copied through unchanged; otherwise the tensor is
// read fully into memory, converted to float32 (a zero-copy reinterpret when
// the source is already F32), and re-quantized to the destination type.
func (q quantizer) WriteTo(w io.Writer) (int64, error) {
	quantize := q.from.Kind != q.to.Kind
	sr := io.NewSectionReader(q, int64(q.offset), int64(q.from.Size()))
	if !quantize {
		// Same type on both sides: stream the bytes through as-is.
		n, err := io.Copy(w, sr)
		q.progressFn(q.from.Size())
		return n, err
	}
	data, err := io.ReadAll(sr)
	if err != nil {
		slog.Warn("file read error", "tensor", q.from.Name, "file", q.Name(), "error", err)
		// %w (was %s) so callers can unwrap the underlying I/O error with errors.Is/As.
		return 0, fmt.Errorf("unable to read tensor %s from %s: %w", q.from.Name, q.Name(), err)
	}
	var f32s []float32
	newType := fsggml.TensorType(q.to.Kind)
	if fsggml.TensorType(q.from.Kind) == fsggml.TensorTypeF32 {
		// Already float32: reinterpret the raw bytes in place rather than copying.
		f32s = unsafe.Slice((*float32)(unsafe.Pointer(&data[0])), q.from.Elements())
	} else {
		f32s = ggml.ConvertToF32(data, q.from.Kind, q.from.Elements())
	}
	data = ggml.Quantize(newType, f32s, q.from.Shape)
	n, err := w.Write(data)
	// Progress is reported in source-size units regardless of output size.
	q.progressFn(q.from.Size())
	return int64(n), err
}

// quantizeState carries per-model counters used by getTensorNewType to pick
// higher-precision types for a subset of layers (mirrors llama.cpp's
// llama_tensor_get_type heuristics).
type quantizeState struct {
	nAttnV    int  // Number of attn_*v* weight tensors
	nFfnDown  int  // Number of ffn_down tensors
	iAttnV    int  // Running counter of number of attn_v tensors that have been processed
	iFfnDown  int  // Running counter of number of ffn_down tensors that have been processed
	hasOutput bool // used to figure out if a model shares tok_embd with the output weight
}

// useMoreBits reports whether layer iLayer (out of nLayers) should be kept at
// higher precision: the first and last eighth of the layers, plus every third
// layer in between.
func useMoreBits(iLayer, nLayers int) bool {
	return iLayer < (nLayers/8) || iLayer >= 7*nLayers/8 || (iLayer-nLayers/8)%3 == 2
}

// getTensorNewType selects the final quantization type for a named tensor,
// bumping precision for quality-sensitive tensors (output, attn_v, ffn_down,
// etc.) and falling back to compatible types when the tensor's column count
// is not divisible by the chosen type's block size.
func getTensorNewType(kv fsggml.KV, qs *quantizeState, newType fsggml.TensorType, name string, shape []uint64, ftype fsggml.FileType) fsggml.TensorType {
	// Ported from llama_tensor_get_type, removed unsupported quantization types
	nExperts := max(1, kv.Uint("expert_count", 0))
	if name == "output.weight" || name == "output_norm.weight" || (!qs.hasOutput && name == "token_embd.weight") {
		nx := shape[0]
		qk_k := newType.BlockSize()
		if nx%qk_k != 0 {
			newType = fsggml.TensorTypeQ8_0
		} else if newType != fsggml.TensorTypeQ8_0 {
			newType = fsggml.TensorTypeQ6_K
		}
	} else if strings.Contains(name, "attn_v.weight") {
		if (ftype == fsggml.FileTypeQ4_K_M) && useMoreBits(qs.iAttnV, qs.nAttnV) {
			newType = fsggml.TensorTypeQ6_K
		} else if ftype == fsggml.FileTypeQ4_K_S && qs.iAttnV < 4 {
			newType = fsggml.TensorTypeQ5_K
		}
		// TODO
		// if (qs.model.type == LLM_TYPE_70B) {
		//     // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
		//     // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
		//     // nearly negligible increase in model size by quantizing this tensor with more bits:
		//     if (newType == GGML_TYPE_Q3_K || newType == GGML_TYPE_Q4_K) newType = GGML_TYPE_Q5_K;
		// }
		if nExperts == 8 {
			// for the 8-expert model, bumping this to Q8_0 trades just ~128MB
			newType = fsggml.TensorTypeQ8_0
		}
		qs.iAttnV++
	} else if strings.Contains(name, "attn_k.weight") {
		if nExperts == 8 {
			// for the 8-expert model, bumping this to Q8_0 trades just ~128MB
			newType = fsggml.TensorTypeQ8_0
		}
	} else if strings.Contains(name, "ffn_down") {
		iLayer := qs.iFfnDown
		n_layer := qs.nFfnDown
		if ftype == fsggml.FileTypeQ4_K_M {
			if useMoreBits(iLayer, n_layer) {
				newType = fsggml.TensorTypeQ6_K
			}
		} else if ftype == fsggml.FileTypeQ4_K_S && iLayer < n_layer/8 {
			newType = fsggml.TensorTypeQ5_K
		}
		qs.iFfnDown++
	} else if strings.Contains(name, "attn_output.weight") {
		if nExperts == 8 {
			if ftype == fsggml.FileTypeQ4_K_S || ftype == fsggml.FileTypeQ4_K_M {
				newType = fsggml.TensorTypeQ5_K
			}
		}
	} else if strings.Contains(name, "attn_qkv.weight") {
		if ftype == fsggml.FileTypeQ4_K_M {
			newType = fsggml.TensorTypeQ5_K
		}
	}
	if newType.IsQuantized() {
		nx := shape[0]
		qk_k := newType.BlockSize()
		// Check if first dimension is divisible by block size
		if nx%qk_k != 0 {
			// Store the original type for logging
			originalType := newType
			// Select appropriate fallback based on original type
			switch newType {
			case fsggml.TensorTypeQ4_K:
				newType = fsggml.TensorTypeQ5_0
			case fsggml.TensorTypeQ5_K:
				newType = fsggml.TensorTypeQ5_1
			case fsggml.TensorTypeQ6_K:
				newType = fsggml.TensorTypeQ8_0
			}
			// Final check - if still incompatible, fall back to F16
			if nx%newType.BlockSize() != 0 {
				newType = fsggml.TensorTypeF16
			}
			slog.Warn(fmt.Sprintf("tensor cols %d are not divisible by %d, required for %s - using fallback quantization %s",
				nx, qk_k, originalType.String(), newType.String()))
		}
	}
	return newType
}

// quantize rewrites the model in orig (backed by file in) into out at the
// requested file type. Tensor data is written lazily through quantizer values
// attached as WriterTo, so each tensor is converted as WriteGGUF streams it.
// progressFn is called once per tensor with the source tensor's byte size.
func quantize(in, out *os.File, orig *fsggml.GGML, newFileType fsggml.FileType, progressFn func(n uint64)) error {
	kv := maps.Clone(orig.KV())
	kv["general.file_type"] = newFileType
	// kv["general.quantization_version"] = ggml.QuantizationVersion()
	qs := &quantizeState{}
	// Build up the quantize state so newType can adjust types
	layerCount := 0
	for k, l := range orig.Tensors().GroupLayers() {
		if strings.HasPrefix(k, "blk.") {
			layerCount++
		}
		for _, tensor := range l {
			if strings.Contains(tensor.Name, "attn_v.weight") ||
				strings.Contains(tensor.Name, "attn_qkv.weight") ||
				strings.Contains(tensor.Name, "attn_kv_b.weight") {
				qs.nAttnV++
			} else if tensor.Name == "output.weight" {
				qs.hasOutput = true
			}
		}
	}
	qs.nFfnDown = layerCount
	origTensors := orig.Tensors().Items()
	outputTensors := make([]*fsggml.Tensor, len(origTensors))
	for i, tensor := range origTensors {
		newType := newType(tensor, kv, qs, newFileType)
		newTensor := &fsggml.Tensor{
			Name:  tensor.Name,
			Shape: tensor.Shape,
			Kind:  uint32(newType),
		}
		outputTensors[i] = newTensor
		outputTensors[i].WriterTo = quantizer{
			File:       in,
			offset:     orig.Tensors().Offset + tensor.Offset,
			from:       tensor,
			to:         newTensor,
			progressFn: progressFn,
		}
	}
	return fsggml.WriteGGUF(out, kv, outputTensors)
}

// newType decides whether tensor t should be quantized at all (norms, vision
// projections, gating tensors, etc. are kept as-is) and, if so, delegates to
// getTensorNewType for the concrete target type.
func newType(t *fsggml.Tensor, kv fsggml.KV, qs *quantizeState, ftype fsggml.FileType) fsggml.TensorType {
	defaultType := ftype.ToTensorType()
	name := t.Name
	quantize := strings.HasSuffix(name, "weight")

	// don't quantize vision stuff
	quantize = quantize && (!strings.Contains(name, "v.") || strings.Contains(name, "_v."))
	quantize = quantize && !strings.Contains(name, "mm.")

	// quantize only 2D and 3D tensors (experts)
	quantize = quantize && (len(t.Shape) >= 2)

	// do not quantize norm tensors
	quantize = quantize && !strings.Contains(name, "_norm.weight")

	// do not quantize expert gating tensors
	quantize = quantize && !strings.Contains(name, "ffn_gate_inp.weight")

	// do not quantize positional embeddings and token types (BERT)
	quantize = quantize && (name != "position_embd.weight")
	quantize = quantize && (name != "token_types.weight")

	// do not quantize Mamba's small yet 2D weights
	// NOTE: can't use LLM_TN here because the layer number is not known
	quantize = quantize && !strings.Contains(name, "ssm_conv1d.weight")

	// do not quantize RWKV's time_mix_first tensors
	quantize = quantize && !strings.Contains(name, "time_mix_first.weight")
	quantize = quantize && !strings.Contains(name, "time_mix_w1.weight")
	quantize = quantize && !strings.Contains(name, "time_mix_w2.weight")
	quantize = quantize && !strings.Contains(name, "time_mix_decay_w1.weight")
	quantize = quantize && !strings.Contains(name, "time_mix_decay_w2.weight")
	quantize = quantize && !strings.Contains(name, "time_mix_lerp_fused.weight")

	// do not quantize relative position bias (T5)
	quantize = quantize && !strings.Contains(name, "attn_rel_b.weight")

	quantize = quantize && !strings.Contains(name, "per_layer_token_embd.weight")

	newType := fsggml.TensorType(t.Kind)
	if quantize {
		// get more optimal quantization type based on the tensor shape, layer, etc.
		newType = getTensorNewType(kv, qs, defaultType, t.Name, t.Shape, ftype)
		if newType != defaultType {
			slog.Debug("tensor quantization adjusted for better quality", "name", t.Name, "requested", defaultType, "quantization", newType)
		}
	}
	return newType
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/routes_create_test.go
server/routes_create_test.go
package server

import (
	"bytes"
	"cmp"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"io"
	"maps"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"reflect"
	"slices"
	"strings"
	"testing"

	"github.com/gin-gonic/gin"
	gocmp "github.com/google/go-cmp/cmp"
	gocmpopts "github.com/google/go-cmp/cmp/cmpopts"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/types/model"
)

// stream disables streaming responses in all create requests below.
var stream bool = false

// createBinFile writes a minimal GGUF blob (with the given extra KV pairs and
// tensors) into the model blob store and returns its path and sha256 digest.
func createBinFile(t *testing.T, kv map[string]any, ti []*ggml.Tensor) (string, string) {
	t.Helper()
	t.Setenv("OLLAMA_MODELS", cmp.Or(os.Getenv("OLLAMA_MODELS"), t.TempDir()))
	modelDir := envconfig.Models()

	f, err := os.CreateTemp(t.TempDir(), "")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	base := map[string]any{"general.architecture": "test"}
	maps.Copy(base, kv)
	if err := ggml.WriteGGUF(f, base, ti); err != nil {
		t.Fatal(err)
	}

	// Calculate sha256 of file
	if _, err := f.Seek(0, 0); err != nil {
		t.Fatal(err)
	}
	digest, _ := GetSHA256Digest(f)
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	if err := createLink(f.Name(), filepath.Join(modelDir, "blobs", fmt.Sprintf("sha256-%s", strings.TrimPrefix(digest, "sha256:")))); err != nil {
		t.Fatal(err)
	}
	return f.Name(), digest
}

// responseRecorder augments httptest's recorder with a CloseNotifier so gin
// handlers that rely on it can run under test.
type responseRecorder struct {
	*httptest.ResponseRecorder
	http.CloseNotifier
}

// NewRecorder returns a responseRecorder wrapping a fresh httptest recorder.
func NewRecorder() *responseRecorder {
	return &responseRecorder{
		ResponseRecorder: httptest.NewRecorder(),
	}
}

// CloseNotify satisfies http.CloseNotifier; the channel never fires.
func (t *responseRecorder) CloseNotify() <-chan bool {
	return make(chan bool)
}

// createRequest JSON-encodes body, invokes the handler fn with it, and
// returns the recorded response.
func createRequest(t *testing.T, fn func(*gin.Context), body any) *httptest.ResponseRecorder {
	t.Helper()

	// if OLLAMA_MODELS is not set, set it to the temp directory
	t.Setenv("OLLAMA_MODELS", cmp.Or(os.Getenv("OLLAMA_MODELS"), t.TempDir()))

	w := NewRecorder()
	c, _ := gin.CreateTestContext(w)

	var b bytes.Buffer
	if err := json.NewEncoder(&b).Encode(body); err != nil {
		t.Fatal(err)
	}
	c.Request = &http.Request{
		Body: io.NopCloser(&b),
	}

	fn(c)
	return w.ResponseRecorder
}

// checkFileExists asserts that glob pattern p matches exactly the expected
// paths (order-insensitive).
func checkFileExists(t *testing.T, p string, expect []string) {
	t.Helper()

	actual, err := filepath.Glob(p)
	if err != nil {
		t.Fatal(err)
	}

	if diff := gocmp.Diff(expect, actual, gocmpopts.SortSlices(strings.Compare), gocmpopts.EquateEmpty()); diff != "" {
		t.Errorf("file exists mismatch (-want +got):\n%s", diff)
	}
}

func TestCreateFromBin(t *testing.T) {
	gin.SetMode(gin.TestMode)

	p := t.TempDir()
	t.Setenv("OLLAMA_MODELS", p)

	var s Server
	_, digest := createBinFile(t, nil, nil)
	w := createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:   "test",
		Files:  map[string]string{"test.gguf": digest},
		Stream: &stream,
	})

	if w.Code != http.StatusOK {
		fmt.Println(w)
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
	})

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-6bcdb8859d417753645538d7bbfbd7ca91a3f0c191aef5379c53c05e86b669dd"),
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
	})
}

func TestCreateFromModel(t *testing.T) {
	gin.SetMode(gin.TestMode)

	p := t.TempDir()
	t.Setenv("OLLAMA_MODELS", p)

	var s Server
	_, digest := createBinFile(t, nil, nil)
	w := createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:   "test",
		Files:  map[string]string{"test.gguf": digest},
		Stream: &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
	})

	// Derive a second model from the first.
	w = createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:   "test2",
		From:   "test",
		Stream: &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
	})

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-6bcdb8859d417753645538d7bbfbd7ca91a3f0c191aef5379c53c05e86b669dd"),
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
	})
}

func TestCreateFromModelInheritsRendererParser(t *testing.T) {
	gin.SetMode(gin.TestMode)

	p := t.TempDir()
	t.Setenv("OLLAMA_MODELS", p)

	var s Server
	const (
		renderer = "custom-renderer"
		parser   = "custom-parser"
	)

	_, digest := createBinFile(t, nil, nil)
	w := createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:     "base",
		Files:    map[string]string{"base.gguf": digest},
		Renderer: renderer,
		Parser:   parser,
		Stream:   &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	w = createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:   "child",
		From:   "base",
		Stream: &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	// The child's config blob must carry the parent's renderer/parser.
	manifest, err := ParseNamedManifest(model.ParseName("child"))
	if err != nil {
		t.Fatalf("parse manifest: %v", err)
	}
	if manifest.Config.Digest == "" {
		t.Fatalf("unexpected empty config digest for child manifest")
	}

	configPath, err := GetBlobsPath(manifest.Config.Digest)
	if err != nil {
		t.Fatalf("config blob path: %v", err)
	}
	cfgFile, err := os.Open(configPath)
	if err != nil {
		t.Fatalf("open config blob: %v", err)
	}
	defer cfgFile.Close()

	var cfg model.ConfigV2
	if err := json.NewDecoder(cfgFile).Decode(&cfg); err != nil {
		t.Fatalf("decode config: %v", err)
	}
	if cfg.Renderer != renderer {
		t.Fatalf("expected renderer %q, got %q", renderer, cfg.Renderer)
	}
	if cfg.Parser != parser {
		t.Fatalf("expected parser %q, got %q", parser, cfg.Parser)
	}
}

func TestCreateRemovesLayers(t *testing.T) {
	gin.SetMode(gin.TestMode)

	p := t.TempDir()
	t.Setenv("OLLAMA_MODELS", p)

	var s Server
	_, digest := createBinFile(t, nil, nil)
	w := createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:     "test",
		Files:    map[string]string{"test.gguf": digest},
		Template: "{{ .Prompt }}",
		Stream:   &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
	})

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
		filepath.Join(p, "blobs", "sha256-b507b9c2f6ca642bffcd06665ea7c91f235fd32daeefdf875a0f938db05fb315"),
		filepath.Join(p, "blobs", "sha256-f6e7e4b28e0b1d0c635f2d465bd248c5387c3e75b61a48c4374192b26d832a56"),
	})

	// Re-create the same model with a different template; the old template
	// layer should be replaced.
	w = createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:     "test",
		Files:    map[string]string{"test.gguf": digest},
		Template: "{{ .System }} {{ .Prompt }}",
		Stream:   &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
	})

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-136bf7c76bac2ec09d6617885507d37829e04b41acc47687d45e512b544e893a"),
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
		filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"),
	})
}

func TestCreateUnsetsSystem(t *testing.T) {
	gin.SetMode(gin.TestMode)

	p := t.TempDir()
	t.Setenv("OLLAMA_MODELS", p)

	var s Server
	_, digest := createBinFile(t, nil, nil)
	w := createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:   "test",
		Files:  map[string]string{"test.gguf": digest},
		System: "Say hi!",
		Stream: &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
	})

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-0a666d113e8e0a3d27e9c7bd136a0bdfb6241037db50729d81568451ebfdbde8"),
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
		filepath.Join(p, "blobs", "sha256-f29e82a8284dbdf5910b1555580ff60b04238b8da9d5e51159ada67a4d0d5851"),
	})

	// Re-creating with an empty system prompt should drop the system layer.
	w = createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:   "test",
		Files:  map[string]string{"test.gguf": digest},
		System: "",
		Stream: &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
	})

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-6bcdb8859d417753645538d7bbfbd7ca91a3f0c191aef5379c53c05e86b669dd"),
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
	})
}

func TestCreateMergeParameters(t *testing.T) {
	gin.SetMode(gin.TestMode)

	p := t.TempDir()
	t.Setenv("OLLAMA_MODELS", p)

	var s Server
	_, digest := createBinFile(t, nil, nil)
	w := createRequest(t, s.CreateHandler, api.CreateRequest{
		Name:  "test",
		Files: map[string]string{"test.gguf": digest},
		Parameters: map[string]any{
			"temperature": 1,
			"top_k":       10,
			"stop":        []string{"USER:", "ASSISTANT:"},
		},
		Stream: &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
	})

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-1d0ad71299d48c2fb7ae2b98e683643e771f8a5b72be34942af90d97a91c1e37"),
		filepath.Join(p, "blobs", "sha256-6d6e36c1f90fc7deefc33a7300aa21ad4b67c506e33ecdeddfafa98147e60bbf"),
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
	})

	// in order to merge parameters, the second model must be created FROM the first
	w = createRequest(t, s.CreateHandler, api.CreateRequest{
		Name: "test2",
		From: "test",
		Parameters: map[string]any{
			"temperature": 0.6,
			"top_p":       0.7,
		},
		Stream: &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
	})

	// Display contents of each blob in the directory
	blobDir := filepath.Join(p, "blobs")
	entries, err := os.ReadDir(blobDir)
	if err != nil {
		t.Fatalf("failed to read blobs directory: %v", err)
	}
	for _, entry := range entries {
		blobPath := filepath.Join(blobDir, entry.Name())
		content, err := os.ReadFile(blobPath)
		if err != nil {
			t.Fatalf("failed to read blob %s: %v", entry.Name(), err)
		}
		t.Logf("Contents of %s:\n%s", entry.Name(), string(content))
	}

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-1d0ad71299d48c2fb7ae2b98e683643e771f8a5b72be34942af90d97a91c1e37"),
		filepath.Join(p, "blobs", "sha256-6d6e36c1f90fc7deefc33a7300aa21ad4b67c506e33ecdeddfafa98147e60bbf"),
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
		filepath.Join(p, "blobs", "sha256-bbdce269dabe013033632238b4b2d1e02fac2f97787c5e895f4da84e09cccd5d"),
		filepath.Join(p, "blobs", "sha256-e29a7b3c47287a2489c895d21fe413c20f859a85d20e749492f52a838e36e1ba"),
	})

	actual, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-e29a7b3c47287a2489c895d21fe413c20f859a85d20e749492f52a838e36e1ba"))
	if err != nil {
		t.Fatal(err)
	}
	expect, err := json.Marshal(map[string]any{"temperature": 0.6, "top_k": 10, "top_p": 0.7, "stop": []string{"USER:", "ASSISTANT:"}})
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(bytes.TrimSpace(expect), bytes.TrimSpace(actual)) {
		t.Errorf("expected %s, actual %s", string(expect), string(actual))
	}

	// slices are replaced
	w = createRequest(t, s.CreateHandler, api.CreateRequest{
		Name: "test2",
		From: "test",
		Parameters: map[string]any{
			"temperature": 0.6,
			"top_p":       0.7,
			"stop":        []string{"<|endoftext|>"},
		},
		Stream: &stream,
	})
	if w.Code != http.StatusOK {
		t.Fatalf("expected status code 200, actual %d", w.Code)
	}

	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
		filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
	})

	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
		filepath.Join(p, "blobs", "sha256-12f58bb75cb3042d69a7e013ab87fb3c3c7088f50ddc62f0c77bd332f0d44d35"),
		filepath.Join(p, "blobs", "sha256-1d0ad71299d48c2fb7ae2b98e683643e771f8a5b72be34942af90d97a91c1e37"),
		filepath.Join(p, "blobs", "sha256-6d6e36c1f90fc7deefc33a7300aa21ad4b67c506e33ecdeddfafa98147e60bbf"),
		filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"),
		filepath.Join(p, "blobs", "sha256-9443591d14be23c1e33d101934d76ad03bdb0715fe0879e8b0d1819e7bb063dd"),
	})

	actual, err = os.ReadFile(filepath.Join(p, "blobs", "sha256-12f58bb75cb3042d69a7e013ab87fb3c3c7088f50ddc62f0c77bd332f0d44d35"))
	if err != nil {
		t.Fatal(err)
	}
	expect, err = json.Marshal(map[string]any{"temperature": 0.6, "top_k": 10, "top_p": 0.7, "stop": []string{"<|endoftext|>"}})
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(bytes.TrimSpace(expect), bytes.TrimSpace(actual)) {
		t.Errorf("expected %s, actual %s", string(expect), string(actual))
	}
}
func TestCreateReplacesMessages(t *testing.T) { gin.SetMode(gin.TestMode) p := t.TempDir() t.Setenv("OLLAMA_MODELS", p) var s Server _, digest := createBinFile(t, nil, nil) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test", Files: map[string]string{"test.gguf": digest}, Messages: []api.Message{ { Role: "assistant", Content: "What is my purpose?", }, { Role: "user", Content: "You run tests.", }, { Role: "assistant", Content: "Oh, my god.", }, }, Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{ filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"), }) checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-298baeaf6928a60cf666d88d64a1ba606feb43a2865687c39e40652e407bffc4"), filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"), filepath.Join(p, "blobs", "sha256-c84aee28f2af350596f674de51d2a802ea782653ef2930a21d48bd43d5cd5317"), }) w = createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test2", From: "test", Messages: []api.Message{ { Role: "assistant", Content: "You're a test, Harry.", }, { Role: "user", Content: "I-I'm a what?", }, { Role: "assistant", Content: "A test. 
And a thumping good one at that, I'd wager.", }, }, Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{ filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"), filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"), }) // Old layers will not have been pruned checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-09cfac3e6a637e25cb41aa85c24c110dc17ba89634de7df141b564dd2da4168b"), filepath.Join(p, "blobs", "sha256-298baeaf6928a60cf666d88d64a1ba606feb43a2865687c39e40652e407bffc4"), filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"), filepath.Join(p, "blobs", "sha256-a60ecc9da299ec7ede453f99236e5577fd125e143689b646d9f0ddc9971bf4db"), filepath.Join(p, "blobs", "sha256-c84aee28f2af350596f674de51d2a802ea782653ef2930a21d48bd43d5cd5317"), }) type message struct { Role string `json:"role"` Content string `json:"content"` } f, err := os.Open(filepath.Join(p, "blobs", "sha256-a60ecc9da299ec7ede453f99236e5577fd125e143689b646d9f0ddc9971bf4db")) if err != nil { t.Fatal(err) } defer f.Close() var actual []message if err := json.NewDecoder(f).Decode(&actual); err != nil { t.Fatal(err) } expect := []message{ {Role: "assistant", Content: "You're a test, Harry."}, {Role: "user", Content: "I-I'm a what?"}, {Role: "assistant", Content: "A test. 
And a thumping good one at that, I'd wager."}, } if !slices.Equal(actual, expect) { t.Errorf("expected %s, actual %s", expect, actual) } } func TestCreateTemplateSystem(t *testing.T) { gin.SetMode(gin.TestMode) p := t.TempDir() t.Setenv("OLLAMA_MODELS", p) var s Server _, digest := createBinFile(t, nil, nil) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test", Files: map[string]string{"test.gguf": digest}, Template: "{{ .System }} {{ .Prompt }}", System: "Say bye!", Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{ filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"), }) checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-0a04d979734167da3b80811a1874d734697f366a689f3912589b99d2e86e7ad1"), filepath.Join(p, "blobs", "sha256-4c5f51faac758fecaff8db42f0b7382891a4d0c0bb885f7b86be88c814a7cc86"), filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"), filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"), }) template, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed")) if err != nil { t.Fatal(err) } if string(template) != "{{ .System }} {{ .Prompt }}" { t.Errorf("expected \"{{ .System }} {{ .Prompt }}\", actual %s", template) } system, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-4c5f51faac758fecaff8db42f0b7382891a4d0c0bb885f7b86be88c814a7cc86")) if err != nil { t.Fatal(err) } if string(system) != "Say bye!" 
{ t.Errorf("expected \"Say bye!\", actual %s", system) } t.Run("incomplete template", func(t *testing.T) { _, digest := createBinFile(t, nil, nil) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test", Files: map[string]string{"test.gguf": digest}, Template: "{{ .Prompt", Stream: &stream, }) if w.Code != http.StatusBadRequest { t.Fatalf("expected status code 400, actual %d", w.Code) } }) t.Run("template with unclosed if", func(t *testing.T) { _, digest := createBinFile(t, nil, nil) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test", Files: map[string]string{"test.gguf": digest}, Template: "{{ if .Prompt }}", Stream: &stream, }) if w.Code != http.StatusBadRequest { t.Fatalf("expected status code 400, actual %d", w.Code) } }) t.Run("template with undefined function", func(t *testing.T) { _, digest := createBinFile(t, nil, nil) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test", Files: map[string]string{"test.gguf": digest}, Template: "{{ Prompt }}", Stream: &stream, }) if w.Code != http.StatusBadRequest { t.Fatalf("expected status code 400, actual %d", w.Code) } }) } func TestCreateAndShowRemoteModel(t *testing.T) { gin.SetMode(gin.TestMode) var s Server w := createRequest(t, s.CreateHandler, api.CreateRequest{ Model: "test", From: "bob", RemoteHost: "https://ollama.com", Info: map[string]any{ "capabilities": []string{"completion", "tools", "thinking"}, "model_family": "gptoss", "context_length": 131072, "embedding_length": 2880, "quantization_level": "MXFP4", "parameter_size": "20.9B", }, Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("exected status code 200, actual %d", w.Code) } w = createRequest(t, s.ShowHandler, api.ShowRequest{Model: "test"}) if w.Code != http.StatusOK { t.Fatalf("exected status code 200, actual %d", w.Code) } var resp api.ShowResponse if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { t.Fatal(err) } expectedDetails := api.ModelDetails{ ParentModel: "", Format: "", 
Family: "gptoss", Families: []string{"gptoss"}, ParameterSize: "20.9B", QuantizationLevel: "MXFP4", } if !reflect.DeepEqual(resp.Details, expectedDetails) { t.Errorf("model details: expected %#v, actual %#v", expectedDetails, resp.Details) } expectedCaps := []model.Capability{ model.Capability("completion"), model.Capability("tools"), model.Capability("thinking"), } if !slices.Equal(resp.Capabilities, expectedCaps) { t.Errorf("capabilities: expected %#v, actual %#v", expectedCaps, resp.Capabilities) } v, ok := resp.ModelInfo["gptoss.context_length"] ctxlen := v.(float64) if !ok || int(ctxlen) != 131072 { t.Errorf("context len: expected %d, actual %d", 131072, int(ctxlen)) } v, ok = resp.ModelInfo["gptoss.embedding_length"] embedlen := v.(float64) if !ok || int(embedlen) != 2880 { t.Errorf("embed len: expected %d, actual %d", 2880, int(embedlen)) } fmt.Printf("resp = %#v\n", resp) } func TestCreateLicenses(t *testing.T) { gin.SetMode(gin.TestMode) p := t.TempDir() t.Setenv("OLLAMA_MODELS", p) var s Server _, digest := createBinFile(t, nil, nil) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test", Files: map[string]string{"test.gguf": digest}, License: []string{"MIT", "Apache-2.0"}, Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{ filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"), }) checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-2af71558e438db0b73a20beab92dc278a94e1bbe974c00c1a33e3ab62d53a608"), filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"), filepath.Join(p, "blobs", "sha256-a762f214df0d96c9a7b82f96da98d99ceb2776c88e3ea7ffa09d1e5835516ec6"), filepath.Join(p, "blobs", "sha256-e5dcffe836b6ec8a58e492419b550e65fb8cbdc308503979e5dacb33ac7ea3b7"), }) mit, err := 
os.ReadFile(filepath.Join(p, "blobs", "sha256-e5dcffe836b6ec8a58e492419b550e65fb8cbdc308503979e5dacb33ac7ea3b7")) if err != nil { t.Fatal(err) } if string(mit) != "MIT" { t.Errorf("expected MIT, actual %s", mit) } apache, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-2af71558e438db0b73a20beab92dc278a94e1bbe974c00c1a33e3ab62d53a608")) if err != nil { t.Fatal(err) } if string(apache) != "Apache-2.0" { t.Errorf("expected Apache-2.0, actual %s", apache) } } func TestCreateDetectTemplate(t *testing.T) { gin.SetMode(gin.TestMode) p := t.TempDir() t.Setenv("OLLAMA_MODELS", p) var s Server t.Run("matched", func(t *testing.T) { _, digest := createBinFile(t, ggml.KV{ "tokenizer.chat_template": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}", }, nil) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test", Files: map[string]string{"test.gguf": digest}, Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-0d79f567714c62c048378f2107fb332dabee0135d080c302d884317da9433cc5"), filepath.Join(p, "blobs", "sha256-3322a0c650c758b7386ff55629d27d07c07b6c3d3515e259dc3e5598c41e9f4e"), filepath.Join(p, "blobs", "sha256-35360843d0c84fb1506952a131bbef13cd2bb4a541251f22535170c05b56e672"), filepath.Join(p, "blobs", "sha256-a56c12acca8068cb6c335e237da6643e8a802a92959a63ad5bd17828e3b5e9b0"), }) }) t.Run("unmatched", func(t *testing.T) { _, digest := createBinFile(t, nil, nil) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test", Files: map[string]string{"test.gguf": digest}, Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } checkFileExists(t, filepath.Join(p, "blobs", "*"), 
[]string{ filepath.Join(p, "blobs", "sha256-6bcdb8859d417753645538d7bbfbd7ca91a3f0c191aef5379c53c05e86b669dd"), filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"), }) }) } func TestDetectModelTypeFromFiles(t *testing.T) { t.Run("gguf file", func(t *testing.T) { _, digest := createBinFile(t, nil, nil) files := map[string]string{ "model.gguf": digest, } modelType := detectModelTypeFromFiles(files) if modelType != "gguf" { t.Fatalf("expected model type 'gguf', got %q", modelType) } }) t.Run("gguf file w/o extension", func(t *testing.T) { _, digest := createBinFile(t, nil, nil) files := map[string]string{ fmt.Sprintf("%x", digest): digest, } modelType := detectModelTypeFromFiles(files) if modelType != "gguf" { t.Fatalf("expected model type 'gguf', got %q", modelType) } }) t.Run("safetensors file", func(t *testing.T) { files := map[string]string{ "model.safetensors": "sha256:abc123", } modelType := detectModelTypeFromFiles(files) if modelType != "safetensors" { t.Fatalf("expected model type 'safetensors', got %q", modelType) } }) t.Run("unsupported file type", func(t *testing.T) { p := t.TempDir() t.Setenv("OLLAMA_MODELS", p) data := []byte("12345678") digest := fmt.Sprintf("sha256:%x", sha256.Sum256(data)) if err := os.MkdirAll(filepath.Join(p, "blobs"), 0o755); err != nil { t.Fatal(err) } f, err := os.Create(filepath.Join(p, "blobs", fmt.Sprintf("sha256-%s", strings.TrimPrefix(digest, "sha256:")))) if err != nil { t.Fatal(err) } defer f.Close() if _, err := f.Write(data); err != nil { t.Fatal(err) } files := map[string]string{ "model.bin": digest, } modelType := detectModelTypeFromFiles(files) if modelType != "" { t.Fatalf("expected empty model type for unsupported file, got %q", modelType) } }) t.Run("file with less than 4 bytes", func(t *testing.T) { p := t.TempDir() t.Setenv("OLLAMA_MODELS", p) data := []byte("123") digest := fmt.Sprintf("sha256:%x", sha256.Sum256(data)) if err := os.MkdirAll(filepath.Join(p, 
"blobs"), 0o755); err != nil { t.Fatal(err) } f, err := os.Create(filepath.Join(p, "blobs", fmt.Sprintf("sha256-%s", strings.TrimPrefix(digest, "sha256:")))) if err != nil { t.Fatal(err) } defer f.Close() if _, err := f.Write(data); err != nil { t.Fatal(err) } files := map[string]string{ "noext": digest, } modelType := detectModelTypeFromFiles(files) if modelType != "" { t.Fatalf("expected empty model type for small file, got %q", modelType) } }) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/sched.go
server/sched.go
package server import ( "context" "errors" "fmt" "log/slog" "reflect" "slices" "sort" "strings" "sync" "time" "github.com/ollama/ollama/api" "github.com/ollama/ollama/discover" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/format" "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/logutil" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/types/model" ) type LlmRequest struct { ctx context.Context //nolint:containedctx model *Model opts api.Options sessionDuration *api.Duration successCh chan *runnerRef errCh chan error schedAttempts uint } type Scheduler struct { pendingReqCh chan *LlmRequest finishedReqCh chan *LlmRequest expiredCh chan *runnerRef unloadedCh chan any // loadedMu protects loaded and activeLoading loadedMu sync.Mutex // activeLoading is the model that we are currently working on loading, // including by evicting one or more other models. We can only load // one model at a time but new requests to models that already loaded can // happen in parallel activeLoading llm.LlamaServer loaded map[string]*runnerRef loadFn func(req *LlmRequest, f *ggml.GGML, systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, requireFull bool) bool newServerFn func(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, model string, f *ggml.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) getGpuFn func(ctx context.Context, runners []ml.FilteredRunnerDiscovery) []ml.DeviceInfo getSystemInfoFn func() ml.SystemInfo waitForRecovery time.Duration } // Default automatic value for number of models we allow per GPU // Model will still need to fit in VRAM, but loading many small models // on a large GPU can cause stalling var defaultModelsPerGPU = 3 var ErrMaxQueue = errors.New("server busy, please try again. 
maximum pending requests exceeded") func InitScheduler(ctx context.Context) *Scheduler { maxQueue := envconfig.MaxQueue() sched := &Scheduler{ pendingReqCh: make(chan *LlmRequest, maxQueue), finishedReqCh: make(chan *LlmRequest, maxQueue), expiredCh: make(chan *runnerRef, maxQueue), unloadedCh: make(chan any, maxQueue), loaded: make(map[string]*runnerRef), newServerFn: llm.NewLlamaServer, getGpuFn: discover.GPUDevices, getSystemInfoFn: discover.GetSystemInfo, waitForRecovery: 5 * time.Second, } sched.loadFn = sched.load return sched } // context must be canceled to decrement ref count and release the runner func (s *Scheduler) GetRunner(c context.Context, m *Model, opts api.Options, sessionDuration *api.Duration) (chan *runnerRef, chan error) { if opts.NumCtx < 4 { opts.NumCtx = 4 } if m.CheckCapabilities(model.CapabilityVision) == nil { // multimodal models require at least 2048 context opts.NumCtx = max(opts.NumCtx, 2048) } req := &LlmRequest{ ctx: c, model: m, opts: opts, sessionDuration: sessionDuration, successCh: make(chan *runnerRef, 1), errCh: make(chan error, 1), } s.loadedMu.Lock() runner := s.loaded[req.model.ModelPath] s.loadedMu.Unlock() if runner != nil && !runner.needsReload(c, req) { req.useLoadedRunner(runner, s.finishedReqCh) } else { select { case s.pendingReqCh <- req: default: req.errCh <- ErrMaxQueue } } return req.successCh, req.errCh } // Returns immediately, spawns go routines for the scheduler which will shutdown when ctx is done func (s *Scheduler) Run(ctx context.Context) { slog.Debug("starting llm scheduler") go func() { s.processPending(ctx) }() go func() { s.processCompleted(ctx) }() } func (s *Scheduler) processPending(ctx context.Context) { maxRunners := envconfig.MaxRunners() for { select { case <-ctx.Done(): slog.Debug("shutting down scheduler pending loop") return case pending := <-s.pendingReqCh: // Block other requests until we get this pending request running pending.schedAttempts++ if pending.ctx.Err() != nil { 
slog.Debug("pending request cancelled or timed out, skipping scheduling") continue } logutil.Trace("processing incoming request", "model", pending.model.ModelPath) for { var runnerToExpire *runnerRef s.loadedMu.Lock() runner := s.loaded[pending.model.ModelPath] loadedCount := len(s.loaded) runnersSnapshot := make([]ml.FilteredRunnerDiscovery, 0, len(s.loaded)) for _, r := range s.loaded { runnersSnapshot = append(runnersSnapshot, r) } s.loadedMu.Unlock() if runner != nil { if runner.needsReload(ctx, pending) { slog.Debug("reloading", "runner", runner) runnerToExpire = runner } else { // Runner is usable, return it logutil.Trace("using existing loaded runner", "model", pending.model.ModelPath) pending.useLoadedRunner(runner, s.finishedReqCh) break } } else if maxRunners > 0 && loadedCount >= int(maxRunners) { slog.Debug("max runners achieved, unloading one to make room", "runner_count", loadedCount) runnerToExpire = s.findRunnerToUnload() } else { // Either no models are loaded or below envconfig.MaxRunners // Get a refreshed GPU list var gpus []ml.DeviceInfo if pending.opts.NumGPU == 0 { gpus = []ml.DeviceInfo{} } else { logutil.Trace("refreshing GPU list", "model", pending.model.ModelPath) gpus = s.getGpuFn(ctx, runnersSnapshot) } logutil.Trace("refreshing system information", "model", pending.model.ModelPath) systemInfo := s.getSystemInfoFn() if maxRunners <= 0 { // No user specified MaxRunners, so figure out what automatic setting to use for the next load attempt if pending.opts.NumGPU == 0 { // Need to get actual GPU list to set the correct default max models logutil.Trace("refreshing GPU list", "model", pending.model.ModelPath) g := s.getGpuFn(ctx, runnersSnapshot) maxRunners = uint(defaultModelsPerGPU * max(len(g), 1)) } else { maxRunners = uint(defaultModelsPerGPU * max(len(gpus), 1)) } slog.Debug("updating default concurrency", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "gpu_count", len(gpus)) } // Load model for fitting logutil.Trace("loading model metadata", 
"model", pending.model.ModelPath) ggml, err := llm.LoadModel(pending.model.ModelPath, 1024) if err != nil { pending.errCh <- err break } // Update free memory from currently loaded models logutil.Trace("updating free space", "gpu_count", len(gpus), "model", pending.model.ModelPath) s.updateFreeSpace(gpus) if loadedCount == 0 { // No models loaded. Load the model but prefer the best fit. slog.Debug("loading first model", "model", pending.model.ModelPath) s.loadFn(pending, ggml, systemInfo, gpus, false) break } // More than one loaded model, so we have to see if the // new one fits logutil.Trace("loading additional model", "model", pending.model.ModelPath) needEvict := s.loadFn(pending, ggml, systemInfo, gpus, true) if !needEvict { slog.Debug("new model fits with existing models, loading") break } runnerToExpire = s.findRunnerToUnload() } if runnerToExpire == nil { // While we were performing load calculations, the loaded runner(s) unloaded in parallel // so findRunnerToUnload returned no runners. 
We'll try again and the loadedCount should be zero slog.Debug("runner to expire was nil, retrying") continue } // Trigger an expiration to unload once it's done runnerToExpire.refMu.Lock() slog.Debug("resetting model to expire immediately to make room", "runner", runnerToExpire, "refCount", runnerToExpire.refCount) if runnerToExpire.expireTimer != nil { runnerToExpire.expireTimer.Stop() runnerToExpire.expireTimer = nil } runnerToExpire.sessionDuration = 0 if runnerToExpire.refCount <= 0 { s.expiredCh <- runnerToExpire } runnerToExpire.refMu.Unlock() // Wait for the unload to happen slog.Debug("waiting for pending requests to complete and unload to occur", "runner", runnerToExpire) select { case <-ctx.Done(): slog.Debug("shutting down scheduler pending loop") return case <-s.unloadedCh: slog.Debug("unload completed", "runner", runnerToExpire) continue } } case <-s.unloadedCh: // An unload request when there are no pending request can be ignored slog.Debug("ignoring unload event with no pending requests") } } } func (s *Scheduler) processCompleted(ctx context.Context) { // Process completed requests, expired timers, and unloading models for { select { case <-ctx.Done(): slog.Debug("shutting down scheduler completed loop") return case finished := <-s.finishedReqCh: s.loadedMu.Lock() runner := s.loaded[finished.model.ModelPath] s.loadedMu.Unlock() if runner == nil { slog.Error("finished request signal received after model unloaded", "modelPath", finished.model.ModelPath) continue } runner.refMu.Lock() runner.refCount-- if runner.refCount <= 0 { if runner.sessionDuration <= 0 { slog.Debug("runner with zero duration has gone idle, expiring to unload", "runner", runner) if runner.expireTimer != nil { runner.expireTimer.Stop() runner.expireTimer = nil } s.expiredCh <- runner } else if runner.expireTimer == nil { slog.Debug("runner with non-zero duration has gone idle, adding timer", "runner", runner, "duration", runner.sessionDuration) runner.expireTimer = 
time.AfterFunc(runner.sessionDuration, func() { slog.Debug("timer expired, expiring to unload", "runner", runner) runner.refMu.Lock() defer runner.refMu.Unlock() if runner.expireTimer != nil { runner.expireTimer.Stop() runner.expireTimer = nil } s.expiredCh <- runner }) runner.expiresAt = time.Now().Add(runner.sessionDuration) } else { slog.Debug("runner with non-zero duration has gone idle, resetting timer", "runner", runner, "duration", runner.sessionDuration) runner.expireTimer.Reset(runner.sessionDuration) runner.expiresAt = time.Now().Add(runner.sessionDuration) } } slog.Debug("after processing request finished event", "runner", runner, "refCount", runner.refCount) runner.refMu.Unlock() case runner := <-s.expiredCh: slog.Debug("runner expired event received", "runner", runner) runner.refMu.Lock() if runner.refCount > 0 { slog.Debug("expired event with positive ref count, retrying", "runner", runner, "refCount", runner.refCount) go func(runner *runnerRef) { // We can't unload yet, but want to as soon as the current request completes // So queue up another expired event time.Sleep(10 * time.Millisecond) s.expiredCh <- runner }(runner) runner.refMu.Unlock() continue } s.loadedMu.Lock() slog.Debug("got lock to unload expired event", "runner", runner) runnerToUnload := s.loaded[runner.modelPath] if runnerToUnload == nil { // If runnerToUnload is nil, we already processed an event and // unloaded it. This double unload can happen if the initial // request is canceled and we're trying to load another model // that requires this one to be evicted, or the settings change // and require a reload s.loadedMu.Unlock() runner.refMu.Unlock() slog.Debug("duplicate expired event, ignoring", "runner", runner) } else if runner.pid != runnerToUnload.pid { // If the pids do not match, we likely had multiple load // failures for the same model in quick succession due to // request context canceled and are draining the queue of // events. 
Ensure the orphaned runner is properly shut down, but // do not delete the mismatched loaded runner, or wait for VRAM // convergence. slog.Debug("orphaned runner shutting down", "orphan", runner, "loaded", runnerToUnload) runner.unload() s.loadedMu.Unlock() runner.refMu.Unlock() } else { slog.Debug("starting background wait for VRAM recovery", "runner", runner) runnersSnapshot := make([]ml.FilteredRunnerDiscovery, 0, len(s.loaded)) for _, r := range s.loaded { runnersSnapshot = append(runnersSnapshot, r) } finished := s.waitForVRAMRecovery(runner, runnersSnapshot) runner.unload() delete(s.loaded, runner.modelPath) s.loadedMu.Unlock() slog.Debug("runner terminated and removed from list, blocking for VRAM recovery", "runner", runner) <-finished runner.refMu.Unlock() slog.Debug("sending an unloaded event", "runner", runner) s.unloadedCh <- struct{}{} } } } } // Complete the pending request and send the runner back to the requester // Wires up a finished event after the request context is completed // Updates session duration, and resets expiration timer func (pending *LlmRequest) useLoadedRunner(runner *runnerRef, finished chan *LlmRequest) { runner.refMu.Lock() defer runner.refMu.Unlock() runner.refCount++ if runner.expireTimer != nil { runner.expireTimer.Stop() runner.expireTimer = nil } if pending.sessionDuration != nil { runner.sessionDuration = pending.sessionDuration.Duration } pending.successCh <- runner go func() { <-pending.ctx.Done() slog.Debug("context for request finished", "runner", runner) finished <- pending }() } // load creates a new model based on req and loads it. If requireFull is true then the model must be loaded fully onto GPUs // (if any). Returns whether the scheduler needs to evict a model to make this one fit. 
func (s *Scheduler) load(req *LlmRequest, f *ggml.GGML, systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, requireFull bool) bool {
	numParallel := max(int(envconfig.NumParallel()), 1)

	// Embedding models should always be loaded with parallel=1
	if req.model.CheckCapabilities(model.CapabilityCompletion) != nil {
		numParallel = 1
	}

	// `mllama`, `qwen3vl`, and `qwen3vlmoe` are snowflakes and use an encoder cache which cannot be used with num_parallel > 1
	// ref: https://github.com/ollama/ollama/issues/4165
	if slices.Contains([]string{"mllama", "qwen3vl", "qwen3vlmoe"}, req.model.Config.ModelFamily) && numParallel != 1 {
		numParallel = 1
		slog.Warn("model architecture does not currently support parallel requests", "architecture", req.model.Config.ModelFamily)
	}

	// Request-supplied keep-alive overrides the environment default.
	sessionDuration := envconfig.KeepAlive()
	if req.sessionDuration != nil {
		sessionDuration = req.sessionDuration.Duration
	}

	// Reuse a server that is already mid-load (eviction retry path); otherwise
	// spawn a new one. activeLoading is guarded by loadedMu.
	s.loadedMu.Lock()
	llama := s.activeLoading
	if llama == nil {
		var err error
		llama, err = s.newServerFn(systemInfo, gpus, req.model.ModelPath, f, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts, numParallel)
		if err != nil {
			// some older models are not compatible with newer versions of llama.cpp
			// show a generalized compatibility error until there is a better way to
			// check for model compatibility
			if errors.Is(err, ggml.ErrUnsupportedFormat) || strings.Contains(err.Error(), "failed to load model") {
				err = fmt.Errorf("%v: this model may be incompatible with your version of Ollama. If you previously pulled this model, try updating it by running `ollama pull %s`", err, req.model.ShortName)
			}
			slog.Info("NewLlamaServer failed", "model", req.model.ModelPath, "error", err)
			req.errCh <- err
			s.loadedMu.Unlock()
			return false
		}
		s.activeLoading = llama
	} else {
		if s.activeLoading.ModelPath() != req.model.ModelPath {
			panic(fmt.Errorf("attempting to load different model after eviction (original %v new %v)", s.activeLoading.ModelPath(), req.model.ModelPath))
		}
	}
	s.loadedMu.Unlock()

	systemTotalMemory := systemInfo.TotalMemory
	systemFreeMemory := systemInfo.FreeMemory
	systemSwapFreeMemory := systemInfo.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))
	for _, gpu := range gpus {
		// "available" is free VRAM after reserving the configured overhead and
		// the device's minimum; clamp to zero when the reserve exceeds free.
		available := gpu.FreeMemory - envconfig.GpuOverhead() - gpu.MinimumMemory()
		if gpu.FreeMemory < envconfig.GpuOverhead()+gpu.MinimumMemory() {
			available = 0
		}
		slog.Info("gpu memory", "id", gpu.ID, "library", gpu.Library, "available", format.HumanBytes2(available), "free", format.HumanBytes2(gpu.FreeMemory), "minimum", format.HumanBytes2(gpu.MinimumMemory()), "overhead", format.HumanBytes2(envconfig.GpuOverhead()))
	}

	gpuIDs, err := llama.Load(req.ctx, systemInfo, gpus, requireFull)
	if err != nil {
		if errors.Is(err, llm.ErrLoadRequiredFull) {
			if !requireFull {
				// No other models loaded, yet we still don't fit, so report an error
				slog.Info("model is too large for system memory", "requireFull", requireFull)
				s.activeLoading.Close()
				s.activeLoading = nil
				req.errCh <- err
			}
			// true tells the caller an eviction is needed before retrying.
			return true
		}
		slog.Info("Load failed", "model", req.model.ModelPath, "error", err)
		s.activeLoading.Close()
		s.activeLoading = nil
		req.errCh <- err
		return false
	}

	// Determine if we have discrete GPUs which we should monitor VRAM usage on during shutdown
	discreteGPUs := false
iGPUScan:
	for _, devid := range gpuIDs {
		for _, dev := range gpus {
			if dev.DeviceID == devid {
				if !dev.Integrated {
					discreteGPUs = true
					break iGPUScan
				}
			}
		}
	}

	runner := &runnerRef{
		model:           req.model,
		modelPath:       req.model.ModelPath,
		llama:           llama,
		Options:         &req.opts,
		sessionDuration: sessionDuration,
		gpus:            gpuIDs,
		discreteGPUs:    discreteGPUs,
		vramSize:        llama.VRAMSize(),
		totalSize:       llama.TotalSize(),
		loading:         true,
		pid:             llama.Pid(),
	}
	runner.numParallel = numParallel
	runner.refMu.Lock() // hold lock until running or aborted

	s.loadedMu.Lock()
	if oldRunner, ok := s.loaded[req.model.ModelPath]; ok {
		// Shouldn't happen, but safeguard against leaking a runner
		slog.Warn("model was still loaded", "old_runner", oldRunner, "new_runner", runner)
		oldRunner.refMu.Lock()
		oldRunner.unload()
		oldRunner.refMu.Unlock()
	}
	s.activeLoading = nil
	s.loaded[req.model.ModelPath] = runner
	slog.Info("loaded runners", "count", len(s.loaded))
	s.loadedMu.Unlock()

	// The refMu taken above is released by this goroutine once the server is
	// running (or the load has failed), so callers cannot observe a
	// half-started runner.
	go func() {
		defer runner.refMu.Unlock()
		if err = llama.WaitUntilRunning(req.ctx); err != nil {
			slog.Error("error loading llama server", "error", err)
			req.errCh <- err
			slog.Debug("triggering expiration for failed load", "runner", runner)
			s.expiredCh <- runner
			return
		}
		slog.Debug("finished setting up", "runner", runner)
		if runner.pid < 0 {
			runner.pid = llama.Pid()
		}
		runner.refCount++
		runner.loading = false
		go func() {
			<-req.ctx.Done()
			slog.Debug("context for request finished")
			s.finishedReqCh <- req
		}()
		req.successCh <- runner
	}()
	return false
}

// updateFreeSpace subtracts each loaded runner's predicted VRAM usage from the
// reported free memory of the GPUs in allGpus, mutating the slice elements in
// place.
func (s *Scheduler) updateFreeSpace(allGpus []ml.DeviceInfo) {
	if len(allGpus) == 0 {
		return
	}
	predMap := map[ml.DeviceID]uint64{} // Sum up the total predicted usage per GPU for all runners
	// Snapshot the runner list under loadedMu, then query each runner under
	// its own refMu so we never hold both locks at once.
	s.loadedMu.Lock()
	runners := make([]*runnerRef, 0, len(s.loaded))
	for _, r := range s.loaded {
		runners = append(runners, r)
	}
	s.loadedMu.Unlock()
	for _, r := range runners {
		r.refMu.Lock()
		if r.llama != nil {
			for _, gpu := range allGpus {
				predMap[gpu.DeviceID] += r.llama.VRAMByGPU(gpu.DeviceID)
			}
		} else {
			slog.Warn("unexpected nil runner reference, memory prediction may be incorrect")
		}
		r.refMu.Unlock()
	}

	// Now that we've summed up all the GPU usage predictions across all the loaded runners, update the gpu list
	for i := range allGpus {
		if p, ok := predMap[allGpus[i].DeviceID]; ok {
			slog.Debug("gpu reported", "gpu", allGpus[i].ID, "library", allGpus[i].Library, "available", format.HumanBytes2(allGpus[i].FreeMemory))
			if p > allGpus[i].TotalMemory {
				// Shouldn't happen
				slog.Warn("predicted usage exceeds VRAM", "gpu", allGpus[i].ID, "totalMemory", allGpus[i].TotalMemory, "predicted", p)
				allGpus[i].FreeMemory = 0
			} else if (allGpus[i].TotalMemory - p) < allGpus[i].FreeMemory {
				// predicted free is smaller than reported free, use it
				// TODO maybe we should just always trust our numbers, since cuda's free memory reporting is laggy
				// and we might unload models we didn't actually need to. The risk is if some other GPU intensive app is loaded
				// after we start our first runner, then we'll never account for that, so picking the smallest free value seems prudent.
				allGpus[i].FreeMemory = allGpus[i].TotalMemory - p
			}
			slog.Info("updated VRAM based on existing loaded models", "gpu", allGpus[i].ID, "library", allGpus[i].Library, "total", format.HumanBytes2(allGpus[i].TotalMemory), "available", format.HumanBytes2(allGpus[i].FreeMemory))
		}
	}
}

// TODO consolidate sched_types.go
// runnerRef tracks one loaded model server and its scheduling state.
// refMu guards all mutable fields; it must be held for refCount changes,
// unload, and timer manipulation.
type runnerRef struct {
	refMu    sync.Mutex
	refCount uint // prevent unloading if > 0

	llama        llm.LlamaServer
	pid          int
	loading      bool          // True only during initial load, then false forever
	gpus         []ml.DeviceID // Recorded at time of provisioning
	discreteGPUs bool          // True if all devices are discrete GPUs - used to skip VRAM recovery check for iGPUs
	vramSize     uint64
	totalSize    uint64

	sessionDuration time.Duration
	expireTimer     *time.Timer
	expiresAt       time.Time

	model       *Model
	modelPath   string
	numParallel int
	*api.Options
}

// The refMu must already be held when calling unload
func (runner *runnerRef) unload() {
	if runner.expireTimer != nil {
		runner.expireTimer.Stop()
		runner.expireTimer = nil
	}
	if runner.llama != nil {
		runner.llama.Close()
	}
	runner.model = nil
	runner.Options = nil
	runner.gpus = nil
}

// needsReload reports whether the already-loaded runner cannot serve req as-is:
// the adapters, projectors, or runner options changed, or the server fails a
// health-check ping within the timeout.
func (runner *runnerRef) needsReload(ctx context.Context, req *LlmRequest) bool {
	slog.Debug("evaluating already loaded", "model", req.model.ModelPath)
	runner.refMu.Lock()
	defer runner.refMu.Unlock()

	timeout := 10 * time.Second
	if runner.loading {
		timeout = 2 * time.Minute // Initial load can take a long time for big models on slow systems...
	}

	if runner.Options == nil {
		return true
	}

	// Don't reload runner if num_gpu=-1 was provided
	optsExisting := runner.Options.Runner
	optsNew := req.opts.Runner
	if optsNew.NumGPU < 0 {
		optsExisting.NumGPU = -1
		optsNew.NumGPU = -1
	}

	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	if !reflect.DeepEqual(runner.model.AdapterPaths, req.model.AdapterPaths) || // have the adapters changed?
		!reflect.DeepEqual(runner.model.ProjectorPaths, req.model.ProjectorPaths) || // have the projectors changed?
		!reflect.DeepEqual(optsExisting, optsNew) || // have the runner options changed?
		runner.llama.Ping(ctx) != nil {
		return true
	}

	return false
}

// Free memory reporting on GPUs can lag for a while even after the runner
// exits, so we have to keep checking until we see the available memory recover,
// otherwise subsequent model loads will get far less layers loaded or worse
// case, may completely fall back to CPU mode.
// This routine must be called before the runner unloads so it can establish
// a before and after GPU memory allocation. The returned channel
// will be notified when we're done waiting, or have timed out and should
// proceed anyway
func (s *Scheduler) waitForVRAMRecovery(runner *runnerRef, runners []ml.FilteredRunnerDiscovery) chan any {
	// Buffered so the background goroutine can complete even if nobody reads.
	finished := make(chan any, 1)

	// CPU, Metal and iGPUs don't need checking, so no waiting required
	if len(runner.gpus) == 0 || !runner.discreteGPUs || (len(runner.gpus) == 1 && runner.gpus[0].Library == "Metal") {
		finished <- struct{}{}
		slog.Debug("no need to wait for VRAM recovery", "runner", runner)
		return finished
	}
	start := time.Now()

	// Establish a baseline before we unload
	gpusBefore := s.getGpuFn(context.Background(), runners)
	var totalMemoryBefore, freeMemoryBefore uint64
	for _, gpu := range gpusBefore {
		totalMemoryBefore += gpu.TotalMemory
		freeMemoryBefore += gpu.FreeMemory
	}
	totalMemoryNow := totalMemoryBefore
	freeMemoryNow := freeMemoryBefore

	go func() {
		// typical convergence is 0.5-1.5s - If it takes too long to discover and converge, let the scheduler estimate VRAM usage
		ctx, cancel := context.WithTimeout(context.Background(), s.waitForRecovery)
		defer cancel()
		ticker := time.NewTicker(250 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Query GPUs, look for free to go back up
				gpusNow := s.getGpuFn(ctx, runners)
				totalMemoryNow = 0
				freeMemoryNow = 0
				for _, gpu := range gpusNow {
					totalMemoryNow += gpu.TotalMemory
					freeMemoryNow += gpu.FreeMemory
				}
				if freeMemoryNow > freeMemoryBefore {
					logutil.Trace("gpu VRAM convergence", "percent", int(float32(freeMemoryNow-freeMemoryBefore)/float32(runner.vramSize)*100))
				} else {
					logutil.Trace("gpu VRAM convergence", "percent", 0)
				}
				// If we're within ~75% of the estimated memory usage recovered, bail out
				if float32(freeMemoryNow-freeMemoryBefore) > float32(runner.vramSize)*0.75 {
					slog.Debug(fmt.Sprintf("gpu VRAM free memory converged after %0.2f seconds", time.Since(start).Seconds()), "free_before", format.HumanBytes2(freeMemoryBefore), "free_now", format.HumanBytes2(freeMemoryNow), "runner", runner)
					finished <- struct{}{}
					return
				}
			case <-ctx.Done():
				slog.Debug("gpu VRAM usage didn't recover within timeout", "seconds", time.Since(start).Seconds(), "free_before", format.HumanBytes2(freeMemoryBefore), "free_now", format.HumanBytes2(freeMemoryNow), "runner", runner)
				finished <- struct{}{}
				return
			}
		}
	}()
	return finished
}

// LogValue implements slog.LogValuer so runners render as a structured group
// in log output; it is nil-safe.
func (runner *runnerRef) LogValue() slog.Value {
	if runner == nil {
		return slog.StringValue("nil")
	}
	attrs := []slog.Attr{}
	if runner.model != nil {
		attrs = append(attrs, slog.String("name", runner.model.Name))
	}
	if len(runner.gpus) > 0 {
		attrs = append(attrs,
			slog.Any("inference", runner.gpus),
		)
	}
	attrs = append(attrs,
		slog.String("size", format.HumanBytes2(runner.totalSize)),
		slog.String("vram", format.HumanBytes2(runner.vramSize)),
		slog.Int("parallel", runner.numParallel),
		slog.Int("pid", runner.pid),
		slog.String("model", runner.modelPath),
	)
	if runner.Options != nil {
		attrs = append(attrs, slog.Int("num_ctx", runner.Options.NumCtx))
	}
	return slog.GroupValue(attrs...)
}

// Implements discover.RunnerDiscovery
func (runner *runnerRef) GetPort() int {
	if runner.llama != nil {
		return runner.llama.GetPort()
	}
	return -1
}

func (runner *runnerRef) GetDeviceInfos(ctx context.Context) []ml.DeviceInfo {
	if runner.llama != nil {
		return runner.llama.GetDeviceInfos(ctx)
	}
	return nil
}

func (runner *runnerRef) GetActiveDeviceIDs() []ml.DeviceID {
	return runner.gpus
}

func (runner *runnerRef) HasExited() bool {
	if runner.llama != nil {
		return runner.llama.HasExited()
	}
	// No server attached — treat as exited.
	return true
}

// ByDurationAndName orders runners by session duration, then model path, for
// picking an eviction victim.
type ByDurationAndName []*runnerRef

func (a ByDurationAndName) Len() int      { return len(a) }
func (a ByDurationAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByDurationAndName) Less(i, j int) bool {
	// Primary sort by session duration (uint64 to handle negatives)
	d1 := uint64(a[i].sessionDuration)
	d2 := uint64(a[j].sessionDuration)
	if d1 != d2 {
		return d1 < d2
	}
	// Secondary sort by model path lex order
	return a[i].modelPath < a[j].modelPath
}

// TODO - future consideration to pick runners based on size
// type BySize []*runnerRef
// func (a BySize) Len() int { return len(a) }
// func (a BySize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// func (a BySize) Less(i, j int) bool { return a[i].vramSize < a[j].vramSize }

// findRunnerToUnload finds a runner to unload to make room for a new model
func (s *Scheduler) findRunnerToUnload() *runnerRef {
	s.loadedMu.Lock()
	runnerList := make([]*runnerRef, 0, len(s.loaded))
	for _, r := range s.loaded {
		runnerList = append(runnerList, r)
	}
	s.loadedMu.Unlock()
	if len(runnerList) == 0 {
		slog.Debug("no loaded runner to unload")
		return nil
	}

	// In the future we can enhance the algorithm to be smarter about picking the optimal runner to unload
	// e.g., if we have multiple options, will one make room for the request?
	sort.Sort(ByDurationAndName(runnerList))

	// First try to find a runner that's already idle
	for _, runner := range runnerList {
		runner.refMu.Lock()
		rc := runner.refCount
		runner.refMu.Unlock()
		if rc == 0 {
			slog.Debug("found an idle runner to unload", "runner", runner)
			return runner
		}
	}

	// None appear idle, just wait for the one with the shortest duration
	slog.Debug("no idle runners, picking the shortest duration", "runner_count", len(runnerList), "runner", runnerList[0])
	return runnerList[0]
}

// unloadAllRunners closes the in-progress load (if any) and every loaded
// runner's server. Entries are left in s.loaded; this is a shutdown path, not
// a bookkeeping one.
func (s *Scheduler) unloadAllRunners() {
	s.loadedMu.Lock()
	defer s.loadedMu.Unlock()

	if s.activeLoading != nil {
		slog.Debug("shutting down currently loading runner")
		s.activeLoading.Close()
		s.activeLoading = nil
	}

	for model, runner := range s.loaded {
		if runner.llama != nil {
			slog.Debug("shutting down runner", "model", model)
			runner.llama.Close()
		}
	}
}

// expireRunner marks the loaded runner for model as expired immediately:
// expiration time is set to now, any pending timer is cancelled, and an
// expired event is sent right away if the runner is idle (refCount <= 0).
func (s *Scheduler) expireRunner(model *Model) {
	s.loadedMu.Lock()
	runner, ok := s.loaded[model.ModelPath]
	s.loadedMu.Unlock()
	if ok {
		runner.refMu.Lock()
		runner.expiresAt = time.Now()
		if runner.expireTimer != nil {
			runner.expireTimer.Stop()
			runner.expireTimer = nil
		}
		runner.sessionDuration = 0
		if runner.refCount <= 0 {
			s.expiredCh <- runner
		}
		runner.refMu.Unlock()
	}
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/create.go
server/create.go
package server import ( "bytes" "cmp" "context" "encoding/json" "errors" "fmt" "io" "io/fs" "log/slog" "net" "net/http" "net/url" "os" "path" "path/filepath" "slices" "strings" "sync/atomic" "github.com/gin-gonic/gin" "github.com/ollama/ollama/api" "github.com/ollama/ollama/convert" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/format" "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/template" "github.com/ollama/ollama/types/errtypes" "github.com/ollama/ollama/types/model" ) var ( errNoFilesProvided = errors.New("no files provided to convert") errOnlyOneAdapterSupported = errors.New("only one adapter is currently supported") errOnlyGGUFSupported = errors.New("supplied file was not in GGUF format") errUnknownType = errors.New("unknown type") errNeitherFromOrFiles = errors.New("neither 'from' or 'files' was specified") errFilePath = errors.New("file path must be relative") ) func (s *Server) CreateHandler(c *gin.Context) { config := &model.ConfigV2{ OS: "linux", Architecture: "amd64", RootFS: model.RootFS{ Type: "layers", }, } var r api.CreateRequest if err := c.ShouldBindJSON(&r); errors.Is(err, io.EOF) { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "missing request body"}) return } else if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } config.Renderer = r.Renderer config.Parser = r.Parser config.Requires = r.Requires for v := range r.Files { if !fs.ValidPath(v) { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": errFilePath.Error()}) return } } name := model.ParseName(cmp.Or(r.Model, r.Name)) if !name.IsValid() { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": errtypes.InvalidModelNameErrMsg}) return } name, err := getExistingName(name) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } ch := make(chan any) go func() { defer close(ch) fn := func(resp api.ProgressResponse) { ch <- resp } oldManifest, _ := 
ParseNamedManifest(name) var baseLayers []*layerGGML var err error var remote bool if r.From != "" { slog.Debug("create model from model name", "from", r.From) fromName := model.ParseName(r.From) if !fromName.IsValid() { ch <- gin.H{"error": errtypes.InvalidModelNameErrMsg, "status": http.StatusBadRequest} return } if r.RemoteHost != "" { ru, err := remoteURL(r.RemoteHost) if err != nil { ch <- gin.H{"error": "bad remote", "status": http.StatusBadRequest} return } config.RemoteModel = r.From config.RemoteHost = ru remote = true } else { ctx, cancel := context.WithCancel(c.Request.Context()) defer cancel() baseLayers, err = parseFromModel(ctx, fromName, fn) if err != nil { ch <- gin.H{"error": err.Error()} } if err == nil && !remote && (config.Renderer == "" || config.Parser == "" || config.Requires == "") { manifest, mErr := ParseNamedManifest(fromName) if mErr == nil && manifest.Config.Digest != "" { configPath, pErr := GetBlobsPath(manifest.Config.Digest) if pErr == nil { if cfgFile, fErr := os.Open(configPath); fErr == nil { var baseConfig model.ConfigV2 if decErr := json.NewDecoder(cfgFile).Decode(&baseConfig); decErr == nil { if config.Renderer == "" { config.Renderer = baseConfig.Renderer } if config.Parser == "" { config.Parser = baseConfig.Parser } if config.Requires == "" { config.Requires = baseConfig.Requires } } cfgFile.Close() } } } } } } else if r.Files != nil { baseLayers, err = convertModelFromFiles(r.Files, baseLayers, false, fn) if err != nil { for _, badReq := range []error{errNoFilesProvided, errOnlyGGUFSupported, errUnknownType} { if errors.Is(err, badReq) { ch <- gin.H{"error": err.Error(), "status": http.StatusBadRequest} return } } ch <- gin.H{"error": err.Error()} return } } else { ch <- gin.H{"error": errNeitherFromOrFiles.Error(), "status": http.StatusBadRequest} return } var adapterLayers []*layerGGML if !remote && r.Adapters != nil { adapterLayers, err = convertModelFromFiles(r.Adapters, baseLayers, true, fn) if err != nil { for _, 
badReq := range []error{errNoFilesProvided, errOnlyOneAdapterSupported, errOnlyGGUFSupported, errUnknownType, errFilePath} { if errors.Is(err, badReq) { ch <- gin.H{"error": err.Error(), "status": http.StatusBadRequest} return } } ch <- gin.H{"error": err.Error(), "status": http.StatusBadRequest} return } } if len(adapterLayers) > 0 { baseLayers = append(baseLayers, adapterLayers...) } // Info is not currently exposed by Modelfiles, but allows overriding various // config values if r.Info != nil { caps, ok := r.Info["capabilities"] if ok { switch tcaps := caps.(type) { case []any: caps := make([]string, len(tcaps)) for i, c := range tcaps { str, ok := c.(string) if !ok { continue } caps[i] = str } config.Capabilities = append(config.Capabilities, caps...) } } strFromInfo := func(k string) string { v, ok := r.Info[k] if ok { val := v.(string) return val } return "" } vFromInfo := func(k string) float64 { v, ok := r.Info[k] if ok { val := v.(float64) return val } return 0 } config.ModelFamily = strFromInfo("model_family") if config.ModelFamily != "" { config.ModelFamilies = []string{config.ModelFamily} } config.BaseName = strFromInfo("base_name") config.FileType = strFromInfo("quantization_level") config.ModelType = strFromInfo("parameter_size") config.ContextLen = int(vFromInfo("context_length")) config.EmbedLen = int(vFromInfo("embedding_length")) } if err := createModel(r, name, baseLayers, config, fn); err != nil { if errors.Is(err, errBadTemplate) { ch <- gin.H{"error": err.Error(), "status": http.StatusBadRequest} return } ch <- gin.H{"error": err.Error()} return } if !envconfig.NoPrune() && oldManifest != nil { if err := oldManifest.RemoveLayers(); err != nil { ch <- gin.H{"error": err.Error()} } } ch <- api.ProgressResponse{Status: "success"} }() if r.Stream != nil && !*r.Stream { waitForStream(c, ch) return } streamResponse(c, ch) } func remoteURL(raw string) (string, error) { // Special‑case: user supplied only a path ("/foo/bar"). 
if strings.HasPrefix(raw, "/") { return (&url.URL{ Scheme: "http", Host: net.JoinHostPort("localhost", "11434"), Path: path.Clean(raw), }).String(), nil } if !strings.Contains(raw, "://") { raw = "http://" + raw } if raw == "ollama.com" || raw == "http://ollama.com" { raw = "https://ollama.com:443" } u, err := url.Parse(raw) if err != nil { return "", fmt.Errorf("parse error: %w", err) } if u.Host == "" { u.Host = "localhost" } hostPart, portPart, err := net.SplitHostPort(u.Host) if err == nil { u.Host = net.JoinHostPort(hostPart, portPart) } else { u.Host = net.JoinHostPort(u.Host, "11434") } if u.Path != "" { u.Path = path.Clean(u.Path) } if u.Path == "/" { u.Path = "" } return u.String(), nil } func convertModelFromFiles(files map[string]string, baseLayers []*layerGGML, isAdapter bool, fn func(resp api.ProgressResponse)) ([]*layerGGML, error) { switch detectModelTypeFromFiles(files) { case "safetensors": layers, err := convertFromSafetensors(files, baseLayers, isAdapter, fn) if err != nil { slog.Error("error converting from safetensors", "error", err) return nil, err } return layers, nil case "gguf": if len(files) == 0 { return nil, errNoFilesProvided } else if len(files) > 1 && isAdapter { return nil, errOnlyOneAdapterSupported } var digest string var allLayers []*layerGGML for _, v := range files { digest = v layers, err := ggufLayers(digest, fn) if err != nil { return nil, err } allLayers = append(allLayers, layers...) 
} return allLayers, nil default: return nil, errUnknownType } } func detectModelTypeFromFiles(files map[string]string) string { for fn := range files { if strings.HasSuffix(fn, ".safetensors") { return "safetensors" } else if strings.HasSuffix(fn, ".gguf") { return "gguf" } else { // try to see if we can find a gguf file even without the file extension blobPath, err := GetBlobsPath(files[fn]) if err != nil { slog.Error("error getting blobs path", "file", fn) return "" } f, err := os.Open(blobPath) if err != nil { slog.Error("error reading file", "error", err) return "" } defer f.Close() buf := make([]byte, 4) _, err = f.Read(buf) if err != nil { slog.Error("error reading file", "error", err) return "" } ct := ggml.DetectContentType(buf) if ct == "gguf" { return "gguf" } } } return "" } func convertFromSafetensors(files map[string]string, baseLayers []*layerGGML, isAdapter bool, fn func(resp api.ProgressResponse)) ([]*layerGGML, error) { tmpDir, err := os.MkdirTemp(envconfig.Models(), "ollama-safetensors") if err != nil { return nil, err } defer os.RemoveAll(tmpDir) // Set up a root to validate paths root, err := os.OpenRoot(tmpDir) if err != nil { return nil, err } defer root.Close() for fp, digest := range files { if !fs.ValidPath(fp) { return nil, fmt.Errorf("%w: %s", errFilePath, fp) } if _, err := root.Stat(fp); err != nil && !errors.Is(err, fs.ErrNotExist) { // Path is likely outside the root return nil, fmt.Errorf("%w: %s: %s", errFilePath, err, fp) } blobPath, err := GetBlobsPath(digest) if err != nil { return nil, err } if err := createLink(blobPath, filepath.Join(tmpDir, fp)); err != nil { return nil, err } } t, err := os.CreateTemp(tmpDir, "fp16") if err != nil { return nil, err } defer t.Close() var mediaType string if !isAdapter { fn(api.ProgressResponse{Status: "converting model"}) mediaType = "application/vnd.ollama.image.model" if err := convert.ConvertModel(os.DirFS(tmpDir), t); err != nil { return nil, err } } else { kv, err := 
kvFromLayers(baseLayers) if err != nil { return nil, err } fn(api.ProgressResponse{Status: "converting adapter"}) mediaType = "application/vnd.ollama.image.adapter" if err := convert.ConvertAdapter(os.DirFS(tmpDir), t, kv); err != nil { return nil, err } } if _, err := t.Seek(0, io.SeekStart); err != nil { return nil, err } layer, err := NewLayer(t, mediaType) if err != nil { return nil, err } bin, err := layer.Open() if err != nil { return nil, err } defer bin.Close() f, err := ggml.Decode(bin, -1) if err != nil { return nil, err } layers := []*layerGGML{{layer, f}} if !isAdapter { return detectChatTemplate(layers) } return layers, nil } func kvFromLayers(baseLayers []*layerGGML) (ggml.KV, error) { for _, l := range baseLayers { if l.GGML != nil { return l.KV(), nil } } return ggml.KV{}, fmt.Errorf("no base model was found") } func createModel(r api.CreateRequest, name model.Name, baseLayers []*layerGGML, config *model.ConfigV2, fn func(resp api.ProgressResponse)) (err error) { var layers []Layer for _, layer := range baseLayers { if layer.GGML != nil { quantType := strings.ToUpper(cmp.Or(r.Quantize, r.Quantization)) if quantType != "" && layer.GGML.Name() == "gguf" && layer.MediaType == "application/vnd.ollama.image.model" { want, err := ggml.ParseFileType(quantType) if err != nil { return err } ft := layer.GGML.KV().FileType() if !slices.Contains([]string{"F16", "F32"}, ft.String()) { return errors.New("quantization is only supported for F16 and F32 models") } else if ft != want { layer, err = quantizeLayer(layer, quantType, fn) if err != nil { return err } } } config.ModelFormat = cmp.Or(config.ModelFormat, layer.GGML.Name()) config.ModelFamily = cmp.Or(config.ModelFamily, layer.GGML.KV().Architecture()) config.ModelType = cmp.Or(config.ModelType, format.HumanNumber(layer.GGML.KV().ParameterCount())) config.FileType = cmp.Or(config.FileType, layer.GGML.KV().FileType().String()) config.ModelFamilies = append(config.ModelFamilies, layer.GGML.KV().Architecture()) 
} layers = append(layers, layer.Layer) } if r.Template != "" { layers, err = setTemplate(layers, r.Template) if err != nil { return err } } if r.System != "" { layers, err = setSystem(layers, r.System) if err != nil { return err } } if r.License != nil { switch l := r.License.(type) { case string: if l != "" { layers, err = setLicense(layers, l) if err != nil { return err } } case any: var licenses []string b, _ := json.Marshal(l) // re-marshal to JSON if err := json.Unmarshal(b, &licenses); err != nil { return err } for _, v := range licenses { layers, err = setLicense(layers, v) if err != nil { return err } } default: return fmt.Errorf("unknown license type: %T", l) } } layers, err = setParameters(layers, r.Parameters) if err != nil { return err } layers, err = setMessages(layers, r.Messages) if err != nil { return err } configLayer, err := createConfigLayer(layers, *config) if err != nil { return err } for _, layer := range layers { if layer.status != "" { fn(api.ProgressResponse{Status: layer.status}) } } fn(api.ProgressResponse{Status: "writing manifest"}) if err := WriteManifest(name, *configLayer, layers); err != nil { return err } return nil } func quantizeLayer(layer *layerGGML, quantizeType string, fn func(resp api.ProgressResponse)) (*layerGGML, error) { ft := layer.GGML.KV().FileType() var doneBytes atomic.Uint64 totalBytes := uint64(layer.Size) - layer.GGML.Tensors().Offset fnWrap := func(n uint64) { done := doneBytes.Add(n) progress := float32(done) / float32(totalBytes) fn(api.ProgressResponse{Status: fmt.Sprintf("quantizing %s model to %s", ft, quantizeType), Digest: "0000000000000000000", Total: layer.Size, Completed: int64(progress * float32(layer.Size))}) } ftype, err := ggml.ParseFileType(quantizeType) if err != nil { return nil, err } blob, err := GetBlobsPath(layer.Digest) if err != nil { return nil, err } fp, err := os.Open(blob) if err != nil { return nil, err } defer fp.Close() temp, err := os.CreateTemp(filepath.Dir(blob), quantizeType) if 
err != nil { return nil, err } defer temp.Close() defer os.Remove(temp.Name()) if err := quantize(fp, temp, layer.GGML, ftype, fnWrap); err != nil { return nil, err } temp.Seek(0, io.SeekStart) fn(api.ProgressResponse{Status: "verifying conversion"}) newLayer, err := NewLayer(temp, layer.MediaType) if err != nil { return nil, err } if _, err := temp.Seek(0, io.SeekStart); err != nil { return nil, err } f, err := ggml.Decode(temp, 1024) if err != nil { slog.Error(fmt.Sprintf("error decoding ggml: %s\n", err)) return nil, err } return &layerGGML{newLayer, f}, nil } func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML, error) { var layers []*layerGGML fn(api.ProgressResponse{Status: "parsing GGUF"}) blobPath, err := GetBlobsPath(digest) if err != nil { return nil, err } blob, err := os.Open(blobPath) if err != nil { return nil, err } defer blob.Close() sr := io.NewSectionReader(blob, 0, 512) contentType, err := detectContentType(sr) if err != nil { return nil, err } if contentType != "gguf" { slog.Error(fmt.Sprintf("unsupported content type: %s", contentType)) return nil, errOnlyGGUFSupported } f, err := ggml.Decode(blob, -1) if err != nil { return nil, err } mediatype := "application/vnd.ollama.image.model" if f.KV().Kind() == "adapter" { mediatype = "application/vnd.ollama.image.adapter" } else if (f.KV().Uint("block_count") == 0 && f.KV().Uint("vision.block_count") > 0) || f.KV().Kind() == "projector" { // if a model has vision.block_count but not block_count, it is a standalone vision model mediatype = "application/vnd.ollama.image.projector" } layer, err := NewLayerFromLayer(digest, mediatype, blob.Name()) if err != nil { slog.Debug("could not create new layer from layer", "error", err) return nil, err } layers = append(layers, &layerGGML{layer, f}) return detectChatTemplate(layers) } func removeLayer(layers []Layer, mediatype string) []Layer { return slices.DeleteFunc(layers, func(layer Layer) bool { if layer.MediaType != mediatype { 
return false } if err := layer.Remove(); err != nil { slog.Warn("couldn't remove blob", "digest", layer.Digest, "error", err) return true } return true }) } func setTemplate(layers []Layer, t string) ([]Layer, error) { layers = removeLayer(layers, "application/vnd.ollama.image.template") if _, err := template.Parse(t); err != nil { return nil, fmt.Errorf("%w: %s", errBadTemplate, err) } if _, err := template.Parse(t); err != nil { return nil, fmt.Errorf("%w: %s", errBadTemplate, err) } blob := strings.NewReader(t) layer, err := NewLayer(blob, "application/vnd.ollama.image.template") if err != nil { return nil, err } layers = append(layers, layer) return layers, nil } func setSystem(layers []Layer, s string) ([]Layer, error) { layers = removeLayer(layers, "application/vnd.ollama.image.system") if s != "" { blob := strings.NewReader(s) layer, err := NewLayer(blob, "application/vnd.ollama.image.system") if err != nil { return nil, err } layers = append(layers, layer) } return layers, nil } func setLicense(layers []Layer, l string) ([]Layer, error) { blob := strings.NewReader(l) layer, err := NewLayer(blob, "application/vnd.ollama.image.license") if err != nil { return nil, err } layers = append(layers, layer) return layers, nil } func setParameters(layers []Layer, p map[string]any) ([]Layer, error) { if p == nil { p = make(map[string]any) } for _, layer := range layers { if layer.MediaType != "application/vnd.ollama.image.params" { continue } digestPath, err := GetBlobsPath(layer.Digest) if err != nil { return nil, err } fn, err := os.Open(digestPath) if err != nil { return nil, err } defer fn.Close() var existing map[string]any if err := json.NewDecoder(fn).Decode(&existing); err != nil { return nil, err } for k, v := range existing { if _, exists := p[k]; exists { continue } p[k] = v } } if len(p) == 0 { return layers, nil } layers = removeLayer(layers, "application/vnd.ollama.image.params") var b bytes.Buffer if err := json.NewEncoder(&b).Encode(p); err != nil { 
return nil, err } layer, err := NewLayer(&b, "application/vnd.ollama.image.params") if err != nil { return nil, err } layers = append(layers, layer) return layers, nil } func setMessages(layers []Layer, m []api.Message) ([]Layer, error) { // this leaves the old messages intact if no new messages were specified // which may not be the correct behaviour if len(m) == 0 { return layers, nil } fmt.Printf("removing old messages\n") layers = removeLayer(layers, "application/vnd.ollama.image.messages") var b bytes.Buffer if err := json.NewEncoder(&b).Encode(m); err != nil { return nil, err } layer, err := NewLayer(&b, "application/vnd.ollama.image.messages") if err != nil { return nil, err } layers = append(layers, layer) return layers, nil } func createConfigLayer(layers []Layer, config model.ConfigV2) (*Layer, error) { digests := make([]string, len(layers)) for i, layer := range layers { digests[i] = layer.Digest } config.RootFS.DiffIDs = digests var b bytes.Buffer if err := json.NewEncoder(&b).Encode(config); err != nil { return nil, err } layer, err := NewLayer(&b, "application/vnd.docker.container.image.v1+json") if err != nil { return nil, err } return &layer, nil } func createLink(src, dst string) error { // make any subdirs for dst if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil { return err } _ = os.Remove(dst) if err := os.Symlink(src, dst); err != nil { if err := copyFile(src, dst); err != nil { return err } } return nil } func copyFile(src, dst string) error { srcFile, err := os.Open(src) if err != nil { return err } defer srcFile.Close() dstFile, err := os.Create(dst) if err != nil { return err } defer dstFile.Close() _, err = io.Copy(dstFile, srcFile) return err }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/prompt_test.go
server/prompt_test.go
package server import ( "bytes" "testing" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" "github.com/ollama/ollama/template" ) func TestChatPrompt(t *testing.T) { type expect struct { prompt string images [][]byte error error } tmpl, err := template.Parse(` {{- if .System }}{{ .System }} {{ end }} {{- if .Prompt }}{{ .Prompt }} {{ end }} {{- if .Response }}{{ .Response }} {{ end }}`) if err != nil { t.Fatal(err) } visionModel := Model{Template: tmpl, ProjectorPaths: []string{"vision"}} cases := []struct { name string model Model limit int truncate bool msgs []api.Message expect }{ { name: "messages", model: visionModel, limit: 64, truncate: true, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, }, expect: expect{ prompt: "You're a test, Harry! I-I'm a what? A test. And a thumping good one at that, I'd wager. ", }, }, { name: "truncate messages", model: visionModel, limit: 1, truncate: true, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, }, expect: expect{ prompt: "A test. And a thumping good one at that, I'd wager. ", }, }, { name: "truncate messages with image", model: visionModel, limit: 64, truncate: true, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager.", Images: []api.ImageData{[]byte("something")}}, }, expect: expect{ prompt: "[img-0]A test. And a thumping good one at that, I'd wager. 
", images: [][]byte{ []byte("something"), }, }, }, { name: "truncate messages with images", model: visionModel, limit: 64, truncate: true, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!", Images: []api.ImageData{[]byte("something")}}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager.", Images: []api.ImageData{[]byte("somethingelse")}}, }, expect: expect{ prompt: "[img-0]A test. And a thumping good one at that, I'd wager. ", images: [][]byte{ []byte("somethingelse"), }, }, }, { name: "messages with images", model: visionModel, limit: 2048, truncate: true, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!", Images: []api.ImageData{[]byte("something")}}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager.", Images: []api.ImageData{[]byte("somethingelse")}}, }, expect: expect{ prompt: "[img-0]You're a test, Harry! I-I'm a what? [img-1]A test. And a thumping good one at that, I'd wager. ", images: [][]byte{ []byte("something"), []byte("somethingelse"), }, }, }, { name: "message with image tag", model: visionModel, limit: 2048, truncate: true, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry! [img]", Images: []api.ImageData{[]byte("something")}}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager.", Images: []api.ImageData{[]byte("somethingelse")}}, }, expect: expect{ prompt: "You're a test, Harry! [img-0] I-I'm a what? [img-1]A test. And a thumping good one at that, I'd wager. 
", images: [][]byte{ []byte("something"), []byte("somethingelse"), }, }, }, { name: "messages with interleaved images", model: visionModel, limit: 2048, truncate: true, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, {Role: "user", Images: []api.ImageData{[]byte("something")}}, {Role: "user", Images: []api.ImageData{[]byte("somethingelse")}}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, }, expect: expect{ prompt: "You're a test, Harry!\n\n[img-0]\n\n[img-1] I-I'm a what? A test. And a thumping good one at that, I'd wager. ", images: [][]byte{ []byte("something"), []byte("somethingelse"), }, }, }, { name: "truncate message with interleaved images", model: visionModel, limit: 1024, truncate: true, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, {Role: "user", Images: []api.ImageData{[]byte("something")}}, {Role: "user", Images: []api.ImageData{[]byte("somethingelse")}}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, }, expect: expect{ prompt: "[img-0] I-I'm a what? A test. And a thumping good one at that, I'd wager. ", images: [][]byte{ []byte("somethingelse"), }, }, }, { name: "message with system prompt", model: visionModel, limit: 2048, truncate: true, msgs: []api.Message{ {Role: "system", Content: "You are the Test Who Lived."}, {Role: "user", Content: "You're a test, Harry!"}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, }, expect: expect{ prompt: "You are the Test Who Lived. You're a test, Harry! I-I'm a what? A test. And a thumping good one at that, I'd wager. 
", }, }, { name: "out of order system", model: visionModel, limit: 2048, truncate: true, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "system", Content: "You are the Test Who Lived."}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, }, expect: expect{ prompt: "You're a test, Harry! I-I'm a what? You are the Test Who Lived. A test. And a thumping good one at that, I'd wager. ", }, }, { name: "multiple images same prompt", model: visionModel, limit: 2048, truncate: true, msgs: []api.Message{ {Role: "user", Content: "Compare these two pictures of hotdogs", Images: []api.ImageData{[]byte("one hotdog"), []byte("two hotdogs")}}, }, expect: expect{ prompt: "[img-0][img-1]Compare these two pictures of hotdogs ", images: [][]byte{[]byte("one hotdog"), []byte("two hotdogs")}, }, }, { name: "no truncate with limit exceeded", model: visionModel, limit: 10, truncate: false, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, {Role: "assistant", Content: "I-I'm a what?"}, {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, }, expect: expect{ prompt: "You're a test, Harry! I-I'm a what? A test. And a thumping good one at that, I'd wager. 
", }, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { model := tt.model opts := api.Options{Runner: api.Runner{NumCtx: tt.limit}} think := false prompt, images, err := chatPrompt(t.Context(), &model, mockRunner{}.Tokenize, &opts, tt.msgs, nil, &api.ThinkValue{Value: think}, tt.truncate) if tt.error == nil && err != nil { t.Fatal(err) } else if tt.error != nil && err != tt.error { t.Fatalf("expected err '%q', got '%q'", tt.error, err) } if diff := cmp.Diff(prompt, tt.prompt); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } if len(images) != len(tt.images) { t.Fatalf("expected %d images, got %d", len(tt.images), len(images)) } for i := range images { if images[i].ID != i { t.Errorf("expected ID %d, got %d", i, images[i].ID) } if len(model.Config.ModelFamilies) == 0 { if !bytes.Equal(images[i].Data, tt.images[i]) { t.Errorf("expected %q, got %q", tt.images[i], images[i].Data) } } } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/manifest.go
server/manifest.go
package server

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"os"
	"path/filepath"

	"github.com/ollama/ollama/types/model"
)

// Manifest is the on-disk description of a model: its config layer plus the
// ordered content layers, together with bookkeeping about where it was read
// from.
type Manifest struct {
	SchemaVersion int    `json:"schemaVersion"`
	MediaType     string `json:"mediaType"`
	Config        Layer  `json:"config"`
	Layers        []Layer `json:"layers"`

	filepath string      // path the manifest was loaded from
	fi       os.FileInfo // stat of that file at load time
	digest   string      // sha256 of the raw manifest bytes
}

// Size returns the total byte size of all layers including the config layer.
func (m *Manifest) Size() (size int64) {
	for _, layer := range append(m.Layers, m.Config) {
		size += layer.Size
	}

	return
}

// Remove deletes the manifest file and prunes any directories left empty
// under the manifests root.
func (m *Manifest) Remove() error {
	if err := os.Remove(m.filepath); err != nil {
		return err
	}

	manifests, err := GetManifestPath()
	if err != nil {
		return err
	}

	return PruneDirectory(manifests)
}

// RemoveLayers deletes every blob referenced by the manifest (layers and
// config). Already-missing blobs are logged and skipped.
func (m *Manifest) RemoveLayers() error {
	for _, layer := range append(m.Layers, m.Config) {
		if layer.Digest != "" {
			if err := layer.Remove(); errors.Is(err, os.ErrNotExist) {
				slog.Debug("layer does not exist", "digest", layer.Digest)
			} else if err != nil {
				return err
			}
		}
	}

	return nil
}

// ParseNamedManifest loads and decodes the manifest for a fully qualified
// model name, recording its file path, stat info, and content digest.
func ParseNamedManifest(n model.Name) (*Manifest, error) {
	if !n.IsFullyQualified() {
		return nil, model.Unqualified(n)
	}

	manifests, err := GetManifestPath()
	if err != nil {
		return nil, err
	}

	p := filepath.Join(manifests, n.Filepath())

	var m Manifest
	f, err := os.Open(p)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}

	// Hash the raw bytes while decoding so the digest matches exactly what
	// was on disk.
	sha256sum := sha256.New()
	if err := json.NewDecoder(io.TeeReader(f, sha256sum)).Decode(&m); err != nil {
		return nil, err
	}

	m.filepath = p
	m.fi = fi
	m.digest = hex.EncodeToString(sha256sum.Sum(nil))

	return &m, nil
}

// WriteManifest serializes a schema-v2 manifest for name to the manifests
// directory, creating parent directories as needed.
func WriteManifest(name model.Name, config Layer, layers []Layer) error {
	manifests, err := GetManifestPath()
	if err != nil {
		return err
	}

	p := filepath.Join(manifests, name.Filepath())
	if err := os.MkdirAll(filepath.Dir(p), 0o755); err != nil {
		return err
	}

	f, err := os.Create(p)
	if err != nil {
		return err
	}
	defer f.Close()

	m := Manifest{
		SchemaVersion: 2,
		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
		Config:        config,
		Layers:        layers,
	}

	return json.NewEncoder(f).Encode(m)
}

// Manifests scans the manifests directory and parses every manifest found.
// With continueOnError set, malformed entries are logged and skipped instead
// of aborting the scan.
func Manifests(continueOnError bool) (map[model.Name]*Manifest, error) {
	manifests, err := GetManifestPath()
	if err != nil {
		return nil, err
	}

	// TODO(mxyng): use something less brittle
	matches, err := filepath.Glob(filepath.Join(manifests, "*", "*", "*", "*"))
	if err != nil {
		return nil, err
	}

	ms := make(map[model.Name]*Manifest)
	for _, match := range matches {
		fi, err := os.Stat(match)
		if err != nil {
			return nil, err
		}

		if !fi.IsDir() {
			rel, err := filepath.Rel(manifests, match)
			if err != nil {
				if !continueOnError {
					return nil, fmt.Errorf("%s %w", match, err)
				}
				slog.Warn("bad filepath", "path", match, "error", err)
				continue
			}

			n := model.ParseNameFromFilepath(rel)
			if !n.IsValid() {
				if !continueOnError {
					// err is nil on this path (filepath.Rel succeeded), so the
					// previous fmt.Errorf("%s %w", rel, err) wrapped nil and
					// rendered as "%!w(<nil>)". Report the real problem.
					return nil, fmt.Errorf("%s: invalid manifest name", rel)
				}
				slog.Warn("bad manifest name", "path", rel)
				continue
			}

			m, err := ParseNamedManifest(n)
			if err != nil {
				if !continueOnError {
					return nil, fmt.Errorf("%s %w", n, err)
				}
				slog.Warn("bad manifest", "name", n, "error", err)
				continue
			}

			ms[n] = m
		}
	}

	return ms, nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/modelpath_test.go
server/modelpath_test.go
package server import ( "path/filepath" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestGetBlobsPath(t *testing.T) { // GetBlobsPath expects an actual directory to exist tempDir := t.TempDir() tests := []struct { name string digest string expected string err error }{ { "empty digest", "", filepath.Join(tempDir, "blobs"), nil, }, { "valid with colon", "sha256:456402914e838a953e0cf80caa6adbe75383d9e63584a964f504a7bbb8f7aad9", filepath.Join(tempDir, "blobs", "sha256-456402914e838a953e0cf80caa6adbe75383d9e63584a964f504a7bbb8f7aad9"), nil, }, { "valid with dash", "sha256-456402914e838a953e0cf80caa6adbe75383d9e63584a964f504a7bbb8f7aad9", filepath.Join(tempDir, "blobs", "sha256-456402914e838a953e0cf80caa6adbe75383d9e63584a964f504a7bbb8f7aad9"), nil, }, { "digest too short", "sha256-45640291", "", ErrInvalidDigestFormat, }, { "digest too long", "sha256-456402914e838a953e0cf80caa6adbe75383d9e63584a964f504a7bbb8f7aad9aaaaaaaaaa", "", ErrInvalidDigestFormat, }, { "digest invalid chars", "../sha256-456402914e838a953e0cf80caa6adbe75383d9e63584a964f504a7bbb8f7a", "", ErrInvalidDigestFormat, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { t.Setenv("OLLAMA_MODELS", tempDir) got, err := GetBlobsPath(tc.digest) require.ErrorIs(t, tc.err, err, tc.name) assert.Equal(t, tc.expected, got, tc.name) }) } } func TestParseModelPath(t *testing.T) { tests := []struct { name string arg string want ModelPath }{ { "full path https", "https://example.com/ns/repo:tag", ModelPath{ ProtocolScheme: "https", Registry: "example.com", Namespace: "ns", Repository: "repo", Tag: "tag", }, }, { "full path http", "http://example.com/ns/repo:tag", ModelPath{ ProtocolScheme: "http", Registry: "example.com", Namespace: "ns", Repository: "repo", Tag: "tag", }, }, { "no protocol", "example.com/ns/repo:tag", ModelPath{ ProtocolScheme: "https", Registry: "example.com", Namespace: "ns", Repository: "repo", Tag: "tag", }, }, { "no registry", 
"ns/repo:tag", ModelPath{ ProtocolScheme: "https", Registry: DefaultRegistry, Namespace: "ns", Repository: "repo", Tag: "tag", }, }, { "no namespace", "repo:tag", ModelPath{ ProtocolScheme: "https", Registry: DefaultRegistry, Namespace: DefaultNamespace, Repository: "repo", Tag: "tag", }, }, { "no tag", "repo", ModelPath{ ProtocolScheme: "https", Registry: DefaultRegistry, Namespace: DefaultNamespace, Repository: "repo", Tag: DefaultTag, }, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { got := ParseModelPath(tc.arg) if got != tc.want { t.Errorf("got: %q want: %q", got, tc.want) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/routes_delete_test.go
server/routes_delete_test.go
package server import ( "bytes" "encoding/json" "net/http" "path/filepath" "testing" "github.com/gin-gonic/gin" "github.com/ollama/ollama/api" "github.com/ollama/ollama/types/model" ) func TestDelete(t *testing.T) { gin.SetMode(gin.TestMode) p := t.TempDir() t.Setenv("OLLAMA_MODELS", p) var s Server _, digest := createBinFile(t, nil, nil) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test", Files: map[string]string{"test.gguf": digest}, }) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } w = createRequest(t, s.CreateHandler, api.CreateRequest{ Name: "test2", Files: map[string]string{"test.gguf": digest}, Template: "{{ .System }} {{ .Prompt }}", }) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{ filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"), filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"), }) checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-136bf7c76bac2ec09d6617885507d37829e04b41acc47687d45e512b544e893a"), filepath.Join(p, "blobs", "sha256-6bcdb8859d417753645538d7bbfbd7ca91a3f0c191aef5379c53c05e86b669dd"), filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"), filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"), }) w = createRequest(t, s.DeleteHandler, api.DeleteRequest{Name: "test"}) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{ filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"), }) checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", 
"sha256-136bf7c76bac2ec09d6617885507d37829e04b41acc47687d45e512b544e893a"), filepath.Join(p, "blobs", "sha256-89a2116c3a82d6a97f59f748d86ed4417214353fd178ee54df418fde32495fad"), filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"), }) w = createRequest(t, s.DeleteHandler, api.DeleteRequest{Name: "test2"}) if w.Code != http.StatusOK { t.Fatalf("expected status code 200, actual %d", w.Code) } checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{}) checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{}) } func TestDeleteDuplicateLayers(t *testing.T) { gin.SetMode(gin.TestMode) p := t.TempDir() t.Setenv("OLLAMA_MODELS", p) var s Server n := model.ParseName("test") var b bytes.Buffer if err := json.NewEncoder(&b).Encode(&model.ConfigV2{}); err != nil { t.Fatal(err) } config, err := NewLayer(&b, "application/vnd.docker.container.image.v1+json") if err != nil { t.Fatal(err) } // create a manifest with duplicate layers if err := WriteManifest(n, config, []Layer{config}); err != nil { t.Fatal(err) } w := createRequest(t, s.DeleteHandler, api.DeleteRequest{Name: "test"}) if w.Code != http.StatusOK { t.Errorf("expected status code 200, actual %d", w.Code) } checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{}) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/manifest_test.go
server/manifest_test.go
package server import ( "encoding/json" "os" "path/filepath" "slices" "testing" "github.com/ollama/ollama/types/model" ) func createManifest(t *testing.T, path, name string) { t.Helper() p := filepath.Join(path, "manifests", name) if err := os.MkdirAll(filepath.Dir(p), 0o755); err != nil { t.Fatal(err) } f, err := os.Create(p) if err != nil { t.Fatal(err) } defer f.Close() if err := json.NewEncoder(f).Encode(Manifest{}); err != nil { t.Fatal(err) } } func TestManifests(t *testing.T) { cases := map[string]struct { ps []string wantValidCount int wantInvalidCount int }{ "empty": {}, "single": { ps: []string{ filepath.Join("host", "namespace", "model", "tag"), }, wantValidCount: 1, }, "multiple": { ps: []string{ filepath.Join("registry.ollama.ai", "library", "llama3", "latest"), filepath.Join("registry.ollama.ai", "library", "llama3", "q4_0"), filepath.Join("registry.ollama.ai", "library", "llama3", "q4_1"), filepath.Join("registry.ollama.ai", "library", "llama3", "q8_0"), filepath.Join("registry.ollama.ai", "library", "llama3", "q5_0"), filepath.Join("registry.ollama.ai", "library", "llama3", "q5_1"), filepath.Join("registry.ollama.ai", "library", "llama3", "q2_K"), filepath.Join("registry.ollama.ai", "library", "llama3", "q3_K_S"), filepath.Join("registry.ollama.ai", "library", "llama3", "q3_K_M"), filepath.Join("registry.ollama.ai", "library", "llama3", "q3_K_L"), filepath.Join("registry.ollama.ai", "library", "llama3", "q4_K_S"), filepath.Join("registry.ollama.ai", "library", "llama3", "q4_K_M"), filepath.Join("registry.ollama.ai", "library", "llama3", "q5_K_S"), filepath.Join("registry.ollama.ai", "library", "llama3", "q5_K_M"), filepath.Join("registry.ollama.ai", "library", "llama3", "q6_K"), }, wantValidCount: 15, }, "hidden": { ps: []string{ filepath.Join("host", "namespace", "model", "tag"), filepath.Join("host", "namespace", "model", ".hidden"), }, wantValidCount: 1, wantInvalidCount: 1, }, "subdir": { ps: []string{ filepath.Join("host", "namespace", "model", 
"tag", "one"), filepath.Join("host", "namespace", "model", "tag", "another", "one"), }, wantInvalidCount: 2, }, "upper tag": { ps: []string{ filepath.Join("host", "namespace", "model", "TAG"), }, wantValidCount: 1, }, "upper model": { ps: []string{ filepath.Join("host", "namespace", "MODEL", "tag"), }, wantValidCount: 1, }, "upper namespace": { ps: []string{ filepath.Join("host", "NAMESPACE", "model", "tag"), }, wantValidCount: 1, }, "upper host": { ps: []string{ filepath.Join("HOST", "namespace", "model", "tag"), }, wantValidCount: 1, }, } for n, wants := range cases { t.Run(n, func(t *testing.T) { d := t.TempDir() t.Setenv("OLLAMA_MODELS", d) for _, p := range wants.ps { createManifest(t, d, p) } ms, err := Manifests(true) if err != nil { t.Fatal(err) } var ns []model.Name for k := range ms { ns = append(ns, k) } var gotValidCount, gotInvalidCount int for _, p := range wants.ps { n := model.ParseNameFromFilepath(p) if n.IsValid() { gotValidCount++ } else { gotInvalidCount++ } if !n.IsValid() && slices.Contains(ns, n) { t.Errorf("unexpected invalid name: %s", p) } else if n.IsValid() && !slices.Contains(ns, n) { t.Errorf("missing valid name: %s", p) } } if gotValidCount != wants.wantValidCount { t.Errorf("got valid count %d, want %d", gotValidCount, wants.wantValidCount) } if gotInvalidCount != wants.wantInvalidCount { t.Errorf("got invalid count %d, want %d", gotInvalidCount, wants.wantInvalidCount) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/routes_debug_test.go
server/routes_debug_test.go
package server import ( "bytes" "encoding/json" "net/http" "testing" "time" "github.com/gin-gonic/gin" "github.com/ollama/ollama/api" "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/ml" ) func TestGenerateDebugRenderOnly(t *testing.T) { gin.SetMode(gin.TestMode) mock := mockRunner{ CompletionResponse: llm.CompletionResponse{ Done: true, DoneReason: llm.DoneReasonStop, PromptEvalCount: 1, PromptEvalDuration: 1, EvalCount: 1, EvalDuration: 1, }, } s := Server{ sched: &Scheduler{ pendingReqCh: make(chan *LlmRequest, 1), finishedReqCh: make(chan *LlmRequest, 1), expiredCh: make(chan *runnerRef, 1), unloadedCh: make(chan any, 1), loaded: make(map[string]*runnerRef), newServerFn: newMockServer(&mock), getGpuFn: getGpuFn, getSystemInfoFn: getSystemInfoFn, waitForRecovery: 250 * time.Millisecond, loadFn: func(req *LlmRequest, _ *ggml.GGML, _ ml.SystemInfo, _ []ml.DeviceInfo, _ bool) bool { // add small delay to simulate loading time.Sleep(time.Millisecond) req.successCh <- &runnerRef{ llama: &mock, } return false }, }, } go s.sched.Run(t.Context()) // Create a test model stream := false _, digest := createBinFile(t, ggml.KV{ "general.architecture": "llama", "llama.block_count": uint32(1), "llama.context_length": uint32(8192), "llama.embedding_length": uint32(4096), "llama.attention.head_count": uint32(32), "llama.attention.head_count_kv": uint32(8), "tokenizer.ggml.tokens": []string{""}, "tokenizer.ggml.scores": []float32{0}, "tokenizer.ggml.token_type": []int32{0}, }, []*ggml.Tensor{ {Name: "token_embd.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_down.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_gate.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_up.weight", Shape: []uint64{1}, WriterTo: 
bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_k.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_q.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_v.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, }) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Model: "test-model", Files: map[string]string{"file.gguf": digest}, Template: "{{ .Prompt }}", Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("expected status 200, got %d", w.Code) } tests := []struct { name string request api.GenerateRequest expectDebug bool expectTemplate string expectNumImages int }{ { name: "debug render only enabled", request: api.GenerateRequest{ Model: "test-model", Prompt: "Hello, world!", DebugRenderOnly: true, }, expectDebug: true, expectTemplate: "Hello, world!", }, { name: "debug render only disabled", request: api.GenerateRequest{ Model: "test-model", Prompt: "Hello, world!", DebugRenderOnly: false, }, expectDebug: false, }, { name: "debug render only with system prompt", request: api.GenerateRequest{ Model: "test-model", Prompt: "User question", System: "You are a helpful assistant", DebugRenderOnly: true, }, expectDebug: true, expectTemplate: "User question", }, { name: "debug render only with template", request: api.GenerateRequest{ Model: "test-model", Prompt: "Hello", Template: "PROMPT: {{ .Prompt }}", DebugRenderOnly: true, }, expectDebug: true, expectTemplate: "PROMPT: Hello", }, { name: "debug render only with images", request: api.GenerateRequest{ Model: "test-model", Prompt: "Describe this image", Images: []api.ImageData{[]byte("fake-image-data")}, 
DebugRenderOnly: true, }, expectDebug: true, expectTemplate: "[img-0]Describe this image", expectNumImages: 1, }, { name: "debug render only with raw mode", request: api.GenerateRequest{ Model: "test-model", Prompt: "Raw prompt text", Raw: true, DebugRenderOnly: true, }, expectDebug: true, expectTemplate: "Raw prompt text", }, } for _, tt := range tests { // Test both with and without streaming streamValues := []bool{false, true} for _, stream := range streamValues { streamSuffix := "" if stream { streamSuffix = " (streaming)" } t.Run(tt.name+streamSuffix, func(t *testing.T) { req := tt.request req.Stream = &stream w := createRequest(t, s.GenerateHandler, req) if tt.expectDebug { if w.Code != http.StatusOK { t.Errorf("expected status %d, got %d, body: %s", http.StatusOK, w.Code, w.Body.String()) } var response api.GenerateResponse if err := json.Unmarshal(w.Body.Bytes(), &response); err != nil { t.Fatalf("failed to unmarshal response: %v", err) } if response.Model != tt.request.Model { t.Errorf("expected model %s, got %s", tt.request.Model, response.Model) } if tt.expectTemplate != "" && response.DebugInfo.RenderedTemplate != tt.expectTemplate { t.Errorf("expected template %q, got %q", tt.expectTemplate, response.DebugInfo.RenderedTemplate) } if tt.expectNumImages > 0 && response.DebugInfo.ImageCount != tt.expectNumImages { t.Errorf("expected image count %d, got %d", tt.expectNumImages, response.DebugInfo.ImageCount) } } else { // When debug is disabled, it should attempt normal processing if w.Code != http.StatusOK { t.Errorf("expected status %d, got %d", http.StatusOK, w.Code) } } }) } } } func TestChatDebugRenderOnly(t *testing.T) { gin.SetMode(gin.TestMode) mock := mockRunner{ CompletionResponse: llm.CompletionResponse{ Done: true, DoneReason: llm.DoneReasonStop, PromptEvalCount: 1, PromptEvalDuration: 1, EvalCount: 1, EvalDuration: 1, }, } s := Server{ sched: &Scheduler{ pendingReqCh: make(chan *LlmRequest, 1), finishedReqCh: make(chan *LlmRequest, 1), 
expiredCh: make(chan *runnerRef, 1), unloadedCh: make(chan any, 1), loaded: make(map[string]*runnerRef), newServerFn: newMockServer(&mock), getGpuFn: getGpuFn, getSystemInfoFn: getSystemInfoFn, waitForRecovery: 250 * time.Millisecond, loadFn: func(req *LlmRequest, _ *ggml.GGML, _ ml.SystemInfo, _ []ml.DeviceInfo, _ bool) bool { // add small delay to simulate loading time.Sleep(time.Millisecond) req.successCh <- &runnerRef{ llama: &mock, } return false }, }, } go s.sched.Run(t.Context()) // Create a test model stream := false _, digest := createBinFile(t, ggml.KV{ "general.architecture": "llama", "llama.block_count": uint32(1), "llama.context_length": uint32(8192), "llama.embedding_length": uint32(4096), "llama.attention.head_count": uint32(32), "llama.attention.head_count_kv": uint32(8), "tokenizer.ggml.tokens": []string{""}, "tokenizer.ggml.scores": []float32{0}, "tokenizer.ggml.token_type": []int32{0}, }, []*ggml.Tensor{ {Name: "token_embd.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_down.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_gate.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_up.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_k.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_q.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_v.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, }) w := 
createRequest(t, s.CreateHandler, api.CreateRequest{ Model: "test-model", Files: map[string]string{"file.gguf": digest}, Template: "{{ if .Tools }}{{ .Tools }}{{ end }}{{ range .Messages }}{{ .Role }}: {{ .Content }}\n{{ end }}", Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("expected status 200, got %d", w.Code) } tests := []struct { name string request api.ChatRequest expectDebug bool expectTemplate string expectNumImages int }{ { name: "chat debug render only enabled", request: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant"}, {Role: "user", Content: "Hello"}, }, DebugRenderOnly: true, }, expectDebug: true, expectTemplate: "system: You are a helpful assistant\nuser: Hello\n", }, { name: "chat debug render only disabled", request: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ {Role: "user", Content: "Hello"}, }, DebugRenderOnly: false, }, expectDebug: false, }, { name: "chat debug with assistant message", request: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ {Role: "user", Content: "Hello"}, {Role: "assistant", Content: "Hi there!"}, {Role: "user", Content: "How are you?"}, }, DebugRenderOnly: true, }, expectDebug: true, expectTemplate: "user: Hello\nassistant: Hi there!\nuser: How are you?\n", }, { name: "chat debug with images", request: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ { Role: "user", Content: "What's in this image?", Images: []api.ImageData{[]byte("fake-image-data")}, }, }, DebugRenderOnly: true, }, expectDebug: true, expectTemplate: "user: [img-0]What's in this image?\n", expectNumImages: 1, }, { name: "chat debug with tools", request: api.ChatRequest{ Model: "test-model", Messages: []api.Message{ {Role: "user", Content: "Get the weather"}, }, Tools: api.Tools{ { Type: "function", Function: api.ToolFunction{ Name: "get_weather", Description: "Get weather information", }, }, }, DebugRenderOnly: true, }, 
expectDebug: true, expectTemplate: "[{\"type\":\"function\",\"function\":{\"name\":\"get_weather\",\"description\":\"Get weather information\",\"parameters\":{\"type\":\"\",\"properties\":null}}}]user: Get the weather\n", }, } for _, tt := range tests { // Test both with and without streaming streamValues := []bool{false, true} for _, stream := range streamValues { streamSuffix := "" if stream { streamSuffix = " (streaming)" } t.Run(tt.name+streamSuffix, func(t *testing.T) { req := tt.request req.Stream = &stream w := createRequest(t, s.ChatHandler, req) if tt.expectDebug { if w.Code != http.StatusOK { t.Errorf("expected status %d, got %d, body: %s", http.StatusOK, w.Code, w.Body.String()) } var response api.ChatResponse if err := json.Unmarshal(w.Body.Bytes(), &response); err != nil { t.Fatalf("failed to unmarshal response: %v", err) } if response.Model != tt.request.Model { t.Errorf("expected model %s, got %s", tt.request.Model, response.Model) } if tt.expectTemplate != "" && response.DebugInfo.RenderedTemplate != tt.expectTemplate { t.Errorf("expected template %q, got %q", tt.expectTemplate, response.DebugInfo.RenderedTemplate) } if tt.expectNumImages > 0 && response.DebugInfo.ImageCount != tt.expectNumImages { t.Errorf("expected image count %d, got %d", tt.expectNumImages, response.DebugInfo.ImageCount) } } else { // When debug is disabled, it should attempt normal processing if w.Code != http.StatusOK { t.Errorf("expected status %d, got %d", http.StatusOK, w.Code) } } }) } } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/create_test.go
server/create_test.go
package server import ( "bytes" "encoding/binary" "errors" "os" "path/filepath" "strings" "testing" "github.com/ollama/ollama/api" ) func TestConvertFromSafetensors(t *testing.T) { t.Setenv("OLLAMA_MODELS", t.TempDir()) // Helper function to create a new layer and return its digest makeTemp := func(content string) string { l, err := NewLayer(strings.NewReader(content), "application/octet-stream") if err != nil { t.Fatalf("Failed to create layer: %v", err) } return l.Digest } // Create a safetensors compatible file with empty JSON content var buf bytes.Buffer headerSize := int64(len("{}")) binary.Write(&buf, binary.LittleEndian, headerSize) buf.WriteString("{}") model := makeTemp(buf.String()) config := makeTemp(`{ "architectures": ["LlamaForCausalLM"], "vocab_size": 32000 }`) tokenizer := makeTemp(`{ "version": "1.0", "truncation": null, "padding": null, "added_tokens": [ { "id": 0, "content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": false, "special": true } ] }`) tests := []struct { name string filePath string wantErr error }{ // Invalid { name: "InvalidRelativePathShallow", filePath: filepath.Join("..", "file.safetensors"), wantErr: errFilePath, }, { name: "InvalidRelativePathDeep", filePath: filepath.Join("..", "..", "..", "..", "..", "..", "data", "file.txt"), wantErr: errFilePath, }, { name: "InvalidNestedPath", filePath: filepath.Join("dir", "..", "..", "..", "..", "..", "other.safetensors"), wantErr: errFilePath, }, { name: "AbsolutePathOutsideRoot", filePath: filepath.Join(os.TempDir(), "model.safetensors"), wantErr: errFilePath, // Should fail since it's outside tmpDir }, { name: "ValidRelativePath", filePath: "model.safetensors", wantErr: nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Create the minimum required file map for convertFromSafetensors files := map[string]string{ tt.filePath: model, "config.json": config, "tokenizer.json": tokenizer, } _, err := 
convertFromSafetensors(files, nil, false, func(resp api.ProgressResponse) {}) if (tt.wantErr == nil && err != nil) || (tt.wantErr != nil && err == nil) || (tt.wantErr != nil && !errors.Is(err, tt.wantErr)) { t.Errorf("convertFromSafetensors() error = %v, wantErr %v", err, tt.wantErr) } }) } } func TestRemoteURL(t *testing.T) { tests := []struct { name string input string expected string hasError bool }{ { name: "absolute path", input: "/foo/bar", expected: "http://localhost:11434/foo/bar", hasError: false, }, { name: "absolute path with cleanup", input: "/foo/../bar", expected: "http://localhost:11434/bar", hasError: false, }, { name: "root path", input: "/", expected: "http://localhost:11434/", hasError: false, }, { name: "host without scheme", input: "example.com", expected: "http://example.com:11434", hasError: false, }, { name: "host with port", input: "example.com:8080", expected: "http://example.com:8080", hasError: false, }, { name: "full URL", input: "https://example.com:8080/path", expected: "https://example.com:8080/path", hasError: false, }, { name: "full URL with path cleanup", input: "https://example.com:8080/path/../other", expected: "https://example.com:8080/other", hasError: false, }, { name: "ollama.com special case", input: "ollama.com", expected: "https://ollama.com:443", hasError: false, }, { name: "http ollama.com special case", input: "http://ollama.com", expected: "https://ollama.com:443", hasError: false, }, { name: "URL with only host", input: "http://example.com", expected: "http://example.com:11434", hasError: false, }, { name: "URL with root path cleaned", input: "http://example.com/", expected: "http://example.com:11434", hasError: false, }, { name: "invalid URL", input: "http://[::1]:namedport", // invalid port expected: "", hasError: true, }, { name: "empty string", input: "", expected: "http://localhost:11434", hasError: false, }, { name: "host with scheme but no port", input: "http://localhost", expected: "http://localhost:11434", 
hasError: false, }, { name: "complex path cleanup", input: "/a/b/../../c/./d", expected: "http://localhost:11434/c/d", hasError: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result, err := remoteURL(tt.input) if tt.hasError { if err == nil { t.Errorf("expected error but got none") } return } if err != nil { t.Errorf("unexpected error: %v", err) return } if result != tt.expected { t.Errorf("expected %q, got %q", tt.expected, result) } }) } } func TestRemoteURL_Idempotent(t *testing.T) { // Test that applying remoteURL twice gives the same result as applying it once testInputs := []string{ "/foo/bar", "example.com", "https://example.com:8080/path", "ollama.com", "http://localhost:11434", } for _, input := range testInputs { t.Run(input, func(t *testing.T) { firstResult, err := remoteURL(input) if err != nil { t.Fatalf("first call failed: %v", err) } secondResult, err := remoteURL(firstResult) if err != nil { t.Fatalf("second call failed: %v", err) } if firstResult != secondResult { t.Errorf("function is not idempotent: first=%q, second=%q", firstResult, secondResult) } }) } }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/routes_generate_renderer_test.go
server/routes_generate_renderer_test.go
package server import ( "bytes" "encoding/json" "net/http" "strings" "testing" "time" "github.com/gin-gonic/gin" "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/ml" ) // TestGenerateWithBuiltinRenderer tests that api/generate uses built-in renderers // when in chat-like flow (messages present, no suffix, no template) func TestGenerateWithBuiltinRenderer(t *testing.T) { gin.SetMode(gin.TestMode) mock := mockRunner{ CompletionResponse: llm.CompletionResponse{ Done: true, DoneReason: llm.DoneReasonStop, PromptEvalCount: 1, PromptEvalDuration: 1, EvalCount: 1, EvalDuration: 1, }, } s := Server{ sched: &Scheduler{ pendingReqCh: make(chan *LlmRequest, 1), finishedReqCh: make(chan *LlmRequest, 1), expiredCh: make(chan *runnerRef, 1), unloadedCh: make(chan any, 1), loaded: make(map[string]*runnerRef), newServerFn: newMockServer(&mock), getGpuFn: getGpuFn, getSystemInfoFn: getSystemInfoFn, waitForRecovery: 250 * time.Millisecond, loadFn: func(req *LlmRequest, _ *ggml.GGML, _ ml.SystemInfo, _ []ml.DeviceInfo, _ bool) bool { time.Sleep(time.Millisecond) req.successCh <- &runnerRef{ llama: &mock, } return false }, }, } go s.sched.Run(t.Context()) // Create a model with a built-in renderer (qwen3-coder) _, digest := createBinFile(t, ggml.KV{ "general.architecture": "qwen3", "qwen3.block_count": uint32(1), "qwen3.context_length": uint32(8192), "qwen3.embedding_length": uint32(4096), "qwen3.attention.head_count": uint32(32), "qwen3.attention.head_count_kv": uint32(8), "tokenizer.ggml.tokens": []string{""}, "tokenizer.ggml.scores": []float32{0}, "tokenizer.ggml.token_type": []int32{0}, }, []*ggml.Tensor{ {Name: "token_embd.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_down.weight", Shape: []uint64{1}, WriterTo: 
bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_gate.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_up.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_k.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_q.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_v.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, }) // Create a model with the qwen3-coder renderer w := createRequest(t, s.CreateHandler, api.CreateRequest{ Model: "test-renderer", Files: map[string]string{"file.gguf": digest}, Renderer: "qwen3-coder", Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("expected status 200, got %d", w.Code) } mock.CompletionResponse.Content = "Hi!" 
t.Run("chat-like flow uses renderer", func(t *testing.T) { // Test that when using messages (chat-like flow), the built-in renderer is used w := createRequest(t, s.GenerateHandler, api.GenerateRequest{ Model: "test-renderer", Prompt: "Write a hello world function", Stream: &stream, }) if w.Code != http.StatusOK { t.Errorf("expected status 200, got %d", w.Code) } // The qwen3-coder renderer produces output with <|im_start|> and <|im_end|> tags // When messages are built internally from prompt, it should use the renderer if !strings.Contains(mock.CompletionRequest.Prompt, "<|im_start|>") { t.Errorf("expected prompt to contain <|im_start|> from qwen3-coder renderer, got: %s", mock.CompletionRequest.Prompt) } if !strings.Contains(mock.CompletionRequest.Prompt, "<|im_end|>") { t.Errorf("expected prompt to contain <|im_end|> from qwen3-coder renderer, got: %s", mock.CompletionRequest.Prompt) } }) t.Run("chat-like flow with system message uses renderer", func(t *testing.T) { // Test that system messages work with the renderer w := createRequest(t, s.GenerateHandler, api.GenerateRequest{ Model: "test-renderer", Prompt: "Write a hello world function", System: "You are a helpful coding assistant.", Stream: &stream, }) if w.Code != http.StatusOK { t.Errorf("expected status 200, got %d", w.Code) } // Should contain the system message and use renderer format if !strings.Contains(mock.CompletionRequest.Prompt, "<|im_start|>system") { t.Errorf("expected prompt to contain system message with renderer format, got: %s", mock.CompletionRequest.Prompt) } if !strings.Contains(mock.CompletionRequest.Prompt, "You are a helpful coding assistant.") { t.Errorf("expected prompt to contain system message content, got: %s", mock.CompletionRequest.Prompt) } }) t.Run("custom template bypasses renderer", func(t *testing.T) { // Test that providing a custom template uses the legacy flow w := createRequest(t, s.GenerateHandler, api.GenerateRequest{ Model: "test-renderer", Prompt: "Write a hello 
world function", Template: "{{ .Prompt }}", Stream: &stream, }) if w.Code != http.StatusOK { t.Errorf("expected status 200, got %d", w.Code) } // Should NOT use the renderer format when custom template is provided if strings.Contains(mock.CompletionRequest.Prompt, "<|im_start|>") { t.Errorf("expected prompt to NOT use renderer when custom template provided, got: %s", mock.CompletionRequest.Prompt) } // Should just be the raw prompt from the template if diff := cmp.Diff(mock.CompletionRequest.Prompt, "Write a hello world function"); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) // Create a model with suffix support for the next test w = createRequest(t, s.CreateHandler, api.CreateRequest{ Model: "test-suffix-renderer", From: "test-renderer", Template: `{{- if .Suffix }}<PRE> {{ .Prompt }} <SUF>{{ .Suffix }} <MID> {{- else }}{{ .Prompt }} {{- end }}`, }) if w.Code != http.StatusOK { t.Fatalf("expected status 200, got %d", w.Code) } t.Run("suffix bypasses renderer", func(t *testing.T) { // Test that providing a suffix uses the legacy flow w := createRequest(t, s.GenerateHandler, api.GenerateRequest{ Model: "test-suffix-renderer", Prompt: "def add(", Suffix: " return c", }) if w.Code != http.StatusOK { t.Errorf("expected status 200, got %d", w.Code) } // Should NOT use the renderer format when suffix is provided if strings.Contains(mock.CompletionRequest.Prompt, "<|im_start|>") { t.Errorf("expected prompt to NOT use renderer when suffix provided, got: %s", mock.CompletionRequest.Prompt) } // Should use the suffix template format if diff := cmp.Diff(mock.CompletionRequest.Prompt, "<PRE> def add( <SUF> return c <MID>"); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) } // TestGenerateWithDebugRenderOnly tests that debug_render_only works with built-in renderers func TestGenerateWithDebugRenderOnly(t *testing.T) { gin.SetMode(gin.TestMode) mock := mockRunner{ CompletionResponse: llm.CompletionResponse{ Done: true, DoneReason: 
llm.DoneReasonStop, PromptEvalCount: 1, PromptEvalDuration: 1, EvalCount: 1, EvalDuration: 1, }, } s := Server{ sched: &Scheduler{ pendingReqCh: make(chan *LlmRequest, 1), finishedReqCh: make(chan *LlmRequest, 1), expiredCh: make(chan *runnerRef, 1), unloadedCh: make(chan any, 1), loaded: make(map[string]*runnerRef), newServerFn: newMockServer(&mock), getGpuFn: getGpuFn, getSystemInfoFn: getSystemInfoFn, waitForRecovery: 250 * time.Millisecond, loadFn: func(req *LlmRequest, _ *ggml.GGML, _ ml.SystemInfo, _ []ml.DeviceInfo, _ bool) bool { time.Sleep(time.Millisecond) req.successCh <- &runnerRef{ llama: &mock, } return false }, }, } go s.sched.Run(t.Context()) // Create a model with a built-in renderer _, digest := createBinFile(t, ggml.KV{ "general.architecture": "qwen3", "qwen3.block_count": uint32(1), "qwen3.context_length": uint32(8192), "qwen3.embedding_length": uint32(4096), "qwen3.attention.head_count": uint32(32), "qwen3.attention.head_count_kv": uint32(8), "tokenizer.ggml.tokens": []string{""}, "tokenizer.ggml.scores": []float32{0}, "tokenizer.ggml.token_type": []int32{0}, }, []*ggml.Tensor{ {Name: "token_embd.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_down.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_gate.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_up.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.ffn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_k.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "blk.0.attn_q.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: 
"blk.0.attn_v.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, {Name: "output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))}, }) w := createRequest(t, s.CreateHandler, api.CreateRequest{ Model: "test-debug-renderer", Files: map[string]string{"file.gguf": digest}, Renderer: "qwen3-coder", Stream: &stream, }) if w.Code != http.StatusOK { t.Fatalf("expected status 200, got %d", w.Code) } t.Run("debug_render_only with renderer", func(t *testing.T) { w := createRequest(t, s.GenerateHandler, api.GenerateRequest{ Model: "test-debug-renderer", Prompt: "Write a hello world function", System: "You are a coding assistant", DebugRenderOnly: true, }) if w.Code != http.StatusOK { t.Errorf("expected status 200, got %d", w.Code) } var resp api.GenerateResponse if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { t.Fatal(err) } if resp.DebugInfo == nil { t.Fatalf("expected debug info, got nil") } // Verify that the rendered template uses the built-in renderer if !strings.Contains(resp.DebugInfo.RenderedTemplate, "<|im_start|>") { t.Errorf("expected rendered template to use qwen3-coder renderer format, got: %s", resp.DebugInfo.RenderedTemplate) } if !strings.Contains(resp.DebugInfo.RenderedTemplate, "You are a coding assistant") { t.Errorf("expected rendered template to contain system message, got: %s", resp.DebugInfo.RenderedTemplate) } if !strings.Contains(resp.DebugInfo.RenderedTemplate, "Write a hello world function") { t.Errorf("expected rendered template to contain prompt, got: %s", resp.DebugInfo.RenderedTemplate) } }) }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/model.go
server/model.go
package server import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "log/slog" "net/http" "os" "github.com/ollama/ollama/api" "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/template" "github.com/ollama/ollama/types/model" ) var intermediateBlobs map[string]string = make(map[string]string) type layerGGML struct { Layer *ggml.GGML } func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) { m, err := ParseNamedManifest(name) switch { case errors.Is(err, os.ErrNotExist): if err := PullModel(ctx, name.String(), &registryOptions{}, fn); err != nil { return nil, err } m, err = ParseNamedManifest(name) if err != nil { return nil, err } case err != nil: return nil, err } for _, layer := range m.Layers { layer, err := NewLayerFromLayer(layer.Digest, layer.MediaType, name.DisplayShortest()) if err != nil { return nil, err } switch layer.MediaType { case "application/vnd.ollama.image.model", "application/vnd.ollama.image.projector", "application/vnd.ollama.image.adapter": blobpath, err := GetBlobsPath(layer.Digest) if err != nil { return nil, err } blob, err := os.Open(blobpath) if err != nil { return nil, err } defer blob.Close() f, err := ggml.Decode(blob, -1) if err != nil { return nil, err } layers = append(layers, &layerGGML{layer, f}) default: layers = append(layers, &layerGGML{layer, nil}) } } return layers, nil } func detectChatTemplate(layers []*layerGGML) ([]*layerGGML, error) { for _, layer := range layers { if s := layer.GGML.KV().ChatTemplate(); s != "" { if t, err := template.Named(s); err != nil { slog.Debug("template detection", "error", err, "template", s) } else { layer, err := NewLayer(t.Reader(), "application/vnd.ollama.image.template") if err != nil { return nil, err } layer.status = fmt.Sprintf("using autodetected template %s", t.Name) layers = append(layers, &layerGGML{layer, nil}) if t.Parameters != nil { var b bytes.Buffer if err := 
json.NewEncoder(&b).Encode(t.Parameters); err != nil { return nil, err } layer, err := NewLayer(&b, "application/vnd.ollama.image.params") if err != nil { return nil, err } layers = append(layers, &layerGGML{layer, nil}) } } } } return layers, nil } func detectContentType(r io.Reader) (string, error) { var b bytes.Buffer if _, err := io.Copy(&b, r); err != nil { return "", err } if contentType := ggml.DetectContentType(b.Bytes()); contentType != "" { return contentType, nil } if contentType := http.DetectContentType(b.Bytes()); contentType != "application/octet-stream" { return contentType, nil } return "unknown", nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/images.go
server/images.go
package server import ( "bytes" "context" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" "io" "log" "log/slog" "net" "net/http" "net/url" "os" "path/filepath" "runtime" "slices" "strconv" "strings" "github.com/ollama/ollama/api" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/fs/gguf" "github.com/ollama/ollama/model/parsers" "github.com/ollama/ollama/parser" "github.com/ollama/ollama/template" "github.com/ollama/ollama/thinking" "github.com/ollama/ollama/types/model" "github.com/ollama/ollama/version" ) var ( errCapabilities = errors.New("does not support") errCapabilityCompletion = errors.New("completion") errCapabilityTools = errors.New("tools") errCapabilityInsert = errors.New("insert") errCapabilityVision = errors.New("vision") errCapabilityEmbedding = errors.New("embedding") errCapabilityThinking = errors.New("thinking") errInsecureProtocol = errors.New("insecure protocol http") ) type registryOptions struct { Insecure bool Username string Password string Token string CheckRedirect func(req *http.Request, via []*http.Request) error } type Model struct { Name string `json:"name"` Config model.ConfigV2 ShortName string ModelPath string ParentModel string AdapterPaths []string ProjectorPaths []string System string License []string Digest string Options map[string]any Messages []api.Message Template *template.Template } // Capabilities returns the capabilities that the model supports func (m *Model) Capabilities() []model.Capability { capabilities := []model.Capability{} // Check for completion capability if m.ModelPath != "" { f, err := gguf.Open(m.ModelPath) if err == nil { defer f.Close() if f.KeyValue("pooling_type").Valid() { capabilities = append(capabilities, model.CapabilityEmbedding) } else { // If no embedding is specified, we assume the model supports completion capabilities = append(capabilities, model.CapabilityCompletion) } if f.KeyValue("vision.block_count").Valid() { capabilities = append(capabilities, 
model.CapabilityVision) } } else { slog.Error("couldn't open model file", "error", err) } } else if len(m.Config.Capabilities) > 0 { for _, c := range m.Config.Capabilities { capabilities = append(capabilities, model.Capability(c)) } } else { slog.Warn("unknown capabilities for model", "model", m.Name) } if m.Template == nil { return capabilities } builtinParser := parsers.ParserForName(m.Config.Parser) // Check for tools capability v, err := m.Template.Vars() if err != nil { slog.Warn("model template contains errors", "error", err) } if slices.Contains(v, "tools") || (builtinParser != nil && builtinParser.HasToolSupport()) { capabilities = append(capabilities, model.CapabilityTools) } // Check for insert capability if slices.Contains(v, "suffix") { capabilities = append(capabilities, model.CapabilityInsert) } // Check for vision capability in projector-based models if len(m.ProjectorPaths) > 0 { capabilities = append(capabilities, model.CapabilityVision) } // Skip the thinking check if it's already set if slices.Contains(capabilities, "thinking") { return capabilities } // Check for thinking capability openingTag, closingTag := thinking.InferTags(m.Template.Template) hasTags := openingTag != "" && closingTag != "" isGptoss := slices.Contains([]string{"gptoss", "gpt-oss"}, m.Config.ModelFamily) if hasTags || isGptoss || (builtinParser != nil && builtinParser.HasThinkingSupport()) { capabilities = append(capabilities, model.CapabilityThinking) } return capabilities } // CheckCapabilities checks if the model has the specified capabilities returning an error describing // any missing or unknown capabilities func (m *Model) CheckCapabilities(want ...model.Capability) error { available := m.Capabilities() var errs []error // Map capabilities to their corresponding error capToErr := map[model.Capability]error{ model.CapabilityCompletion: errCapabilityCompletion, model.CapabilityTools: errCapabilityTools, model.CapabilityInsert: errCapabilityInsert, 
model.CapabilityVision: errCapabilityVision, model.CapabilityEmbedding: errCapabilityEmbedding, model.CapabilityThinking: errCapabilityThinking, } for _, cap := range want { err, ok := capToErr[cap] if !ok { slog.Error("unknown capability", "capability", cap) return fmt.Errorf("unknown capability: %s", cap) } if !slices.Contains(available, cap) { errs = append(errs, err) } } var err error if len(errs) > 0 { err = fmt.Errorf("%w %w", errCapabilities, errors.Join(errs...)) } if slices.Contains(errs, errCapabilityThinking) { if m.Config.ModelFamily == "qwen3" || model.ParseName(m.Name).Model == "deepseek-r1" { // append a message to the existing error return fmt.Errorf("%w. Pull the model again to get the latest version with full thinking support", err) } } return err } func (m *Model) String() string { var modelfile parser.Modelfile modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "model", Args: m.ModelPath, }) for _, adapter := range m.AdapterPaths { modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "adapter", Args: adapter, }) } for _, projector := range m.ProjectorPaths { modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "model", Args: projector, }) } if m.Template != nil { modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "template", Args: m.Template.String(), }) } if m.System != "" { modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "system", Args: m.System, }) } if m.Config.Renderer != "" { modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "renderer", Args: m.Config.Renderer, }) } if m.Config.Parser != "" { modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "parser", Args: m.Config.Parser, }) } for k, v := range m.Options { switch v := v.(type) { case []any: for _, s := range v { modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: k, Args: fmt.Sprintf("%v", s), }) } default: modelfile.Commands = 
append(modelfile.Commands, parser.Command{ Name: k, Args: fmt.Sprintf("%v", v), }) } } for _, license := range m.License { modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "license", Args: license, }) } for _, msg := range m.Messages { modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "message", Args: fmt.Sprintf("%s: %s", msg.Role, msg.Content), }) } return modelfile.String() } func GetManifest(mp ModelPath) (*Manifest, string, error) { fp, err := mp.GetManifestPath() if err != nil { return nil, "", err } f, err := os.Open(fp) if err != nil { return nil, "", err } defer f.Close() sha256sum := sha256.New() var manifest Manifest if err := json.NewDecoder(io.TeeReader(f, sha256sum)).Decode(&manifest); err != nil { return nil, "", err } return &manifest, hex.EncodeToString(sha256sum.Sum(nil)), nil } func GetModel(name string) (*Model, error) { mp := ParseModelPath(name) manifest, digest, err := GetManifest(mp) if err != nil { return nil, err } model := &Model{ Name: mp.GetFullTagname(), ShortName: mp.GetShortTagname(), Digest: digest, Template: template.DefaultTemplate, } if manifest.Config.Digest != "" { filename, err := GetBlobsPath(manifest.Config.Digest) if err != nil { return nil, err } configFile, err := os.Open(filename) if err != nil { return nil, err } defer configFile.Close() if err := json.NewDecoder(configFile).Decode(&model.Config); err != nil { return nil, err } } for _, layer := range manifest.Layers { filename, err := GetBlobsPath(layer.Digest) if err != nil { return nil, err } switch layer.MediaType { case "application/vnd.ollama.image.model": model.ModelPath = filename model.ParentModel = layer.From case "application/vnd.ollama.image.embed": // Deprecated in versions > 0.1.2 // TODO: remove this warning in a future version slog.Info("WARNING: model contains embeddings, but embeddings in modelfiles have been deprecated and will be ignored.") case "application/vnd.ollama.image.adapter": model.AdapterPaths = 
append(model.AdapterPaths, filename) case "application/vnd.ollama.image.projector": model.ProjectorPaths = append(model.ProjectorPaths, filename) case "application/vnd.ollama.image.prompt", "application/vnd.ollama.image.template": bts, err := os.ReadFile(filename) if err != nil { return nil, err } model.Template, err = template.Parse(string(bts)) if err != nil { return nil, err } case "application/vnd.ollama.image.system": bts, err := os.ReadFile(filename) if err != nil { return nil, err } model.System = string(bts) case "application/vnd.ollama.image.params": params, err := os.Open(filename) if err != nil { return nil, err } defer params.Close() // parse model options parameters into a map so that we can see which fields have been specified explicitly if err = json.NewDecoder(params).Decode(&model.Options); err != nil { return nil, err } case "application/vnd.ollama.image.messages": msgs, err := os.Open(filename) if err != nil { return nil, err } defer msgs.Close() if err = json.NewDecoder(msgs).Decode(&model.Messages); err != nil { return nil, err } case "application/vnd.ollama.image.license": bts, err := os.ReadFile(filename) if err != nil { return nil, err } model.License = append(model.License, string(bts)) } } return model, nil } func CopyModel(src, dst model.Name) error { if !dst.IsFullyQualified() { return model.Unqualified(dst) } if !src.IsFullyQualified() { return model.Unqualified(src) } if src.Filepath() == dst.Filepath() { return nil } manifests, err := GetManifestPath() if err != nil { return err } dstpath := filepath.Join(manifests, dst.Filepath()) if err := os.MkdirAll(filepath.Dir(dstpath), 0o755); err != nil { return err } srcpath := filepath.Join(manifests, src.Filepath()) srcfile, err := os.Open(srcpath) if err != nil { return err } defer srcfile.Close() dstfile, err := os.Create(dstpath) if err != nil { return err } defer dstfile.Close() _, err = io.Copy(dstfile, srcfile) return err } func deleteUnusedLayers(deleteMap map[string]struct{}) error 
{ // Ignore corrupt manifests to avoid blocking deletion of layers that are freshly orphaned manifests, err := Manifests(true) if err != nil { return err } for _, manifest := range manifests { for _, layer := range manifest.Layers { delete(deleteMap, layer.Digest) } delete(deleteMap, manifest.Config.Digest) } // only delete the files which are still in the deleteMap for k := range deleteMap { fp, err := GetBlobsPath(k) if err != nil { slog.Info(fmt.Sprintf("couldn't get file path for '%s': %v", k, err)) continue } if err := os.Remove(fp); err != nil { slog.Info(fmt.Sprintf("couldn't remove file '%s': %v", fp, err)) continue } } return nil } func PruneLayers() error { deleteMap := make(map[string]struct{}) p, err := GetBlobsPath("") if err != nil { return err } blobs, err := os.ReadDir(p) if err != nil { slog.Info(fmt.Sprintf("couldn't read dir '%s': %v", p, err)) return err } for _, blob := range blobs { name := blob.Name() name = strings.ReplaceAll(name, "-", ":") _, err := GetBlobsPath(name) if err != nil { if errors.Is(err, ErrInvalidDigestFormat) { // remove invalid blobs (e.g. 
partial downloads) if err := os.Remove(filepath.Join(p, blob.Name())); err != nil { slog.Error("couldn't remove blob", "blob", blob.Name(), "error", err) } } continue } deleteMap[name] = struct{}{} } slog.Info(fmt.Sprintf("total blobs: %d", len(deleteMap))) if err := deleteUnusedLayers(deleteMap); err != nil { slog.Error(fmt.Sprintf("couldn't remove unused layers: %v", err)) return nil } slog.Info(fmt.Sprintf("total unused blobs removed: %d", len(deleteMap))) return nil } func PruneDirectory(path string) error { info, err := os.Lstat(path) if err != nil { return err } if info.IsDir() && info.Mode()&os.ModeSymlink == 0 { entries, err := os.ReadDir(path) if err != nil { return err } for _, entry := range entries { if err := PruneDirectory(filepath.Join(path, entry.Name())); err != nil { return err } } entries, err = os.ReadDir(path) if err != nil { return err } if len(entries) > 0 { return nil } return os.Remove(path) } return nil } func PushModel(ctx context.Context, name string, regOpts *registryOptions, fn func(api.ProgressResponse)) error { mp := ParseModelPath(name) fn(api.ProgressResponse{Status: "retrieving manifest"}) if mp.ProtocolScheme == "http" && !regOpts.Insecure { return errInsecureProtocol } manifest, _, err := GetManifest(mp) if err != nil { fn(api.ProgressResponse{Status: "couldn't retrieve manifest"}) return err } var layers []Layer layers = append(layers, manifest.Layers...) 
if manifest.Config.Digest != "" { layers = append(layers, manifest.Config) } for _, layer := range layers { if err := uploadBlob(ctx, mp, layer, regOpts, fn); err != nil { slog.Info(fmt.Sprintf("error uploading blob: %v", err)) return err } } fn(api.ProgressResponse{Status: "pushing manifest"}) requestURL := mp.BaseURL() requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "manifests", mp.Tag) manifestJSON, err := json.Marshal(manifest) if err != nil { return err } headers := make(http.Header) headers.Set("Content-Type", "application/vnd.docker.distribution.manifest.v2+json") resp, err := makeRequestWithRetry(ctx, http.MethodPut, requestURL, headers, bytes.NewReader(manifestJSON), regOpts) if err != nil { return err } defer resp.Body.Close() fn(api.ProgressResponse{Status: "success"}) return nil } func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn func(api.ProgressResponse)) error { mp := ParseModelPath(name) // build deleteMap to prune unused layers deleteMap := make(map[string]struct{}) manifest, _, err := GetManifest(mp) if errors.Is(err, os.ErrNotExist) { // noop } else if err != nil { slog.Warn("pulling model with bad existing manifest", "name", name, "error", err) } else { for _, l := range manifest.Layers { deleteMap[l.Digest] = struct{}{} } if manifest.Config.Digest != "" { deleteMap[manifest.Config.Digest] = struct{}{} } } if mp.ProtocolScheme == "http" && !regOpts.Insecure { return errInsecureProtocol } fn(api.ProgressResponse{Status: "pulling manifest"}) manifest, err = pullModelManifest(ctx, mp, regOpts) if err != nil { return fmt.Errorf("pull model manifest: %s", err) } var layers []Layer layers = append(layers, manifest.Layers...) 
if manifest.Config.Digest != "" { layers = append(layers, manifest.Config) } skipVerify := make(map[string]bool) for _, layer := range layers { cacheHit, err := downloadBlob(ctx, downloadOpts{ mp: mp, digest: layer.Digest, regOpts: regOpts, fn: fn, }) if err != nil { return err } skipVerify[layer.Digest] = cacheHit delete(deleteMap, layer.Digest) } delete(deleteMap, manifest.Config.Digest) fn(api.ProgressResponse{Status: "verifying sha256 digest"}) for _, layer := range layers { if skipVerify[layer.Digest] { continue } if err := verifyBlob(layer.Digest); err != nil { if errors.Is(err, errDigestMismatch) { // something went wrong, delete the blob fp, err := GetBlobsPath(layer.Digest) if err != nil { return err } if err := os.Remove(fp); err != nil { // log this, but return the original error slog.Info(fmt.Sprintf("couldn't remove file with digest mismatch '%s': %v", fp, err)) } } return err } } fn(api.ProgressResponse{Status: "writing manifest"}) manifestJSON, err := json.Marshal(manifest) if err != nil { return err } fp, err := mp.GetManifestPath() if err != nil { return err } if err := os.MkdirAll(filepath.Dir(fp), 0o755); err != nil { return err } err = os.WriteFile(fp, manifestJSON, 0o644) if err != nil { slog.Info(fmt.Sprintf("couldn't write to %s", fp)) return err } if !envconfig.NoPrune() && len(deleteMap) > 0 { fn(api.ProgressResponse{Status: "removing unused layers"}) if err := deleteUnusedLayers(deleteMap); err != nil { fn(api.ProgressResponse{Status: fmt.Sprintf("couldn't remove unused layers: %v", err)}) } } fn(api.ProgressResponse{Status: "success"}) return nil } func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *registryOptions) (*Manifest, error) { requestURL := mp.BaseURL().JoinPath("v2", mp.GetNamespaceRepository(), "manifests", mp.Tag) headers := make(http.Header) headers.Set("Accept", "application/vnd.docker.distribution.manifest.v2+json") resp, err := makeRequestWithRetry(ctx, http.MethodGet, requestURL, headers, nil, regOpts) if 
err != nil { return nil, err } defer resp.Body.Close() var m Manifest if err := json.NewDecoder(resp.Body).Decode(&m); err != nil { return nil, err } return &m, err } // GetSHA256Digest returns the SHA256 hash of a given buffer and returns it, and the size of buffer func GetSHA256Digest(r io.Reader) (string, int64) { h := sha256.New() n, err := io.Copy(h, r) if err != nil { log.Fatal(err) } return fmt.Sprintf("sha256:%x", h.Sum(nil)), n } var errUnauthorized = errors.New("unauthorized: access denied") func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.URL, headers http.Header, body io.ReadSeeker, regOpts *registryOptions) (*http.Response, error) { for range 2 { resp, err := makeRequest(ctx, method, requestURL, headers, body, regOpts) if err != nil { if !errors.Is(err, context.Canceled) { slog.Info(fmt.Sprintf("request failed: %v", err)) } return nil, err } switch { case resp.StatusCode == http.StatusUnauthorized: resp.Body.Close() // Handle authentication error with one retry challenge := parseRegistryChallenge(resp.Header.Get("www-authenticate")) token, err := getAuthorizationToken(ctx, challenge) if err != nil { return nil, err } regOpts.Token = token if body != nil { _, err = body.Seek(0, io.SeekStart) if err != nil { return nil, err } } case resp.StatusCode == http.StatusNotFound: resp.Body.Close() return nil, os.ErrNotExist case resp.StatusCode >= http.StatusBadRequest: defer resp.Body.Close() responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("%d: %s", resp.StatusCode, err) } return nil, fmt.Errorf("%d: %s", resp.StatusCode, responseBody) default: return resp, nil } } return nil, errUnauthorized } // testMakeRequestDialContext specifies the dial function for the http client in // makeRequest. It can be used to resolve hosts in model names to local // addresses for testing. For example, the model name ("example.com/my/model") // can be directed to push/pull from "127.0.0.1:1234". 
// // This is not safe to set across goroutines. It should be set in // the main test goroutine, and not by tests marked to run in parallel with // t.Parallel(). // // It should be cleared after use, otherwise it will affect other tests. // // Ideally we would have some set this up the stack, but the code is not // structured in a way that makes this easy, so this will have to do for now. var testMakeRequestDialContext func(ctx context.Context, network, addr string) (net.Conn, error) func makeRequest(ctx context.Context, method string, requestURL *url.URL, headers http.Header, body io.Reader, regOpts *registryOptions) (*http.Response, error) { if requestURL.Scheme != "http" && regOpts != nil && regOpts.Insecure { requestURL.Scheme = "http" } req, err := http.NewRequestWithContext(ctx, method, requestURL.String(), body) if err != nil { return nil, err } if headers != nil { req.Header = headers } if regOpts != nil { if regOpts.Token != "" { req.Header.Set("Authorization", "Bearer "+regOpts.Token) } else if regOpts.Username != "" && regOpts.Password != "" { req.SetBasicAuth(regOpts.Username, regOpts.Password) } } req.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version())) if s := req.Header.Get("Content-Length"); s != "" { contentLength, err := strconv.ParseInt(s, 10, 64) if err != nil { return nil, err } req.ContentLength = contentLength } c := &http.Client{ CheckRedirect: regOpts.CheckRedirect, } if testMakeRequestDialContext != nil { tr := http.DefaultTransport.(*http.Transport).Clone() tr.DialContext = testMakeRequestDialContext c.Transport = tr } return c.Do(req) } func getValue(header, key string) string { startIdx := strings.Index(header, key+"=") if startIdx == -1 { return "" } // Move the index to the starting quote after the key. 
startIdx += len(key) + 2 endIdx := startIdx for endIdx < len(header) { if header[endIdx] == '"' { if endIdx+1 < len(header) && header[endIdx+1] != ',' { // If the next character isn't a comma, continue endIdx++ continue } break } endIdx++ } return header[startIdx:endIdx] } func parseRegistryChallenge(authStr string) registryChallenge { authStr = strings.TrimPrefix(authStr, "Bearer ") return registryChallenge{ Realm: getValue(authStr, "realm"), Service: getValue(authStr, "service"), Scope: getValue(authStr, "scope"), } } var errDigestMismatch = errors.New("digest mismatch, file must be downloaded again") func verifyBlob(digest string) error { fp, err := GetBlobsPath(digest) if err != nil { return err } f, err := os.Open(fp) if err != nil { return err } defer f.Close() fileDigest, _ := GetSHA256Digest(f) if digest != fileDigest { return fmt.Errorf("%w: want %s, got %s", errDigestMismatch, digest, fileDigest) } return nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/sched_test.go
server/sched_test.go
package server import ( "bytes" "context" "errors" "log/slog" "os" "testing" "time" "github.com/stretchr/testify/require" "github.com/ollama/ollama/api" "github.com/ollama/ollama/format" "github.com/ollama/ollama/fs/ggml" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/ml" ) func TestMain(m *testing.M) { os.Setenv("OLLAMA_DEBUG", "1") logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})) slog.SetDefault(logger) os.Exit(m.Run()) } func TestSchedInit(t *testing.T) { ctx, done := context.WithCancel(t.Context()) defer done() s := InitScheduler(ctx) s.loadedMu.Lock() require.NotNil(t, s.loaded) s.loadedMu.Unlock() } func TestSchedLoad(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 20*time.Millisecond) defer done() s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond var f *ggml.GGML // value not used in tests req := &LlmRequest{ ctx: ctx, model: &Model{ModelPath: "foo"}, opts: api.DefaultOptions(), successCh: make(chan *runnerRef, 1), errCh: make(chan error, 1), sessionDuration: &api.Duration{Duration: 2 * time.Second}, } // Fail to load model first s.newServerFn = func(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, model string, f *ggml.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) { return nil, errors.New("something failed to load model blah") } gpus := []ml.DeviceInfo{} systemInfo := ml.SystemInfo{} s.load(req, f, systemInfo, gpus, false) require.Empty(t, req.successCh) require.Len(t, req.errCh, 1) s.loadedMu.Lock() require.Empty(t, s.loaded) s.loadedMu.Unlock() err := <-req.errCh require.Contains(t, err.Error(), "this model may be incompatible") server := &mockLlm{vramSize: 10, vramByGPU: map[ml.DeviceID]uint64{}} s.newServerFn = func(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, model string, f *ggml.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) { server.modelPath = 
model return server, nil } s.load(req, f, systemInfo, gpus, false) select { case err := <-req.errCh: require.NoError(t, err) case resp := <-req.successCh: require.Equal(t, uint64(10), resp.vramSize) require.Equal(t, uint(1), resp.refCount) s.loadedMu.Lock() require.Len(t, s.loaded, 1) s.loadedMu.Unlock() } req.model.ModelPath = "dummy_model_path" server.waitResp = errors.New("wait failure") s.load(req, f, systemInfo, gpus, false) select { case err := <-req.errCh: require.Contains(t, err.Error(), "wait failure") case resp := <-req.successCh: t.Fatalf("unexpected success %v", resp) } s.loadedMu.Lock() runner := s.loaded["dummy_model_path"] s.loadedMu.Unlock() require.NotNil(t, runner) require.Equal(t, uint(0), runner.refCount) time.Sleep(1 * time.Millisecond) require.Len(t, s.expiredCh, 1) } type reqBundle struct { ctx context.Context //nolint:containedctx ctxDone func() srv *mockLlm req *LlmRequest f *ggml.GGML } func (scenario *reqBundle) newServer(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, model string, f *ggml.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) { scenario.srv.modelPath = model return scenario.srv, nil } func newScenarioRequest(t *testing.T, ctx context.Context, modelName string, vramSize uint64, duration *api.Duration, vramByGPU map[ml.DeviceID]uint64) *reqBundle { b := &reqBundle{} b.ctx, b.ctxDone = context.WithCancel(ctx) t.Helper() p, _ := createBinFile(t, ggml.KV{ "general.architecture": "llama", "llama.context_length": uint32(32), "llama.embedding_length": uint32(4096), "llama.block_count": uint32(1), "llama.attention.head_count": uint32(32), "llama.attention.head_count_kv": uint32(32), "tokenizer.ggml.tokens": []string{" "}, "tokenizer.ggml.scores": []float32{0}, "tokenizer.ggml.token_type": []int32{0}, }, []*ggml.Tensor{ {Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))}, {Name: "output.weight", 
Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))}, }) model := &Model{Name: modelName, ModelPath: p} f, err := llm.LoadModel(model.ModelPath, 0) if err != nil { t.Fatal(err) } b.f = f if duration == nil { duration = &api.Duration{Duration: 5 * time.Millisecond} } b.req = &LlmRequest{ ctx: b.ctx, model: model, opts: api.DefaultOptions(), sessionDuration: duration, successCh: make(chan *runnerRef, 1), errCh: make(chan error, 1), } b.srv = &mockLlm{vramSize: vramSize, vramByGPU: vramByGPU} return b } func getGpuFn(ctx context.Context, runners []ml.FilteredRunnerDiscovery) []ml.DeviceInfo { slog.Info("test getGpuFn called", "runners", runners) g := ml.DeviceInfo{DeviceID: ml.DeviceID{Library: "Metal"}} g.TotalMemory = 24 * format.GigaByte g.FreeMemory = 12 * format.GigaByte return []ml.DeviceInfo{g} } func getSystemInfoFn() ml.SystemInfo { slog.Info("test getSystemInfoFn called") return ml.SystemInfo{ TotalMemory: 32 * format.GigaByte, FreeMemory: 26 * format.GigaByte, } } func TestSchedRequestsSameModelSameRequest(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 500*time.Millisecond) defer done() s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond s.getGpuFn = getGpuFn s.getSystemInfoFn = getSystemInfoFn a := newScenarioRequest(t, ctx, "ollama-model-1", 10, &api.Duration{Duration: 5 * time.Millisecond}, nil) b := newScenarioRequest(t, ctx, "ollama-model-1", 11, &api.Duration{Duration: 0}, nil) b.req.model = a.req.model b.f = a.f s.newServerFn = a.newServer slog.Info("a") s.pendingReqCh <- a.req require.Len(t, s.pendingReqCh, 1) s.Run(ctx) select { case resp := <-a.req.successCh: require.Equal(t, resp.llama, a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, a.req.errCh) case err := <-a.req.errCh: t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } // Same runner as first request due to not needing a reload s.newServerFn = b.newServer slog.Info("b") s.pendingReqCh <- 
b.req select { case resp := <-b.req.successCh: require.Equal(t, resp.llama, a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, b.req.errCh) case err := <-b.req.errCh: t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } } func TestSchedRequestsSimpleReloadSameModel(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 5000*time.Millisecond) defer done() s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond s.getGpuFn = getGpuFn s.getSystemInfoFn = getSystemInfoFn a := newScenarioRequest(t, ctx, "ollama-model-1", 10, &api.Duration{Duration: 5 * time.Millisecond}, nil) b := newScenarioRequest(t, ctx, "ollama-model-1", 20, &api.Duration{Duration: 5 * time.Millisecond}, nil) tmpModel := *a.req.model b.req.model = &tmpModel b.f = a.f s.newServerFn = a.newServer slog.Info("a") s.pendingReqCh <- a.req require.Len(t, s.pendingReqCh, 1) s.Run(ctx) select { case resp := <-a.req.successCh: require.Equal(t, resp.llama, a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, a.req.errCh) case err := <-a.req.errCh: t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } // Trigger a reload s.newServerFn = b.newServer b.req.model.AdapterPaths = []string{"new"} slog.Info("b") s.pendingReqCh <- b.req // finish first two requests, so model can reload time.Sleep(1 * time.Millisecond) a.ctxDone() // Report recovered VRAM usage time.Sleep(1 * time.Millisecond) s.getGpuFn = func(ctx context.Context, runners []ml.FilteredRunnerDiscovery) []ml.DeviceInfo { slog.Info("altered getGpuFn called") g := ml.DeviceInfo{DeviceID: ml.DeviceID{Library: "Metal"}} g.TotalMemory = 24 * format.GigaByte g.FreeMemory = 24 * format.GigaByte return []ml.DeviceInfo{g} } select { case resp := <-b.req.successCh: require.Equal(t, resp.llama, b.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, b.req.errCh) case err := <-b.req.errCh: t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } } func TestSchedRequestsMultipleLoadedModels(t *testing.T) { 
slog.Info("TestRequestsMultipleLoadedModels") ctx, done := context.WithTimeout(t.Context(), 1000*time.Millisecond) defer done() s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond s.getGpuFn = getGpuFn // 1 Metal GPU s.getSystemInfoFn = getSystemInfoFn // Multiple loaded models a := newScenarioRequest(t, ctx, "model-a-1g-gpu", 1*format.GigaByte, nil, map[ml.DeviceID]uint64{{Library: "Metal"}: 1 * format.GigaByte}) a.req.sessionDuration = &api.Duration{Duration: 5 * time.Millisecond} b := newScenarioRequest(t, ctx, "model-b-10g-gpu", 10*format.GigaByte, nil, map[ml.DeviceID]uint64{{Library: "Metal"}: 10 * format.GigaByte}) b.req.sessionDuration = &api.Duration{Duration: 5 * time.Millisecond} c := newScenarioRequest(t, ctx, "model-c-10g-cpu", 10*format.GigaByte, nil, nil /* No GPU load */) c.req.opts.NumGPU = 0 // CPU load, will be allowed b.req.sessionDuration = &api.Duration{Duration: 10 * time.Millisecond} // longer than b to cause the scheduler to favor unloading b over c d := newScenarioRequest(t, ctx, "model-d-10g-gpu", 13*format.GigaByte, nil, map[ml.DeviceID]uint64{{Library: "Metal"}: 13 * format.GigaByte}) // Needs prior unloaded s.newServerFn = a.newServer slog.Info("Loading A") s.pendingReqCh <- a.req s.Run(ctx) select { case resp := <-a.req.successCh: require.Equal(t, resp.llama, a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, a.req.errCh) case err := <-a.req.errCh: t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } s.loadedMu.Lock() require.Len(t, s.loaded, 1) s.loadedMu.Unlock() t.Setenv("OLLAMA_MAX_LOADED_MODELS", "0") s.newServerFn = b.newServer slog.Info("Loading B") s.pendingReqCh <- b.req select { case resp := <-b.req.successCh: require.Equal(t, resp.llama, b.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, b.req.errCh) case err := <-b.req.errCh: t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } s.loadedMu.Lock() require.Len(t, s.loaded, 2) s.loadedMu.Unlock() // This is a CPU load with NumGPU = 0 
so it should load s.newServerFn = c.newServer slog.Info("Loading C") s.pendingReqCh <- c.req select { case resp := <-c.req.successCh: require.Equal(t, resp.llama, c.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, c.req.errCh) case err := <-c.req.errCh: t.Fatal(err.Error()) case <-ctx.Done(): slog.Info("FAIL: scheduler state", "s.loaded", s.loaded) t.Fatal("timeout") } s.loadedMu.Lock() require.Len(t, s.loaded, 3) s.loadedMu.Unlock() // Try to load a model that won't fit s.newServerFn = d.newServer slog.Info("d") s.loadedMu.Lock() require.Len(t, s.loaded, 3) s.loadedMu.Unlock() a.ctxDone() // Won't help since this one isn't big enough to make room time.Sleep(2 * time.Millisecond) s.pendingReqCh <- d.req // finish prior request, so new model can load time.Sleep(6 * time.Millisecond) s.loadedMu.Lock() require.Len(t, s.loaded, 2) s.loadedMu.Unlock() // Mark b done so it can unload b.ctxDone() // Report recovered VRAM usage so scheduler will finish waiting and unload time.Sleep(1 * time.Millisecond) s.getGpuFn = func(ctx context.Context, runners []ml.FilteredRunnerDiscovery) []ml.DeviceInfo { g := ml.DeviceInfo{DeviceID: ml.DeviceID{Library: "Metal"}} g.TotalMemory = 24 * format.GigaByte g.FreeMemory = 24 * format.GigaByte return []ml.DeviceInfo{g} } select { case resp := <-d.req.successCh: require.Equal(t, resp.llama, d.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, d.req.errCh) case <-ctx.Done(): t.Fatal("timeout") } // Wait for b to close closeWait: for { select { case <-ctx.Done(): t.Fatal("timeout") default: if b.srv.closeCalled { break closeWait } time.Sleep(1 * time.Millisecond) } } s.loadedMu.Lock() require.Len(t, s.loaded, 2) s.loadedMu.Unlock() } func TestSchedGetRunner(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 3*time.Second) defer done() a := newScenarioRequest(t, ctx, "ollama-model-1a", 10, &api.Duration{Duration: 2 * time.Millisecond}, nil) b := newScenarioRequest(t, ctx, "ollama-model-1b", 10, &api.Duration{Duration: 2 * 
time.Millisecond}, nil) c := newScenarioRequest(t, ctx, "ollama-model-1c", 10, &api.Duration{Duration: 2 * time.Millisecond}, nil) t.Setenv("OLLAMA_MAX_QUEUE", "1") s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond s.getGpuFn = getGpuFn s.getSystemInfoFn = getSystemInfoFn s.newServerFn = a.newServer slog.Info("a") successCh1a, errCh1a := s.GetRunner(a.ctx, a.req.model, a.req.opts, a.req.sessionDuration) require.Len(t, s.pendingReqCh, 1) slog.Info("b") successCh1b, errCh1b := s.GetRunner(b.ctx, b.req.model, b.req.opts, b.req.sessionDuration) require.Len(t, s.pendingReqCh, 1) require.Empty(t, successCh1b) require.Len(t, errCh1b, 1) err := <-errCh1b require.Contains(t, err.Error(), "server busy") s.Run(ctx) select { case resp := <-successCh1a: require.Equal(t, resp.llama, a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, errCh1a) case err := <-errCh1a: t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } a.ctxDone() // Set "a" model to idle so it can unload s.loadedMu.Lock() require.Len(t, s.loaded, 1) s.loadedMu.Unlock() c.req.model.ModelPath = "bad path" slog.Info("c") successCh1c, errCh1c := s.GetRunner(c.ctx, c.req.model, c.req.opts, c.req.sessionDuration) // Starts in pending channel, then should be quickly processed to return an error time.Sleep(50 * time.Millisecond) // Long enough for the "a" model to expire and unload require.Empty(t, successCh1c) s.loadedMu.Lock() require.Empty(t, s.loaded) s.loadedMu.Unlock() require.Len(t, errCh1c, 1) err = <-errCh1c require.Contains(t, err.Error(), "bad path") b.ctxDone() } func TestSchedExpireRunner(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 20*time.Millisecond) defer done() s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond req := &LlmRequest{ ctx: ctx, model: &Model{ModelPath: "foo"}, opts: api.DefaultOptions(), successCh: make(chan *runnerRef, 1), errCh: make(chan error, 1), sessionDuration: &api.Duration{Duration: 2 * time.Minute}, } var f *ggml.GGML 
gpus := []ml.DeviceInfo{} systemInfo := ml.SystemInfo{} server := &mockLlm{vramSize: 10, vramByGPU: map[ml.DeviceID]uint64{}} s.newServerFn = func(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, model string, f *ggml.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) { server.modelPath = model return server, nil } s.load(req, f, systemInfo, gpus, false) select { case err := <-req.errCh: if err != nil { t.Fatalf("expected no errors when loading, got '%s'", err.Error()) } case resp := <-req.successCh: s.loadedMu.Lock() if resp.refCount != uint(1) || len(s.loaded) != 1 { t.Fatalf("expected a model to be loaded") } s.loadedMu.Unlock() } s.expireRunner(&Model{ModelPath: "foo"}) s.finishedReqCh <- req s.processCompleted(ctx) s.loadedMu.Lock() if len(s.loaded) != 0 { t.Fatalf("expected model to be unloaded") } s.loadedMu.Unlock() } // TODO - add one scenario that triggers the bogus finished event with positive ref count func TestSchedPrematureExpired(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 1000*time.Millisecond) defer done() // Same model, same request scenario1a := newScenarioRequest(t, ctx, "ollama-model-1a", 10, &api.Duration{Duration: 100 * time.Millisecond}, nil) s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond s.getGpuFn = getGpuFn s.getSystemInfoFn = getSystemInfoFn s.newServerFn = scenario1a.newServer successCh1a, errCh1a := s.GetRunner(scenario1a.ctx, scenario1a.req.model, scenario1a.req.opts, scenario1a.req.sessionDuration) require.Len(t, s.pendingReqCh, 1) s.Run(ctx) select { case resp := <-successCh1a: require.Equal(t, resp.llama, scenario1a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, errCh1a) s.loadedMu.Lock() require.Len(t, s.loaded, 1) s.loadedMu.Unlock() slog.Info("sending premature expired event now") s.expiredCh <- resp // Shouldn't happen in real life, but make sure its safe case err := <-errCh1a: t.Fatal(err.Error()) case <-ctx.Done(): 
t.Fatal("timeout") } time.Sleep(scenario1a.req.sessionDuration.Duration) scenario1a.ctxDone() time.Sleep(20 * time.Millisecond) require.LessOrEqual(t, len(s.finishedReqCh), 1) time.Sleep(10 * time.Millisecond) require.Empty(t, s.finishedReqCh) s.loadedMu.Lock() require.Empty(t, s.loaded) s.loadedMu.Unlock() // also shouldn't happen in real life s.finishedReqCh <- scenario1a.req time.Sleep(5 * time.Millisecond) } func TestSchedUseLoadedRunner(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 100*time.Millisecond) req := &LlmRequest{ ctx: ctx, opts: api.DefaultOptions(), successCh: make(chan *runnerRef, 1), sessionDuration: &api.Duration{Duration: 2}, } finished := make(chan *LlmRequest) llm1 := &mockLlm{vramByGPU: map[ml.DeviceID]uint64{}} r1 := &runnerRef{llama: llm1, sessionDuration: 1, numParallel: 1} req.useLoadedRunner(r1, finished) require.Equal(t, uint(1), r1.refCount) require.Equal(t, time.Duration(2), r1.sessionDuration) select { case success := <-req.successCh: require.Equal(t, r1, success) case err := <-req.errCh: t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } done() fin := <-finished require.Equal(t, req, fin) } func TestSchedUpdateFreeSpace(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 100*time.Millisecond) defer done() gpus := []ml.DeviceInfo{ { DeviceID: ml.DeviceID{ ID: "1", }, }, { DeviceID: ml.DeviceID{ ID: "2", }, }, } gpus[0].TotalMemory = 1000 gpus[0].FreeMemory = 900 gpus[1].TotalMemory = 2000 gpus[1].FreeMemory = 1900 gpuIDs := []ml.DeviceID{ { ID: "1", }, { ID: "2", }, } llm1 := &mockLlm{vramByGPU: map[ml.DeviceID]uint64{{ID: "1"}: 50, {ID: "2"}: 50}} llm2 := &mockLlm{vramByGPU: map[ml.DeviceID]uint64{{ID: "1"}: 125, {ID: "2"}: 75}} r1 := &runnerRef{llama: llm1, gpus: gpuIDs, numParallel: 1} r2 := &runnerRef{llama: llm2, gpus: gpuIDs, numParallel: 1} s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond s.loadedMu.Lock() s.loaded["a"] = r1 s.loaded["b"] = r2 s.loadedMu.Unlock() 
s.updateFreeSpace(gpus) require.Equal(t, uint64(1000-50-125), gpus[0].FreeMemory) require.Equal(t, uint64(2000-50-75), gpus[1].FreeMemory) } func TestSchedFindRunnerToUnload(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 100*time.Millisecond) defer done() r1 := &runnerRef{refCount: 1, sessionDuration: 1, numParallel: 1} r2 := &runnerRef{sessionDuration: 2, numParallel: 1} s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond s.loadedMu.Lock() s.loaded["a"] = r1 s.loaded["b"] = r2 s.loadedMu.Unlock() resp := s.findRunnerToUnload() require.Equal(t, r2, resp) r2.refCount = 1 resp = s.findRunnerToUnload() require.Equal(t, r1, resp) } func TestSchedNeedsReload(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 100*time.Millisecond) defer done() llm := &mockLlm{vramByGPU: map[ml.DeviceID]uint64{}} do := api.DefaultOptions() runner := &runnerRef{ model: &Model{ AdapterPaths: []string{"adapter1"}, ProjectorPaths: []string{"projector1"}, }, Options: &do, llama: llm, numParallel: 1, } req := &LlmRequest{ model: &Model{ AdapterPaths: []string{"adapter2"}, ProjectorPaths: []string{"projector2"}, }, opts: api.DefaultOptions(), } resp := runner.needsReload(ctx, req) require.True(t, resp) req.model.AdapterPaths = runner.model.AdapterPaths resp = runner.needsReload(ctx, req) require.True(t, resp) req.model.ProjectorPaths = runner.model.ProjectorPaths runner.loading = true req.opts.NumBatch = 1234 resp = runner.needsReload(ctx, req) require.True(t, resp) req.opts.NumBatch = runner.Options.NumBatch llm.pingResp = errors.New("foo") resp = runner.needsReload(ctx, req) require.True(t, resp) llm.pingResp = nil resp = runner.needsReload(ctx, req) require.False(t, resp) req.opts.NumGPU = 99 resp = runner.needsReload(ctx, req) require.True(t, resp) req.opts.NumGPU = -1 resp = runner.needsReload(ctx, req) require.False(t, resp) } func TestSchedUnloadAllRunners(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 100*time.Millisecond) defer 
done() llm1 := &mockLlm{vramByGPU: map[ml.DeviceID]uint64{}} llm2 := &mockLlm{vramByGPU: map[ml.DeviceID]uint64{}} s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond s.unloadAllRunners() r1 := &runnerRef{llama: llm1, numParallel: 1} r2 := &runnerRef{llama: llm2, numParallel: 1} s.loadedMu.Lock() s.loaded["a"] = r1 s.loaded["b"] = r2 s.loadedMu.Unlock() s.unloadAllRunners() require.True(t, llm1.closeCalled) require.True(t, llm2.closeCalled) } func TestSchedUnload(t *testing.T) { llm1 := &mockLlm{vramByGPU: map[ml.DeviceID]uint64{}} r1 := &runnerRef{llama: llm1, numParallel: 1} r2 := &runnerRef{model: &Model{AdapterPaths: []string{"A"}}, numParallel: 1} r1.unload() require.True(t, llm1.closeCalled) r2.unload() require.Nil(t, r2.model) } func TestSchedAlreadyCanceled(t *testing.T) { ctx, done := context.WithTimeout(t.Context(), 500*time.Millisecond) defer done() dctx, done2 := context.WithCancel(ctx) done2() scenario1a := newScenarioRequest(t, dctx, "ollama-model-1", 10, &api.Duration{Duration: 0}, nil) s := InitScheduler(ctx) s.waitForRecovery = 10 * time.Millisecond slog.Info("scenario1a") s.pendingReqCh <- scenario1a.req require.Len(t, s.pendingReqCh, 1) s.Run(ctx) time.Sleep(5 * time.Millisecond) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario1a.req.errCh) require.Empty(t, scenario1a.req.successCh) } type mockLlm struct { modelPath string pingResp error waitResp error completionResp error embeddingResp []float32 embeddingRespErr error tokenizeResp []int tokenizeRespErr error detokenizeResp string detonekizeRespErr error closeResp error closeCalled bool vramSize uint64 totalSize uint64 vramByGPU map[ml.DeviceID]uint64 } func (s *mockLlm) ModelPath() string { return s.modelPath } func (s *mockLlm) Load(ctx context.Context, sytemInfo ml.SystemInfo, gpus []ml.DeviceInfo, requireFull bool) ([]ml.DeviceID, error) { if requireFull { if len(gpus) == 0 { slog.Info("mockLlm.Load CPU based load") return nil, nil } for _, g := range gpus { if 
g.FreeMemory >= s.vramSize { return []ml.DeviceID{g.DeviceID}, nil } } return nil, llm.ErrLoadRequiredFull } gpuIDs := make([]ml.DeviceID, len(gpus)) for i := range gpus { gpuIDs[i] = gpus[i].DeviceID } return gpuIDs, nil } func (s *mockLlm) Ping(ctx context.Context) error { return s.pingResp } func (s *mockLlm) WaitUntilRunning(ctx context.Context) error { return s.waitResp } func (s *mockLlm) Completion(ctx context.Context, req llm.CompletionRequest, fn func(llm.CompletionResponse)) error { return s.completionResp } func (s *mockLlm) Embedding(ctx context.Context, input string) ([]float32, int, error) { return s.embeddingResp, 0, s.embeddingRespErr } func (s *mockLlm) Tokenize(ctx context.Context, content string) ([]int, error) { return s.tokenizeResp, s.tokenizeRespErr } func (s *mockLlm) Detokenize(ctx context.Context, tokens []int) (string, error) { return s.detokenizeResp, s.detonekizeRespErr } func (s *mockLlm) Close() error { s.closeCalled = true return s.closeResp } func (s *mockLlm) VRAMSize() uint64 { return s.vramSize } func (s *mockLlm) TotalSize() uint64 { return s.totalSize } func (s *mockLlm) VRAMByGPU(id ml.DeviceID) uint64 { return s.vramByGPU[id] } func (s *mockLlm) Pid() int { return -1 } func (s *mockLlm) GetPort() int { return -1 } func (s *mockLlm) GetDeviceInfos(ctx context.Context) []ml.DeviceInfo { return nil } func (s *mockLlm) HasExited() bool { return false } func (s *mockLlm) GetActiveDeviceIDs() []ml.DeviceID { return nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/modelpath.go
server/modelpath.go
package server import ( "errors" "fmt" "io/fs" "net/url" "os" "path/filepath" "regexp" "strings" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/types/model" ) type ModelPath struct { ProtocolScheme string Registry string Namespace string Repository string Tag string } const ( DefaultRegistry = "registry.ollama.ai" DefaultNamespace = "library" DefaultTag = "latest" DefaultProtocolScheme = "https" ) var ( ErrInvalidImageFormat = errors.New("invalid image format") ErrInvalidDigestFormat = errors.New("invalid digest format") ErrInvalidProtocol = errors.New("invalid protocol scheme") ErrInsecureProtocol = errors.New("insecure protocol http") ErrModelPathInvalid = errors.New("invalid model path") ) func ParseModelPath(name string) ModelPath { mp := ModelPath{ ProtocolScheme: DefaultProtocolScheme, Registry: DefaultRegistry, Namespace: DefaultNamespace, Repository: "", Tag: DefaultTag, } before, after, found := strings.Cut(name, "://") if found { mp.ProtocolScheme = before name = after } name = strings.ReplaceAll(name, string(os.PathSeparator), "/") parts := strings.Split(name, "/") switch len(parts) { case 3: mp.Registry = parts[0] mp.Namespace = parts[1] mp.Repository = parts[2] case 2: mp.Namespace = parts[0] mp.Repository = parts[1] case 1: mp.Repository = parts[0] } if repo, tag, found := strings.Cut(mp.Repository, ":"); found { mp.Repository = repo mp.Tag = tag } return mp } func (mp ModelPath) GetNamespaceRepository() string { return fmt.Sprintf("%s/%s", mp.Namespace, mp.Repository) } func (mp ModelPath) GetFullTagname() string { return fmt.Sprintf("%s/%s/%s:%s", mp.Registry, mp.Namespace, mp.Repository, mp.Tag) } func (mp ModelPath) GetShortTagname() string { if mp.Registry == DefaultRegistry { if mp.Namespace == DefaultNamespace { return fmt.Sprintf("%s:%s", mp.Repository, mp.Tag) } return fmt.Sprintf("%s/%s:%s", mp.Namespace, mp.Repository, mp.Tag) } return fmt.Sprintf("%s/%s/%s:%s", mp.Registry, mp.Namespace, mp.Repository, mp.Tag) } // 
GetManifestPath returns the path to the manifest file for the given model path, it is up to the caller to create the directory if it does not exist. func (mp ModelPath) GetManifestPath() (string, error) { name := model.Name{ Host: mp.Registry, Namespace: mp.Namespace, Model: mp.Repository, Tag: mp.Tag, } if !name.IsValid() { return "", fs.ErrNotExist } return filepath.Join(envconfig.Models(), "manifests", name.Filepath()), nil } func (mp ModelPath) BaseURL() *url.URL { return &url.URL{ Scheme: mp.ProtocolScheme, Host: mp.Registry, } } func GetManifestPath() (string, error) { path := filepath.Join(envconfig.Models(), "manifests") if err := os.MkdirAll(path, 0o755); err != nil { return "", fmt.Errorf("%w: ensure path elements are traversable", err) } return path, nil } func GetBlobsPath(digest string) (string, error) { // only accept actual sha256 digests pattern := "^sha256[:-][0-9a-fA-F]{64}$" re := regexp.MustCompile(pattern) if digest != "" && !re.MatchString(digest) { return "", ErrInvalidDigestFormat } digest = strings.ReplaceAll(digest, ":", "-") path := filepath.Join(envconfig.Models(), "blobs", digest) dirPath := filepath.Dir(path) if digest == "" { dirPath = path } if err := os.MkdirAll(dirPath, 0o755); err != nil { return "", fmt.Errorf("%w: ensure path elements are traversable", err) } return path, nil }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/logprob.go
server/logprob.go
package server import ( "github.com/ollama/ollama/api" "github.com/ollama/ollama/llm" ) // toAPILogprobs converts llm.Logprobs to api.Logprobs func toAPILogprobs(logprobs []llm.Logprob) []api.Logprob { result := make([]api.Logprob, len(logprobs)) for i, lp := range logprobs { result[i] = api.Logprob{ TokenLogprob: api.TokenLogprob{ Token: lp.Token, Bytes: stringToByteInts(lp.Token), Logprob: lp.Logprob, }, } if len(lp.TopLogprobs) > 0 { result[i].TopLogprobs = make([]api.TokenLogprob, len(lp.TopLogprobs)) for j, tlp := range lp.TopLogprobs { result[i].TopLogprobs[j] = api.TokenLogprob{ Token: tlp.Token, Bytes: stringToByteInts(tlp.Token), Logprob: tlp.Logprob, } } } } return result } func stringToByteInts(s string) []int { if s == "" { return nil } raw := []byte(s) ints := make([]int, len(raw)) for i, b := range raw { ints[i] = int(b) } return ints }
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/fixblobs_test.go
server/fixblobs_test.go
package server

import (
	"io/fs"
	"os"
	"path/filepath"
	"runtime"
	"slices"
	"strings"
	"testing"
)

// TestFixBlobs verifies that fixBlobs renames only "sha256:<hex>" file
// basenames to "sha256-<hex>", leaving other prefixes and directory names
// untouched.
func TestFixBlobs(t *testing.T) {
	cases := []struct {
		path []string // files to create under the temp root (relative, "/"-separated)
		want []string // expected files after fixBlobs runs
	}{
		{path: []string{"sha256-1234"}, want: []string{"sha256-1234"}},
		{path: []string{"sha256:1234"}, want: []string{"sha256-1234"}},
		// non-sha256 prefix must be left alone
		{path: []string{"sha259:5678"}, want: []string{"sha259:5678"}},
		{path: []string{"sha256:abcd"}, want: []string{"sha256-abcd"}},
		{path: []string{"x/y/sha256:abcd"}, want: []string{"x/y/sha256-abcd"}},
		// colon in a directory name is preserved; only the basename is fixed
		{path: []string{"x:y/sha256:abcd"}, want: []string{"x:y/sha256-abcd"}},
		{path: []string{"x:y/sha256:abcd"}, want: []string{"x:y/sha256-abcd"}},
		{path: []string{"x:y/sha256:abcd", "sha256:1234"}, want: []string{"x:y/sha256-abcd", "sha256-1234"}},
		{path: []string{"x:y/sha256:abcd", "sha256-1234"}, want: []string{"x:y/sha256-abcd", "sha256-1234"}},
	}

	for _, tt := range cases {
		t.Run(strings.Join(tt.path, "|"), func(t *testing.T) {
			// ":" is not a legal filename character on Windows, so those
			// fixtures cannot even be created there
			hasColon := slices.ContainsFunc(tt.path, func(s string) bool { return strings.Contains(s, ":") })
			if hasColon && runtime.GOOS == "windows" {
				t.Skip("skipping test on windows")
			}

			rootDir := t.TempDir()
			for _, path := range tt.path {
				fullPath := filepath.Join(rootDir, path)
				fullDir, _ := filepath.Split(fullPath)

				t.Logf("creating dir %s", fullDir)
				if err := os.MkdirAll(fullDir, 0o755); err != nil {
					t.Fatal(err)
				}

				t.Logf("writing file %s", fullPath)
				if err := os.WriteFile(fullPath, nil, 0o644); err != nil {
					t.Fatal(err)
				}
			}

			if err := fixBlobs(rootDir); err != nil {
				t.Fatal(err)
			}

			got := slurpFiles(os.DirFS(rootDir))

			// sort both sides so comparison is order-independent
			slices.Sort(tt.want)
			slices.Sort(got)
			if !slices.Equal(got, tt.want) {
				t.Fatalf("got = %v, want %v", got, tt.want)
			}
		})
	}
}

// slurpFiles returns the relative paths of all regular files in fsys,
// panicking on any walk error (test helper only).
func slurpFiles(fsys fs.FS) []string {
	var sfs []string
	fn := func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		sfs = append(sfs, path)
		return nil
	}
	if err := fs.WalkDir(fsys, ".", fn); err != nil {
		panic(err)
	}
	return sfs
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/fixblobs.go
server/fixblobs.go
package server

import (
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

// fixBlobs walks the provided dir and replaces (":") to ("-") in the file
// prefix. (e.g. sha256:1234 -> sha256-1234)
//
// Uses filepath.WalkDir rather than filepath.Walk: WalkDir passes a
// fs.DirEntry and avoids an os.Lstat call per visited entry, which matters
// for blob directories with many entries. Only basenames beginning with
// exactly "sha256:" are renamed; everything else is left untouched.
func fixBlobs(dir string) error {
	return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		baseName := filepath.Base(path)
		typ, sha, ok := strings.Cut(baseName, ":")
		if ok && typ == "sha256" {
			newPath := filepath.Join(filepath.Dir(path), typ+"-"+sha)
			if err := os.Rename(path, newPath); err != nil {
				return err
			}
		}
		return nil
	})
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/auth.go
server/auth.go
package server

import (
	"context"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/auth"
)

// registryChallenge holds the realm/service/scope fields of a registry
// auth challenge (populated from the www-authenticate response header by
// parseRegistryChallenge elsewhere in this package).
type registryChallenge struct {
	Realm   string
	Service string
	Scope   string
}

// URL builds the token-endpoint URL for this challenge: the realm plus
// service, one "scope" query value per space-separated scope, a unix
// timestamp, and a random nonce.
func (r registryChallenge) URL() (*url.URL, error) {
	redirectURL, err := url.Parse(r.Realm)
	if err != nil {
		return nil, err
	}

	values := redirectURL.Query()
	values.Add("service", r.Service)
	// the scope field may contain several space-separated scopes; each
	// becomes its own query parameter
	for _, s := range strings.Split(r.Scope, " ") {
		values.Add("scope", s)
	}
	values.Add("ts", strconv.FormatInt(time.Now().Unix(), 10))

	nonce, err := auth.NewNonce(rand.Reader, 16)
	if err != nil {
		return nil, err
	}
	values.Add("nonce", nonce)

	redirectURL.RawQuery = values.Encode()
	return redirectURL, nil
}

// getAuthorizationToken requests a bearer token from the endpoint described
// by challenge. The request is signed with auth.Sign over a
// "METHOD,URL,body-digest" string (the digest here is of an empty body) and
// sent in the Authorization header. Returns the token from the JSON
// response, or an error carrying the HTTP status code on failure.
func getAuthorizationToken(ctx context.Context, challenge registryChallenge) (string, error) {
	redirectURL, err := challenge.URL()
	if err != nil {
		return "", err
	}

	// sha256 of a nil body: the token request itself has no payload
	sha256sum := sha256.Sum256(nil)
	data := []byte(fmt.Sprintf("%s,%s,%s", http.MethodGet, redirectURL.String(), base64.StdEncoding.EncodeToString([]byte(hex.EncodeToString(sha256sum[:])))))

	headers := make(http.Header)
	signature, err := auth.Sign(ctx, data)
	if err != nil {
		return "", err
	}

	headers.Add("Authorization", signature)

	response, err := makeRequest(ctx, http.MethodGet, redirectURL, headers, nil, &registryOptions{})
	if err != nil {
		return "", err
	}
	defer response.Body.Close()

	body, err := io.ReadAll(response.Body)
	if err != nil {
		return "", fmt.Errorf("%d: %v", response.StatusCode, err)
	}

	if response.StatusCode >= http.StatusBadRequest {
		// include the response body in the error when the server sent one
		if len(body) > 0 {
			return "", fmt.Errorf("%d: %s", response.StatusCode, body)
		} else {
			return "", fmt.Errorf("%d", response.StatusCode)
		}
	}

	var token api.TokenResponse
	if err := json.Unmarshal(body, &token); err != nil {
		return "", err
	}

	return token.Token, nil
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/upload.go
server/upload.go
package server

import (
	"context"
	"crypto/md5"
	"errors"
	"fmt"
	"hash"
	"io"
	"log/slog"
	"math"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/errgroup"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
)

// blobUploadManager maps blob digest -> *blobUpload so that concurrent
// pushes of the same blob share one in-flight upload (see uploadBlob).
var blobUploadManager sync.Map

// blobUpload tracks the state of one in-progress blob upload to a registry.
type blobUpload struct {
	Layer

	Total     int64        // total blob size in bytes, set by Prepare
	Completed atomic.Int64 // bytes uploaded so far, updated by progressWriter

	Parts []blobUploadPart // fixed partition of the blob, computed by Prepare

	// nextURL carries the upload session URL between requests; the
	// registry may hand back a new location after each part.
	nextURL chan *url.URL

	context.CancelFunc // cancels Run; invoked when the last waiter releases

	file *os.File

	done bool
	err  error
	// references counts active Wait callers; the upload is canceled when
	// it drops to zero (see acquire/release).
	references atomic.Int32
}

const (
	numUploadParts          = 16
	minUploadPartSize int64 = 100 * format.MegaByte
	maxUploadPartSize int64 = 1000 * format.MegaByte
)

// Prepare starts an upload session with the registry (optionally requesting
// a cross-repository mount), records the returned upload location, and
// splits the local blob into Parts of between minUploadPartSize and
// maxUploadPartSize bytes. If the registry mounts the blob, the upload is
// marked done immediately and no parts are created.
func (b *blobUpload) Prepare(ctx context.Context, requestURL *url.URL, opts *registryOptions) error {
	p, err := GetBlobsPath(b.Digest)
	if err != nil {
		return err
	}

	if b.From != "" {
		// ask the registry to mount the blob from its source repository
		// instead of re-uploading it
		values := requestURL.Query()
		values.Add("mount", b.Digest)
		values.Add("from", ParseModelPath(b.From).GetNamespaceRepository())
		requestURL.RawQuery = values.Encode()
	}

	resp, err := makeRequestWithRetry(ctx, http.MethodPost, requestURL, nil, nil, opts)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	location := resp.Header.Get("Docker-Upload-Location")
	if location == "" {
		location = resp.Header.Get("Location")
	}

	fi, err := os.Stat(p)
	if err != nil {
		return err
	}
	b.Total = fi.Size()

	// http.StatusCreated indicates a blob has been mounted
	// ref: https://distribution.github.io/distribution/spec/api/#cross-repository-blob-mount
	if resp.StatusCode == http.StatusCreated {
		b.Completed.Store(b.Total)
		b.done = true
		return nil
	}

	// aim for numUploadParts parts, clamped to the min/max part size
	size := b.Total / numUploadParts
	switch {
	case size < minUploadPartSize:
		size = minUploadPartSize
	case size > maxUploadPartSize:
		size = maxUploadPartSize
	}

	var offset int64
	for offset < fi.Size() {
		if offset+size > fi.Size() {
			size = fi.Size() - offset
		}

		// set part.N to the current number of parts
		b.Parts = append(b.Parts, blobUploadPart{N: len(b.Parts), Offset: offset, Size: size})
		offset += size
	}

	if len(b.Parts) > 0 {
		slog.Info(fmt.Sprintf("uploading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size)))
	}

	requestURL, err = url.Parse(location)
	if err != nil {
		return err
	}

	// buffered with capacity 1: the first uploader pulls this URL, and
	// each completed request pushes the next session URL back
	b.nextURL = make(chan *url.URL, 1)
	b.nextURL <- requestURL
	return nil
}

// Run uploads blob parts to the upstream. If the upstream supports redirection, parts will be uploaded
// in parallel as defined by Prepare. Otherwise, parts will be uploaded serially. Run sets b.err on error.
func (b *blobUpload) Run(ctx context.Context, opts *registryOptions) {
	defer blobUploadManager.Delete(b.Digest)
	ctx, b.CancelFunc = context.WithCancel(ctx)

	p, err := GetBlobsPath(b.Digest)
	if err != nil {
		b.err = err
		return
	}

	b.file, err = os.Open(p)
	if err != nil {
		b.err = err
		return
	}
	defer b.file.Close()

	g, inner := errgroup.WithContext(ctx)
	g.SetLimit(numUploadParts)
	for i := range b.Parts {
		part := &b.Parts[i]

		// block until a session URL is available (serializes on the
		// registry unless uploadPart redirects and frees the channel)
		select {
		case <-inner.Done():
		case requestURL := <-b.nextURL:
			g.Go(func() error {
				var err error
				for try := range maxRetries {
					err = b.uploadPart(inner, http.MethodPatch, requestURL, part, opts)
					switch {
					case errors.Is(err, context.Canceled):
						return err
					case errors.Is(err, errMaxRetriesExceeded):
						return err
					case err != nil:
						// exponential backoff: 1s, 2s, 4s, ...
						sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
						slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
						time.Sleep(sleep)
						continue
					}

					return nil
				}

				return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
			})
		}
	}

	if err := g.Wait(); err != nil {
		b.err = err
		return
	}

	requestURL := <-b.nextURL

	// calculate md5 checksum and add it to the commit request
	md5sum := md5.New()
	for _, part := range b.Parts {
		md5sum.Write(part.Sum(nil))
	}

	values := requestURL.Query()
	values.Add("digest", b.Digest)
	values.Add("etag", fmt.Sprintf("%x-%d", md5sum.Sum(nil), len(b.Parts)))
	requestURL.RawQuery = values.Encode()

	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", "0")

	// commit the upload, retrying with exponential backoff
	for try := range maxRetries {
		var resp *http.Response
		resp, err = makeRequestWithRetry(ctx, http.MethodPut, requestURL, headers, nil, opts)
		if errors.Is(err, context.Canceled) {
			break
		} else if err != nil {
			sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
			slog.Info(fmt.Sprintf("%s complete upload attempt %d failed: %v, retrying in %s", b.Digest[7:19], try, err, sleep))
			time.Sleep(sleep)
			continue
		}
		defer resp.Body.Close()
		break
	}

	b.err = err
	b.done = true
}

// uploadPart sends one part of the blob. On a 307 redirect it re-uploads
// the part to the redirect target with PUT (allowing other parts to proceed
// in parallel); on 401 it refreshes the auth token and reports an error so
// the caller retries; on PATCH success it publishes the next session URL.
// Progress written through progressWriter is rolled back on any failure so
// retries do not double-count bytes.
func (b *blobUpload) uploadPart(ctx context.Context, method string, requestURL *url.URL, part *blobUploadPart, opts *registryOptions) error {
	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", strconv.FormatInt(part.Size, 10))

	if method == http.MethodPatch {
		headers.Set("X-Redirect-Uploads", "1")
		headers.Set("Content-Range", fmt.Sprintf("%d-%d", part.Offset, part.Offset+part.Size-1))
	}

	sr := io.NewSectionReader(b.file, part.Offset, part.Size)

	md5sum := md5.New()
	w := &progressWriter{blobUpload: b}

	// tee the part through the progress counter and the md5 hash as it
	// streams to the registry
	resp, err := makeRequest(ctx, method, requestURL, headers, io.TeeReader(sr, io.MultiWriter(w, md5sum)), opts)
	if err != nil {
		w.Rollback()
		return err
	}
	defer resp.Body.Close()

	location := resp.Header.Get("Docker-Upload-Location")
	if location == "" {
		location = resp.Header.Get("Location")
	}

	nextURL, err := url.Parse(location)
	if err != nil {
		w.Rollback()
		return err
	}

	switch {
	case resp.StatusCode == http.StatusTemporaryRedirect:
		w.Rollback()
		// hand the session URL back immediately so other parts can start
		b.nextURL <- nextURL

		redirectURL, err := resp.Location()
		if err != nil {
			return err
		}

		// retry uploading to the redirect URL
		for try := range maxRetries {
			// fresh registryOptions: the redirect target (e.g. object
			// storage) does not take the registry token
			err = b.uploadPart(ctx, http.MethodPut, redirectURL, part, &registryOptions{})
			switch {
			case errors.Is(err, context.Canceled):
				return err
			case errors.Is(err, errMaxRetriesExceeded):
				return err
			case err != nil:
				sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
				slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
				time.Sleep(sleep)
				continue
			}

			return nil
		}

		return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
	case resp.StatusCode == http.StatusUnauthorized:
		w.Rollback()
		challenge := parseRegistryChallenge(resp.Header.Get("www-authenticate"))
		token, err := getAuthorizationToken(ctx, challenge)
		if err != nil {
			return err
		}

		opts.Token = token
		// fall through to the error return below; the caller's retry loop
		// reissues the request with the refreshed token
		fallthrough
	case resp.StatusCode >= http.StatusBadRequest:
		w.Rollback()
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return err
		}

		return fmt.Errorf("http status %s: %s", resp.Status, body)
	}

	if method == http.MethodPatch {
		b.nextURL <- nextURL
	}

	// keep the part's md5 state for the final commit checksum
	part.Hash = md5sum
	return nil
}

// acquire registers a waiter on this upload.
func (b *blobUpload) acquire() {
	b.references.Add(1)
}

// release drops a waiter; the upload is canceled when no waiters remain.
func (b *blobUpload) release() {
	if b.references.Add(-1) == 0 {
		b.CancelFunc()
	}
}

// Wait reports upload progress via fn roughly every 60ms until the upload
// completes (returning its final error) or ctx is done.
func (b *blobUpload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
	b.acquire()
	defer b.release()

	ticker := time.NewTicker(60 * time.Millisecond)
	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			return ctx.Err()
		}

		fn(api.ProgressResponse{
			Status:    fmt.Sprintf("pushing %s", b.Digest[7:19]),
			Digest:    b.Digest,
			Total:     b.Total,
			Completed: b.Completed.Load(),
		})

		if b.done || b.err != nil {
			return b.err
		}
	}
}

// blobUploadPart is one contiguous chunk of the blob, plus the md5 state
// accumulated while uploading it (embedded hash.Hash).
type blobUploadPart struct {
	// N is the part number
	N      int
	Offset int64
	Size   int64
	hash.Hash
}

// progressWriter counts bytes written into the shared Completed counter and
// can roll its own contribution back when a request fails.
type progressWriter struct {
	written int64
	*blobUpload
}

// Write records n bytes of progress; it never fails.
func (p *progressWriter) Write(b []byte) (n int, err error) {
	n = len(b)
	p.written += int64(n)
	p.Completed.Add(int64(n))
	return n, nil
}

// Rollback subtracts everything this writer has counted so a retried
// request starts from an accurate total.
func (p *progressWriter) Rollback() {
	p.Completed.Add(-p.written)
	p.written = 0
}

// uploadBlob pushes one layer blob to the registry for mp. If the registry
// already has the blob (HEAD succeeds) it only reports completed progress.
// Otherwise it joins or starts a shared blobUpload and waits on it,
// reporting progress through fn.
func uploadBlob(ctx context.Context, mp ModelPath, layer Layer, opts *registryOptions, fn func(api.ProgressResponse)) error {
	requestURL := mp.BaseURL()
	requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "blobs", layer.Digest)

	resp, err := makeRequestWithRetry(ctx, http.MethodHead, requestURL, nil, nil, opts)
	switch {
	case errors.Is(err, os.ErrNotExist):
		// blob not on the registry yet; fall through to upload it
	case err != nil:
		return err
	default:
		defer resp.Body.Close()
		fn(api.ProgressResponse{
			Status:    fmt.Sprintf("pushing %s", layer.Digest[7:19]),
			Digest:    layer.Digest,
			Total:     layer.Size,
			Completed: layer.Size,
		})

		return nil
	}

	// join an existing upload for this digest, or become the one to start it
	data, ok := blobUploadManager.LoadOrStore(layer.Digest, &blobUpload{Layer: layer})
	upload := data.(*blobUpload)
	if !ok {
		requestURL := mp.BaseURL()
		requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "blobs/uploads/")
		if err := upload.Prepare(ctx, requestURL, opts); err != nil {
			blobUploadManager.Delete(layer.Digest)
			return err
		}

		// detached from the caller's ctx so one canceled waiter does not
		// kill the upload for everyone (lifetime is managed by references)
		//nolint:contextcheck
		go upload.Run(context.Background(), opts)
	}

	return upload.Wait(ctx, fn)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false
ollama/ollama
https://github.com/ollama/ollama/blob/626af2d80973270c4d59b8df7153ac47ad67ed7b/server/layer.go
server/layer.go
package server

import (
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"os"
)

// Layer is one content-addressed blob of a model manifest.
type Layer struct {
	MediaType string `json:"mediaType"`
	Digest    string `json:"digest"`
	Size      int64  `json:"size"`
	From      string `json:"from,omitempty"`

	// status is a human-readable note ("using existing layer ..." /
	// "creating new layer ...") set by the constructors; not serialized.
	status string
}

// NewLayer streams r into the blob store, computing its sha256 digest as it
// writes. The data lands in a temp file first and is only renamed into
// place if no blob with that digest already exists, so identical content is
// stored once.
func NewLayer(r io.Reader, mediatype string) (Layer, error) {
	blobs, err := GetBlobsPath("")
	if err != nil {
		return Layer{}, err
	}

	temp, err := os.CreateTemp(blobs, "sha256-")
	if err != nil {
		return Layer{}, err
	}
	// the deferred Close is a no-op after the explicit Close below; the
	// deferred Remove cleans up the temp file unless it was renamed away
	defer temp.Close()
	defer os.Remove(temp.Name())

	sha256sum := sha256.New()
	n, err := io.Copy(io.MultiWriter(temp, sha256sum), r)
	if err != nil {
		return Layer{}, err
	}

	if err := temp.Close(); err != nil {
		return Layer{}, err
	}

	digest := fmt.Sprintf("sha256:%x", sha256sum.Sum(nil))
	blob, err := GetBlobsPath(digest)
	if err != nil {
		return Layer{}, err
	}

	status := "using existing layer"
	// only move the temp file into place when the blob is not already stored
	if _, err := os.Stat(blob); err != nil {
		status = "creating new layer"
		if err := os.Rename(temp.Name(), blob); err != nil {
			return Layer{}, err
		}
		if err := os.Chmod(blob, 0o644); err != nil {
			return Layer{}, err
		}
	}

	return Layer{
		MediaType: mediatype,
		Digest:    digest,
		Size:      n,
		status:    fmt.Sprintf("%s %s", status, digest),
	}, nil
}

// NewLayerFromLayer builds a Layer for a blob that already exists in the
// local store, recording the repository it came from. Fails if the digest
// is empty or the blob file is missing.
func NewLayerFromLayer(digest, mediatype, from string) (Layer, error) {
	if digest == "" {
		return Layer{}, errors.New("creating new layer from layer with empty digest")
	}

	blob, err := GetBlobsPath(digest)
	if err != nil {
		return Layer{}, err
	}

	fi, err := os.Stat(blob)
	if err != nil {
		return Layer{}, err
	}

	return Layer{
		MediaType: mediatype,
		Digest:    digest,
		Size:      fi.Size(),
		From:      from,
		status:    fmt.Sprintf("using existing layer %s", digest),
	}, nil
}

// Open returns a reader over the layer's blob file.
func (l *Layer) Open() (io.ReadSeekCloser, error) {
	if l.Digest == "" {
		return nil, errors.New("opening layer with empty digest")
	}

	blob, err := GetBlobsPath(l.Digest)
	if err != nil {
		return nil, err
	}

	return os.Open(blob)
}

// Remove deletes the layer's blob from disk, but only if no manifest
// (layer or config) still references its digest. An empty digest is a
// no-op.
func (l *Layer) Remove() error {
	if l.Digest == "" {
		return nil
	}

	// Ignore corrupt manifests to avoid blocking deletion of layers that are freshly orphaned
	ms, err := Manifests(true)
	if err != nil {
		return err
	}

	for _, m := range ms {
		for _, layer := range append(m.Layers, m.Config) {
			if layer.Digest == l.Digest {
				// something is using this layer
				return nil
			}
		}
	}

	blob, err := GetBlobsPath(l.Digest)
	if err != nil {
		return err
	}

	return os.Remove(blob)
}
go
MIT
626af2d80973270c4d59b8df7153ac47ad67ed7b
2026-01-07T08:35:43.337630Z
false