repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/focus.go | focus.go | package tea
// FocusMsg represents a terminal focus message.
// This occurs when the terminal gains focus.
// It is only delivered when focus reporting is enabled via [WithReportFocus].
type FocusMsg struct{}
// BlurMsg represents a terminal blur message.
// This occurs when the terminal loses focus.
// It is only delivered when focus reporting is enabled via [WithReportFocus].
type BlurMsg struct{}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/options_test.go | options_test.go | package tea
import (
"bytes"
"context"
"os"
"sync/atomic"
"testing"
)
// TestOptions verifies that each ProgramOption sets the corresponding field
// or startup flag on a freshly constructed Program.
func TestOptions(t *testing.T) {
	t.Run("output", func(t *testing.T) {
		var b bytes.Buffer
		p := NewProgram(nil, WithOutput(&b))
		if f, ok := p.output.(*os.File); ok {
			t.Errorf("expected output to custom, got %v", f.Fd())
		}
	})

	t.Run("custom input", func(t *testing.T) {
		var b bytes.Buffer
		p := NewProgram(nil, WithInput(&b))
		if p.input != &b {
			t.Errorf("expected input to custom, got %v", p.input)
		}
		if p.inputType != customInput {
			t.Errorf("expected startup options to have custom input set, got %v", p.input)
		}
	})

	t.Run("renderer", func(t *testing.T) {
		p := NewProgram(nil, WithoutRenderer())
		switch p.renderer.(type) {
		case *nilRenderer:
			return
		default:
			t.Errorf("expected renderer to be a nilRenderer, got %v", p.renderer)
		}
	})

	t.Run("without signals", func(t *testing.T) {
		p := NewProgram(nil, WithoutSignals())
		if atomic.LoadUint32(&p.ignoreSignals) == 0 {
			t.Errorf("ignore signals should have been set")
		}
	})

	t.Run("filter", func(t *testing.T) {
		p := NewProgram(nil, WithFilter(func(_ Model, msg Msg) Msg { return msg }))
		if p.filter == nil {
			t.Errorf("expected filter to be set")
		}
	})

	t.Run("external context", func(t *testing.T) {
		extCtx, extCancel := context.WithCancel(context.Background())
		defer extCancel()
		p := NewProgram(nil, WithContext(extCtx))
		if p.externalCtx != extCtx || p.externalCtx == context.Background() {
			t.Errorf("expected passed in external context, got default (nil)")
		}
	})

	t.Run("input options", func(t *testing.T) {
		// exercise asserts that the given option yields the expected input type.
		exercise := func(t *testing.T, opt ProgramOption, expect inputType) {
			p := NewProgram(nil, opt)
			if p.inputType != expect {
				t.Errorf("expected input type %s, got %s", expect, p.inputType)
			}
		}

		t.Run("tty input", func(t *testing.T) {
			exercise(t, WithInputTTY(), ttyInput)
		})

		t.Run("custom input", func(t *testing.T) {
			var b bytes.Buffer
			exercise(t, WithInput(&b), customInput)
		})
	})

	t.Run("startup options", func(t *testing.T) {
		// exercise asserts that the given option sets the expected startup flag.
		exercise := func(t *testing.T, opt ProgramOption, expect startupOptions) {
			p := NewProgram(nil, opt)
			if !p.startupOptions.has(expect) {
				t.Errorf("expected startup options have %v, got %v", expect, p.startupOptions)
			}
		}

		t.Run("alt screen", func(t *testing.T) {
			exercise(t, WithAltScreen(), withAltScreen)
		})

		t.Run("bracketed paste disabled", func(t *testing.T) {
			exercise(t, WithoutBracketedPaste(), withoutBracketedPaste)
		})

		t.Run("ansi compression", func(t *testing.T) {
			exercise(t, WithANSICompressor(), withANSICompressor)
		})

		t.Run("without catch panics", func(t *testing.T) {
			exercise(t, WithoutCatchPanics(), withoutCatchPanics)
		})

		t.Run("without signal handler", func(t *testing.T) {
			exercise(t, WithoutSignalHandler(), withoutSignalHandler)
		})

		// The two mouse modes are mutually exclusive: whichever option is
		// applied last wins and clears the other's flag.
		t.Run("mouse cell motion", func(t *testing.T) {
			p := NewProgram(nil, WithMouseAllMotion(), WithMouseCellMotion())
			if !p.startupOptions.has(withMouseCellMotion) {
				t.Errorf("expected startup options have %v, got %v", withMouseCellMotion, p.startupOptions)
			}
			if p.startupOptions.has(withMouseAllMotion) {
				t.Errorf("expected startup options not have %v, got %v", withMouseAllMotion, p.startupOptions)
			}
		})

		t.Run("mouse all motion", func(t *testing.T) {
			p := NewProgram(nil, WithMouseCellMotion(), WithMouseAllMotion())
			if !p.startupOptions.has(withMouseAllMotion) {
				t.Errorf("expected startup options have %v, got %v", withMouseAllMotion, p.startupOptions)
			}
			if p.startupOptions.has(withMouseCellMotion) {
				t.Errorf("expected startup options not have %v, got %v", withMouseCellMotion, p.startupOptions)
			}
		})
	})

	t.Run("multiple", func(t *testing.T) {
		p := NewProgram(nil, WithMouseAllMotion(), WithoutBracketedPaste(), WithAltScreen(), WithInputTTY())
		for _, opt := range []startupOptions{withMouseAllMotion, withoutBracketedPaste, withAltScreen} {
			if !p.startupOptions.has(opt) {
				t.Errorf("expected startup options have %v, got %v", opt, p.startupOptions)
			}
		}
		// The input-type check is independent of the startup-options loop, so
		// run it once and report the values actually being compared (the old
		// version repeated it per iteration and printed the wrong values).
		if p.inputType != ttyInput {
			t.Errorf("expected input to be %v, got %v", ttyInput, p.inputType)
		}
	})
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/options.go | options.go | package tea
import (
"context"
"io"
"sync/atomic"
)
// ProgramOption is used to set options when initializing a Program. Program can
// accept a variable number of options. Each option is simply a function that
// mutates the Program during construction.
//
// Example usage:
//
//	p := NewProgram(model, WithInput(someInput), WithOutput(someOutput))
type ProgramOption func(*Program)
// WithContext runs the Program under the given context so that execution can
// be cancelled from the outside. A cancelled Program exits with the error
// ErrProgramKilled.
func WithContext(ctx context.Context) ProgramOption {
	return func(prog *Program) {
		prog.externalCtx = ctx
	}
}
// WithOutput redirects where the program renders to. Stdout is used when this
// option is not given, so most programs won't need it.
func WithOutput(output io.Writer) ProgramOption {
	return func(prog *Program) {
		prog.output = output
	}
}
// WithInput sets the reader the program takes its input from. Stdin is the
// default, so most programs won't need this. Passing nil disables input
// entirely:
//
//	p := NewProgram(model, WithInput(nil))
func WithInput(input io.Reader) ProgramOption {
	return func(prog *Program) {
		prog.inputType = customInput
		prog.input = input
	}
}
// WithInputTTY makes the program read from a newly opened TTY (the console
// input device on Windows) rather than the default stdin.
func WithInputTTY() ProgramOption {
	return func(prog *Program) {
		prog.inputType = ttyInput
	}
}
// WithEnvironment sets the environment variables the program will use. This
// is handy when the program runs inside a remote session (e.g. SSH) and the
// remote session's environment should be forwarded to it.
//
// Example:
//
//	var sess ssh.Session // ssh.Session is a type from the github.com/charmbracelet/ssh package
//	pty, _, _ := sess.Pty()
//	environ := append(sess.Environ(), "TERM="+pty.Term)
//	p := tea.NewProgram(model, tea.WithEnvironment(environ))
func WithEnvironment(env []string) ProgramOption {
	return func(prog *Program) {
		prog.environ = env
	}
}
// WithoutSignalHandler skips the signal handler Bubble Tea normally installs
// for Programs, leaving signal handling entirely up to the caller.
func WithoutSignalHandler() ProgramOption {
	return func(prog *Program) {
		prog.startupOptions |= withoutSignalHandler
	}
}
// WithoutCatchPanics turns off the panic catching Bubble Tea performs by
// default. With it disabled, Bubble Tea's usual cleanup does not run on
// panic, which tends to leave the terminal in a fairly unusable state.
func WithoutCatchPanics() ProgramOption {
	return func(prog *Program) {
		prog.startupOptions |= withoutCatchPanics
	}
}
// WithoutSignals makes the program ignore OS signals.
// This is mainly useful for testing.
func WithoutSignals() ProgramOption {
	return func(prog *Program) {
		atomic.StoreUint32(&prog.ignoreSignals, 1)
	}
}
// WithAltScreen starts the program with the alternate screen buffer enabled,
// i.e. in full-window mode. The altscreen is exited automatically when the
// program quits.
//
// Example:
//
//	p := tea.NewProgram(Model{}, tea.WithAltScreen())
//	if _, err := p.Run(); err != nil {
//		fmt.Println("Error running program:", err)
//		os.Exit(1)
//	}
//
// To switch to the altscreen after the program has already started running,
// use the EnterAltScreen command instead.
func WithAltScreen() ProgramOption {
	return func(prog *Program) {
		prog.startupOptions |= withAltScreen
	}
}
// WithoutBracketedPaste starts the program with bracketed paste turned off.
func WithoutBracketedPaste() ProgramOption {
	return func(prog *Program) {
		prog.startupOptions |= withoutBracketedPaste
	}
}
// WithMouseCellMotion starts the program with the mouse enabled in "cell
// motion" mode.
//
// This mode reports clicks, releases, and wheel events, plus motion while a
// mouse button is held down (i.e. drag events). It enjoys wider terminal
// support than all-motion mode.
//
// Extended mode (SGR) is attempted first, with a fallback to normal mode
// (X10) on terminals that don't support it.
//
// Use the EnableMouseCellMotion command to switch this on after the program
// has started, and the DisableMouse command to turn the mouse off while the
// program is running.
//
// The mouse is automatically disabled when the program exits.
func WithMouseCellMotion() ProgramOption {
	return func(prog *Program) {
		prog.startupOptions &^= withMouseAllMotion // clear
		prog.startupOptions |= withMouseCellMotion // set
	}
}
// WithMouseAllMotion starts the program with the mouse enabled in "all
// motion" mode.
//
// EnableMouseAllMotion is a special command enabling mouse click, release,
// wheel, and motion events, delivered whether or not a mouse button is
// pressed — effectively enabling hover interactions.
//
// Extended mode (SGR) is attempted first, with a fallback to normal mode
// (X10) on terminals that don't support it.
//
// Many modern terminals support this, but not all. If in doubt, use
// EnableMouseCellMotion instead.
//
// Use the EnableMouseAllMotion command to switch this on after the program
// has started, and the DisableMouse command to turn the mouse off while the
// program is running.
//
// The mouse is automatically disabled when the program exits.
func WithMouseAllMotion() ProgramOption {
	return func(prog *Program) {
		prog.startupOptions &^= withMouseCellMotion // clear
		prog.startupOptions |= withMouseAllMotion // set
	}
}
// WithoutRenderer disables the renderer entirely: output and log statements
// are written plainly to stdout (or whatever output is configured) with no
// rendering or redrawing logic — just like a non-TUI command-line tool. This
// is useful for building non-TUI applications on the Bubble Tea framework, or
// for offering an additional non-TUI mode; for instance, a program could
// behave like a daemon when output is not a TTY.
func WithoutRenderer() ProgramOption {
	return func(prog *Program) {
		prog.renderer = &nilRenderer{}
	}
}
// WithANSICompressor strips redundant ANSI sequences, potentially shrinking
// the output at the cost of some processing overhead.
//
// This feature is provisional and may be changed or removed in a future
// version of this package.
//
// Deprecated: this incurs a noticeable performance hit. A future release will
// optimize ANSI automatically without the performance penalty.
func WithANSICompressor() ProgramOption {
	return func(prog *Program) {
		prog.startupOptions |= withANSICompressor
	}
}
// WithFilter supplies an event filter invoked before Bubble Tea processes a
// tea.Msg. Whatever tea.Msg the filter returns is handled in place of the
// original event; returning nil drops the event so Bubble Tea never sees it.
//
// One use case is preventing a program from shutting down while there are
// unsaved changes.
//
// Example:
//
//	func filter(m tea.Model, msg tea.Msg) tea.Msg {
//		if _, ok := msg.(tea.QuitMsg); !ok {
//			return msg
//		}
//
//		model := m.(myModel)
//		if model.hasChanges {
//			return nil
//		}
//
//		return msg
//	}
//
//	p := tea.NewProgram(Model{}, tea.WithFilter(filter));
//
//	if _, err := p.Run(); err != nil {
//		fmt.Println("Error running program:", err)
//		os.Exit(1)
//	}
func WithFilter(filter func(Model, Msg) Msg) ProgramOption {
	return func(prog *Program) {
		prog.filter = filter
	}
}
// WithFPS caps the renderer at a custom maximum frame rate. Values below 1
// fall back to the default of 60, and values over 120 are capped at 120.
func WithFPS(fps int) ProgramOption {
	return func(prog *Program) {
		prog.fps = fps
	}
}
// WithReportFocus makes the terminal report when it gains and loses focus,
// delivering [FocusMsg] and [BlurMsg] messages to your Update method.
//
// Most terminals and multiplexers support focus reporting, but some do not,
// and tmux must be explicitly configured to forward focus events.
func WithReportFocus() ProgramOption {
	return func(prog *Program) {
		prog.startupOptions |= withReportFocus
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/key_windows.go | key_windows.go | //go:build windows
// +build windows
package tea
import (
"context"
"fmt"
"io"
"time"
"github.com/erikgeiser/coninput"
localereader "github.com/mattn/go-localereader"
"github.com/muesli/cancelreader"
)
// readInputs dispatches input reading to the appropriate backend: native
// Windows Console API events when the input is a conInputReader, otherwise
// the generic ANSI input parser fed through a locale-aware reader.
func readInputs(ctx context.Context, msgs chan<- Msg, input io.Reader) error {
	if coninReader, ok := input.(*conInputReader); ok {
		return readConInputs(ctx, msgs, coninReader)
	}
	return readAnsiInputs(ctx, msgs, localereader.NewReader(input))
}
// readConInputs reads Windows Console input records from con and translates
// them into Bubble Tea messages sent on msgsch. It loops until reading fails,
// the reader is cancelled, or ctx is done.
func readConInputs(ctx context.Context, msgsch chan<- Msg, con *conInputReader) error {
	var ps coninput.ButtonState                 // keep track of previous mouse state
	var ws coninput.WindowBufferSizeEventRecord // keep track of the last window size event
	for {
		events, err := peekAndReadConsInput(con)
		if err != nil {
			return err
		}
		for _, event := range events {
			var msgs []Msg
			switch e := event.Unwrap().(type) {
			case coninput.KeyEventRecord:
				// Skip key-up events and bare shift presses; neither maps to
				// a KeyMsg.
				if !e.KeyDown || e.VirtualKeyCode == coninput.VK_SHIFT {
					continue
				}
				// The console reports held keys with a repeat count; emit one
				// KeyMsg per repeat.
				for i := 0; i < int(e.RepeatCount); i++ {
					eventKeyType := keyType(e)
					var runes []rune
					// Add the character only if the key type is an actual character and not a control sequence.
					// This mimics the behavior in readAnsiInputs where the character is also removed.
					// We don't need to handle KeySpace here. See the comment in keyType().
					if eventKeyType == KeyRunes {
						runes = []rune{e.Char}
					}
					msgs = append(msgs, KeyMsg{
						Type: eventKeyType,
						Runes: runes,
						Alt: e.ControlKeyState.Contains(coninput.LEFT_ALT_PRESSED | coninput.RIGHT_ALT_PRESSED),
					})
				}
			case coninput.WindowBufferSizeEventRecord:
				// Only forward actual size changes, not duplicate size events.
				if e != ws {
					ws = e
					msgs = append(msgs, WindowSizeMsg{
						Width: int(e.Size.X),
						Height: int(e.Size.Y),
					})
				}
			case coninput.MouseEventRecord:
				event := mouseEvent(ps, e)
				if event.Type != MouseUnknown {
					msgs = append(msgs, event)
				}
				ps = e.ButtonState
			case coninput.FocusEventRecord, coninput.MenuEventRecord:
				// ignore
			default: // unknown event
				continue
			}
			// Send all messages to the channel, bailing out promptly if the
			// context is cancelled while the receiver isn't ready.
			for _, msg := range msgs {
				select {
				case msgsch <- msg:
				case <-ctx.Done():
					err := ctx.Err()
					if err != nil {
						return fmt.Errorf("coninput context error: %w", err)
					}
					return nil
				}
			}
		}
	}
}
// Peek for new input in a tight loop and then read the input.
// windows.CancelIo* does not work reliably so peek first and only use the data if
// the console input is not cancelled.
func peekAndReadConsInput(con *conInputReader) ([]coninput.InputRecord, error) {
	events, err := peekConsInput(con)
	if err != nil {
		return events, err
	}
	// Read exactly as many records as the peek reported available.
	events, err = coninput.ReadNConsoleInputs(con.conin, intToUint32OrDie(len(events)))
	// Check cancellation before the read error: a cancelled reader should
	// surface ErrCanceled rather than whatever the read returned.
	if con.isCanceled() {
		return events, cancelreader.ErrCanceled
	}
	if err != nil {
		return events, fmt.Errorf("read coninput events: %w", err)
	}
	return events, nil
}
// intToUint32OrDie converts i to uint32, panicking when i is negative and
// thus not representable. The explicit check satisfies lint G115.
func intToUint32OrDie(i int) uint32 {
	if i >= 0 {
		return uint32(i) //nolint:gosec
	}
	panic("cannot convert numEvents " + fmt.Sprint(i) + " to uint32")
}
// Keeps peeking until there is data or the input is cancelled.
func peekConsInput(con *conInputReader) ([]coninput.InputRecord, error) {
	for {
		// Peek at up to 16 records without consuming them.
		events, err := coninput.PeekNConsoleInputs(con.conin, 16)
		// Cancellation takes precedence over any peek error.
		if con.isCanceled() {
			return events, cancelreader.ErrCanceled
		}
		if err != nil {
			return events, fmt.Errorf("peek coninput events: %w", err)
		}
		if len(events) > 0 {
			return events, nil
		}
		// Sleep for a bit to avoid busy waiting.
		time.Sleep(16 * time.Millisecond)
	}
}
// mouseEventButton determines which mouse button changed between the previous
// state p and the current state s, and whether that change was a press or a
// release.
func mouseEventButton(p, s coninput.ButtonState) (button MouseButton, action MouseAction) {
	// btn has a bit set for every button whose state differs from last time.
	btn := p ^ s
	action = MouseActionPress
	// If the changed button is not among the currently-held buttons, it was
	// just released.
	if btn&s == 0 {
		action = MouseActionRelease
	}
	if btn == 0 {
		// No button changed state; report whichever button is currently held
		// (e.g. during motion with a button down).
		switch {
		case s&coninput.FROM_LEFT_1ST_BUTTON_PRESSED > 0:
			button = MouseButtonLeft
		case s&coninput.FROM_LEFT_2ND_BUTTON_PRESSED > 0:
			button = MouseButtonMiddle
		case s&coninput.RIGHTMOST_BUTTON_PRESSED > 0:
			button = MouseButtonRight
		case s&coninput.FROM_LEFT_3RD_BUTTON_PRESSED > 0:
			button = MouseButtonBackward
		case s&coninput.FROM_LEFT_4TH_BUTTON_PRESSED > 0:
			button = MouseButtonForward
		}
		return button, action
	}
	// Otherwise map the changed bit to its button.
	switch btn {
	case coninput.FROM_LEFT_1ST_BUTTON_PRESSED: // left button
		button = MouseButtonLeft
	case coninput.RIGHTMOST_BUTTON_PRESSED: // right button
		button = MouseButtonRight
	case coninput.FROM_LEFT_2ND_BUTTON_PRESSED: // middle button
		button = MouseButtonMiddle
	case coninput.FROM_LEFT_3RD_BUTTON_PRESSED: // unknown (possibly mouse backward)
		button = MouseButtonBackward
	case coninput.FROM_LEFT_4TH_BUTTON_PRESSED: // unknown (possibly mouse forward)
		button = MouseButtonForward
	}
	return button, action
}
// mouseEvent converts a Windows console mouse event into a MouseMsg, using
// the previous button state p to work out which button changed.
func mouseEvent(p coninput.ButtonState, e coninput.MouseEventRecord) MouseMsg {
	ev := MouseMsg{
		X: int(e.MousePositon.X),
		Y: int(e.MousePositon.Y),
		Alt: e.ControlKeyState.Contains(coninput.LEFT_ALT_PRESSED | coninput.RIGHT_ALT_PRESSED),
		Ctrl: e.ControlKeyState.Contains(coninput.LEFT_CTRL_PRESSED | coninput.RIGHT_CTRL_PRESSED),
		Shift: e.ControlKeyState.Contains(coninput.SHIFT_PRESSED),
	}
	switch e.EventFlags {
	case coninput.CLICK, coninput.DOUBLE_CLICK:
		ev.Button, ev.Action = mouseEventButton(p, e.ButtonState)
		if ev.Action == MouseActionRelease {
			ev.Type = MouseRelease
		}
		// NOTE(review): the switch below overwrites ev.Type for any known
		// button, so the MouseRelease set above survives only when the button
		// is unknown — confirm this matches the intended legacy Type
		// semantics.
		switch ev.Button { //nolint:exhaustive
		case MouseButtonLeft:
			ev.Type = MouseLeft
		case MouseButtonMiddle:
			ev.Type = MouseMiddle
		case MouseButtonRight:
			ev.Type = MouseRight
		case MouseButtonBackward:
			ev.Type = MouseBackward
		case MouseButtonForward:
			ev.Type = MouseForward
		}
	case coninput.MOUSE_WHEELED:
		if e.WheelDirection > 0 {
			ev.Button = MouseButtonWheelUp
			ev.Type = MouseWheelUp
		} else {
			ev.Button = MouseButtonWheelDown
			ev.Type = MouseWheelDown
		}
	case coninput.MOUSE_HWHEELED:
		if e.WheelDirection > 0 {
			ev.Button = MouseButtonWheelRight
			ev.Type = MouseWheelRight
		} else {
			ev.Button = MouseButtonWheelLeft
			ev.Type = MouseWheelLeft
		}
	case coninput.MOUSE_MOVED:
		// For pure motion, only the held button matters, not press/release.
		ev.Button, _ = mouseEventButton(p, e.ButtonState)
		ev.Action = MouseActionMotion
		ev.Type = MouseMotion
	}
	return ev
}
// keyType translates a Windows console key event into the corresponding
// KeyType, taking the shift and ctrl modifier state into account for keys
// that have dedicated modified variants (tab, arrows, home/end).
func keyType(e coninput.KeyEventRecord) KeyType {
	code := e.VirtualKeyCode
	shiftPressed := e.ControlKeyState.Contains(coninput.SHIFT_PRESSED)
	ctrlPressed := e.ControlKeyState.Contains(coninput.LEFT_CTRL_PRESSED | coninput.RIGHT_CTRL_PRESSED)
	switch code { //nolint:exhaustive
	case coninput.VK_RETURN:
		return KeyEnter
	case coninput.VK_BACK:
		return KeyBackspace
	case coninput.VK_TAB:
		if shiftPressed {
			return KeyShiftTab
		}
		return KeyTab
	case coninput.VK_SPACE:
		return KeyRunes // this could be KeySpace but on unix space also produces KeyRunes
	case coninput.VK_ESCAPE:
		return KeyEscape
	case coninput.VK_UP:
		switch {
		case shiftPressed && ctrlPressed:
			return KeyCtrlShiftUp
		case shiftPressed:
			return KeyShiftUp
		case ctrlPressed:
			return KeyCtrlUp
		default:
			return KeyUp
		}
	case coninput.VK_DOWN:
		switch {
		case shiftPressed && ctrlPressed:
			return KeyCtrlShiftDown
		case shiftPressed:
			return KeyShiftDown
		case ctrlPressed:
			return KeyCtrlDown
		default:
			return KeyDown
		}
	case coninput.VK_RIGHT:
		switch {
		case shiftPressed && ctrlPressed:
			return KeyCtrlShiftRight
		case shiftPressed:
			return KeyShiftRight
		case ctrlPressed:
			return KeyCtrlRight
		default:
			return KeyRight
		}
	case coninput.VK_LEFT:
		switch {
		case shiftPressed && ctrlPressed:
			return KeyCtrlShiftLeft
		case shiftPressed:
			return KeyShiftLeft
		case ctrlPressed:
			return KeyCtrlLeft
		default:
			return KeyLeft
		}
	case coninput.VK_HOME:
		switch {
		case shiftPressed && ctrlPressed:
			return KeyCtrlShiftHome
		case shiftPressed:
			return KeyShiftHome
		case ctrlPressed:
			return KeyCtrlHome
		default:
			return KeyHome
		}
	case coninput.VK_END:
		switch {
		case shiftPressed && ctrlPressed:
			return KeyCtrlShiftEnd
		case shiftPressed:
			return KeyShiftEnd
		case ctrlPressed:
			return KeyCtrlEnd
		default:
			return KeyEnd
		}
	case coninput.VK_PRIOR:
		return KeyPgUp
	case coninput.VK_NEXT:
		return KeyPgDown
	case coninput.VK_DELETE:
		return KeyDelete
	case coninput.VK_F1:
		return KeyF1
	case coninput.VK_F2:
		return KeyF2
	case coninput.VK_F3:
		return KeyF3
	case coninput.VK_F4:
		return KeyF4
	case coninput.VK_F5:
		return KeyF5
	case coninput.VK_F6:
		return KeyF6
	case coninput.VK_F7:
		return KeyF7
	case coninput.VK_F8:
		return KeyF8
	case coninput.VK_F9:
		return KeyF9
	case coninput.VK_F10:
		return KeyF10
	case coninput.VK_F11:
		return KeyF11
	case coninput.VK_F12:
		return KeyF12
	case coninput.VK_F13:
		return KeyF13
	case coninput.VK_F14:
		return KeyF14
	case coninput.VK_F15:
		return KeyF15
	case coninput.VK_F16:
		return KeyF16
	case coninput.VK_F17:
		return KeyF17
	case coninput.VK_F18:
		return KeyF18
	case coninput.VK_F19:
		return KeyF19
	case coninput.VK_F20:
		return KeyF20
	default:
		switch {
		case e.ControlKeyState.Contains(coninput.LEFT_CTRL_PRESSED) && e.ControlKeyState.Contains(coninput.RIGHT_ALT_PRESSED):
			// AltGr is pressed, then it's a rune.
			fallthrough
		case !e.ControlKeyState.Contains(coninput.LEFT_CTRL_PRESSED) && !e.ControlKeyState.Contains(coninput.RIGHT_CTRL_PRESSED):
			return KeyRunes
		}
		// Ctrl is held: map the control character the console produced to
		// its KeyCtrl* constant.
		switch e.Char {
		case '@':
			return KeyCtrlAt
		case '\x01':
			return KeyCtrlA
		case '\x02':
			return KeyCtrlB
		case '\x03':
			return KeyCtrlC
		case '\x04':
			return KeyCtrlD
		case '\x05':
			return KeyCtrlE
		case '\x06':
			return KeyCtrlF
		case '\a':
			return KeyCtrlG
		case '\b':
			return KeyCtrlH
		case '\t':
			return KeyCtrlI
		case '\n':
			return KeyCtrlJ
		case '\v':
			return KeyCtrlK
		case '\f':
			return KeyCtrlL
		case '\r':
			return KeyCtrlM
		case '\x0e':
			return KeyCtrlN
		case '\x0f':
			return KeyCtrlO
		case '\x10':
			return KeyCtrlP
		case '\x11':
			return KeyCtrlQ
		case '\x12':
			return KeyCtrlR
		case '\x13':
			return KeyCtrlS
		case '\x14':
			return KeyCtrlT
		case '\x15':
			return KeyCtrlU
		case '\x16':
			return KeyCtrlV
		case '\x17':
			return KeyCtrlW
		case '\x18':
			return KeyCtrlX
		case '\x19':
			return KeyCtrlY
		case '\x1a':
			return KeyCtrlZ
		case '\x1b':
			return KeyCtrlOpenBracket // KeyEscape
		case '\x1c':
			return KeyCtrlBackslash
		case '\x1f':
			return KeyCtrlUnderscore
		}
		// Some ctrl combinations don't produce a control character; fall back
		// to the virtual key code for those.
		switch code { //nolint:exhaustive
		case coninput.VK_OEM_4:
			return KeyCtrlOpenBracket
		case coninput.VK_OEM_6:
			return KeyCtrlCloseBracket
		}
		return KeyRunes
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/inputreader_other.go | inputreader_other.go | //go:build !windows
// +build !windows
package tea
import (
"fmt"
"io"
"github.com/muesli/cancelreader"
)
// newInputReader wraps r in a cancelable reader. The boolean parameter is
// unused on non-Windows platforms.
func newInputReader(r io.Reader, _ bool) (cancelreader.CancelReader, error) {
	reader, err := cancelreader.NewReader(r)
	if err == nil {
		return reader, nil
	}
	return nil, fmt.Errorf("bubbletea: error creating cancel reader: %w", err)
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/tty_windows.go | tty_windows.go | //go:build windows
// +build windows
package tea
import (
"fmt"
"os"
"github.com/charmbracelet/x/term"
"golang.org/x/sys/windows"
)
// initInput prepares the Windows console for the program: it puts the input
// terminal into raw mode and enables virtual terminal (VT) sequences on both
// the console input and output handles, saving previous state so it can be
// restored on exit.
func (p *Program) initInput() (err error) {
	// Save stdin state and enable VT input
	// We also need to enable VT
	// input here.
	if f, ok := p.input.(term.File); ok && term.IsTerminal(f.Fd()) {
		p.ttyInput = f
		p.previousTtyInputState, err = term.MakeRaw(p.ttyInput.Fd())
		if err != nil {
			return fmt.Errorf("error making raw: %w", err)
		}
		// Enable VT input
		var mode uint32
		if err := windows.GetConsoleMode(windows.Handle(p.ttyInput.Fd()), &mode); err != nil {
			return fmt.Errorf("error getting console mode: %w", err)
		}
		if err := windows.SetConsoleMode(windows.Handle(p.ttyInput.Fd()), mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil {
			return fmt.Errorf("error setting console mode: %w", err)
		}
	}
	// Save output screen buffer state and enable VT processing.
	if f, ok := p.output.(term.File); ok && term.IsTerminal(f.Fd()) {
		p.ttyOutput = f
		p.previousOutputState, err = term.GetState(f.Fd())
		if err != nil {
			return fmt.Errorf("error getting state: %w", err)
		}
		var mode uint32
		if err := windows.GetConsoleMode(windows.Handle(p.ttyOutput.Fd()), &mode); err != nil {
			return fmt.Errorf("error getting console mode: %w", err)
		}
		if err := windows.SetConsoleMode(windows.Handle(p.ttyOutput.Fd()), mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err != nil {
			return fmt.Errorf("error setting console mode: %w", err)
		}
	}
	return nil
}
// openInputTTY opens the Windows equivalent of a TTY: the console input
// device CONIN$.
func openInputTTY() (*os.File, error) {
	conin, err := os.OpenFile("CONIN$", os.O_RDWR, 0o644) //nolint:gosec
	if err == nil {
		return conin, nil
	}
	return nil, fmt.Errorf("error opening file: %w", err)
}
// suspendSupported reports whether process suspension (e.g. ctrl+z) is
// available; it is not on Windows.
const suspendSupported = false

// suspendProcess is a no-op on Windows.
func suspendProcess() {}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/tty_unix.go | tty_unix.go | //go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || aix || zos
// +build darwin dragonfly freebsd linux netbsd openbsd solaris aix zos
package tea
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/charmbracelet/x/term"
)
// initInput puts the input terminal into raw mode (saving the previous state
// for restoration on exit) and records the output TTY handle, when either is
// actually a terminal.
func (p *Program) initInput() (err error) {
	// Check if input is a terminal
	if f, ok := p.input.(term.File); ok && term.IsTerminal(f.Fd()) {
		p.ttyInput = f
		p.previousTtyInputState, err = term.MakeRaw(p.ttyInput.Fd())
		if err != nil {
			return fmt.Errorf("error entering raw mode: %w", err)
		}
	}
	// Remember the output handle if it is a terminal too.
	if f, ok := p.output.(term.File); ok && term.IsTerminal(f.Fd()) {
		p.ttyOutput = f
	}
	return nil
}
// openInputTTY opens /dev/tty so input can be read directly from the
// controlling terminal.
func openInputTTY() (*os.File, error) {
	tty, err := os.Open("/dev/tty")
	if err == nil {
		return tty, nil
	}
	return nil, fmt.Errorf("could not open a new TTY: %w", err)
}
// suspendSupported reports whether process suspension (ctrl+z) is available
// on this platform.
const suspendSupported = true

// Send SIGTSTP to the entire process group.
func suspendProcess() {
	c := make(chan os.Signal, 1)
	// Subscribe to SIGCONT before stopping so resumption isn't missed.
	signal.Notify(c, syscall.SIGCONT)
	_ = syscall.Kill(0, syscall.SIGTSTP)
	// blocks until a CONT happens...
	<-c
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/key.go | key.go | package tea
import (
"context"
"fmt"
"io"
"regexp"
"strings"
"unicode/utf8"
)
// KeyMsg contains information about a keypress. KeyMsgs are always sent to
// the program's update function. There are a couple general patterns you could
// use to check for keypresses:
//
//	// Switch on the string representation of the key (shorter)
//	switch msg := msg.(type) {
//	case KeyMsg:
//		switch msg.String() {
//		case "enter":
//			fmt.Println("you pressed enter!")
//		case "a":
//			fmt.Println("you pressed a!")
//		}
//	}
//
//	// Switch on the key type (more foolproof)
//	switch msg := msg.(type) {
//	case KeyMsg:
//		switch msg.Type {
//		case KeyEnter:
//			fmt.Println("you pressed enter!")
//		case KeyRunes:
//			switch string(msg.Runes) {
//			case "a":
//				fmt.Println("you pressed a!")
//			}
//		}
//	}
//
// Note that Key.Runes will always contain at least one character, so you can
// always safely call Key.Runes[0]. In most cases Key.Runes will only contain
// one character, though certain input method editors (most notably Chinese
// IMEs) can input multiple runes at once.
//
// KeyMsg is simply a [Key] under the hood, so Key's fields and String method
// apply directly.
type KeyMsg Key
// String returns a human-readable representation for a key message. It's safe
// (and encouraged) for use in key comparison.
func (k KeyMsg) String() string {
	key := Key(k)
	return key.String()
}
// Key contains information about a keypress.
type Key struct {
	Type KeyType // which key was pressed; KeyRunes for printable input
	Runes []rune // the character(s) entered, set when Type is KeyRunes
	Alt bool // whether the alt modifier was held
	Paste bool // whether the runes came from a bracketed paste
}
// String returns a friendly string representation for a key. It's safe (and
// encouraged) for use in key comparison.
//
//	k := Key{Type: KeyEnter}
//	fmt.Println(k)
//	// Output: enter
func (k Key) String() (str string) {
	var sb strings.Builder
	if k.Alt {
		sb.WriteString("alt+")
	}
	if k.Type != KeyRunes {
		// Named keys resolve through the lookup table; unknown types yield
		// an empty string.
		name, ok := keyNames[k.Type]
		if !ok {
			return ""
		}
		sb.WriteString(name)
		return sb.String()
	}
	// Note: bubbles/keys bindings currently do string compares to recognize
	// shortcuts. Since pasted text should never activate shortcuts, pastes
	// are wrapped in '[...]' here so that the string comparison in Matches()
	// fails for them.
	if k.Paste {
		sb.WriteByte('[')
	}
	sb.WriteString(string(k.Runes))
	if k.Paste {
		sb.WriteByte(']')
	}
	return sb.String()
}
// KeyType indicates the key pressed, such as KeyEnter or KeyBreak or KeyCtrlC.
// All other keys will be type KeyRunes. To get the rune value, check the Runes
// field on a Key struct, or use the Key.String() method:
//
//	k := Key{Type: KeyRunes, Runes: []rune{'a'}, Alt: true}
//	if k.Type == KeyRunes {
//
//		fmt.Println(k.Runes)
//		// Output: a
//
//		fmt.Println(k.String())
//		// Output: alt+a
//
//	}
type KeyType int
// String returns the friendly name for the key type, or the empty string when
// the type has no known name.
func (k KeyType) String() (str string) {
	name, ok := keyNames[k]
	if !ok {
		return ""
	}
	return name
}
// Control keys. We could do this with an iota, but the values are very
// specific, so we set the values explicitly to avoid any confusion.
// These are the ASCII C0 control codes plus DEL.
//
// See also:
// https://en.wikipedia.org/wiki/C0_and_C1_control_codes
const (
	keyNUL KeyType = 0 // null, \0
	keySOH KeyType = 1 // start of heading
	keySTX KeyType = 2 // start of text
	keyETX KeyType = 3 // break, ctrl+c
	keyEOT KeyType = 4 // end of transmission
	keyENQ KeyType = 5 // enquiry
	keyACK KeyType = 6 // acknowledge
	keyBEL KeyType = 7 // bell, \a
	keyBS KeyType = 8 // backspace
	keyHT KeyType = 9 // horizontal tabulation, \t
	keyLF KeyType = 10 // line feed, \n
	keyVT KeyType = 11 // vertical tabulation \v
	keyFF KeyType = 12 // form feed \f
	keyCR KeyType = 13 // carriage return, \r
	keySO KeyType = 14 // shift out
	keySI KeyType = 15 // shift in
	keyDLE KeyType = 16 // data link escape
	keyDC1 KeyType = 17 // device control one
	keyDC2 KeyType = 18 // device control two
	keyDC3 KeyType = 19 // device control three
	keyDC4 KeyType = 20 // device control four
	keyNAK KeyType = 21 // negative acknowledge
	keySYN KeyType = 22 // synchronous idle
	keyETB KeyType = 23 // end of transmission block
	keyCAN KeyType = 24 // cancel
	keyEM KeyType = 25 // end of medium
	keySUB KeyType = 26 // substitution
	keyESC KeyType = 27 // escape, \e
	keyFS KeyType = 28 // file separator
	keyGS KeyType = 29 // group separator
	keyRS KeyType = 30 // record separator
	keyUS KeyType = 31 // unit separator
	keyDEL KeyType = 127 // delete. on most systems this is mapped to backspace, I hear
)
// Control key aliases.
//
// Note that several aliases share an underlying code: for example KeyEsc,
// KeyEscape, and KeyCtrlOpenBracket are all keyESC, and KeyBackspace is the
// DEL code rather than BS.
const (
	KeyNull KeyType = keyNUL
	KeyBreak KeyType = keyETX
	KeyEnter KeyType = keyCR
	KeyBackspace KeyType = keyDEL
	KeyTab KeyType = keyHT
	KeyEsc KeyType = keyESC
	KeyEscape KeyType = keyESC
	KeyCtrlAt KeyType = keyNUL // ctrl+@
	KeyCtrlA KeyType = keySOH
	KeyCtrlB KeyType = keySTX
	KeyCtrlC KeyType = keyETX
	KeyCtrlD KeyType = keyEOT
	KeyCtrlE KeyType = keyENQ
	KeyCtrlF KeyType = keyACK
	KeyCtrlG KeyType = keyBEL
	KeyCtrlH KeyType = keyBS
	KeyCtrlI KeyType = keyHT
	KeyCtrlJ KeyType = keyLF
	KeyCtrlK KeyType = keyVT
	KeyCtrlL KeyType = keyFF
	KeyCtrlM KeyType = keyCR
	KeyCtrlN KeyType = keySO
	KeyCtrlO KeyType = keySI
	KeyCtrlP KeyType = keyDLE
	KeyCtrlQ KeyType = keyDC1
	KeyCtrlR KeyType = keyDC2
	KeyCtrlS KeyType = keyDC3
	KeyCtrlT KeyType = keyDC4
	KeyCtrlU KeyType = keyNAK
	KeyCtrlV KeyType = keySYN
	KeyCtrlW KeyType = keyETB
	KeyCtrlX KeyType = keyCAN
	KeyCtrlY KeyType = keyEM
	KeyCtrlZ KeyType = keySUB
	KeyCtrlOpenBracket KeyType = keyESC // ctrl+[
	KeyCtrlBackslash KeyType = keyFS // ctrl+\
	KeyCtrlCloseBracket KeyType = keyGS // ctrl+]
	KeyCtrlCaret KeyType = keyRS // ctrl+^
	KeyCtrlUnderscore KeyType = keyUS // ctrl+_
	KeyCtrlQuestionMark KeyType = keyDEL // ctrl+?
)
// Other keys.
//
// These use negative values (via the negated iota) so they can never collide
// with the non-negative control-code byte values defined above.
const (
	KeyRunes KeyType = -(iota + 1)
	KeyUp
	KeyDown
	KeyRight
	KeyLeft
	KeyShiftTab
	KeyHome
	KeyEnd
	KeyPgUp
	KeyPgDown
	KeyCtrlPgUp
	KeyCtrlPgDown
	KeyDelete
	KeyInsert
	KeySpace
	KeyCtrlUp
	KeyCtrlDown
	KeyCtrlRight
	KeyCtrlLeft
	KeyCtrlHome
	KeyCtrlEnd
	KeyShiftUp
	KeyShiftDown
	KeyShiftRight
	KeyShiftLeft
	KeyShiftHome
	KeyShiftEnd
	KeyCtrlShiftUp
	KeyCtrlShiftDown
	KeyCtrlShiftLeft
	KeyCtrlShiftRight
	KeyCtrlShiftHome
	KeyCtrlShiftEnd
	KeyF1
	KeyF2
	KeyF3
	KeyF4
	KeyF5
	KeyF6
	KeyF7
	KeyF8
	KeyF9
	KeyF10
	KeyF11
	KeyF12
	KeyF13
	KeyF14
	KeyF15
	KeyF16
	KeyF17
	KeyF18
	KeyF19
	KeyF20
)
// Mappings for control keys and other special keys to friendly consts.
// This is the lookup table behind KeyType.String.
var keyNames = map[KeyType]string{
	// Control keys.
	keyNUL: "ctrl+@", // also ctrl+` (that's ctrl+backtick)
	keySOH: "ctrl+a",
	keySTX: "ctrl+b",
	keyETX: "ctrl+c",
	keyEOT: "ctrl+d",
	keyENQ: "ctrl+e",
	keyACK: "ctrl+f",
	keyBEL: "ctrl+g",
	keyBS:  "ctrl+h",
	keyHT:  "tab", // also ctrl+i
	keyLF:  "ctrl+j",
	keyVT:  "ctrl+k",
	keyFF:  "ctrl+l",
	keyCR:  "enter",
	keySO:  "ctrl+n",
	keySI:  "ctrl+o",
	keyDLE: "ctrl+p",
	keyDC1: "ctrl+q",
	keyDC2: "ctrl+r",
	keyDC3: "ctrl+s",
	keyDC4: "ctrl+t",
	keyNAK: "ctrl+u",
	keySYN: "ctrl+v",
	keyETB: "ctrl+w",
	keyCAN: "ctrl+x",
	keyEM:  "ctrl+y",
	keySUB: "ctrl+z",
	keyESC: "esc",
	keyFS:  "ctrl+\\",
	keyGS:  "ctrl+]",
	keyRS:  "ctrl+^",
	keyUS:  "ctrl+_",
	keyDEL: "backspace",

	// Other keys.
	KeyRunes:          "runes",
	KeyUp:             "up",
	KeyDown:           "down",
	KeyRight:          "right",
	KeySpace:          " ", // for backwards compatibility
	KeyLeft:           "left",
	KeyShiftTab:       "shift+tab",
	KeyHome:           "home",
	KeyEnd:            "end",
	KeyCtrlHome:       "ctrl+home",
	KeyCtrlEnd:        "ctrl+end",
	KeyShiftHome:      "shift+home",
	KeyShiftEnd:       "shift+end",
	KeyCtrlShiftHome:  "ctrl+shift+home",
	KeyCtrlShiftEnd:   "ctrl+shift+end",
	KeyPgUp:           "pgup",
	KeyPgDown:         "pgdown",
	KeyCtrlPgUp:       "ctrl+pgup",
	KeyCtrlPgDown:     "ctrl+pgdown",
	KeyDelete:         "delete",
	KeyInsert:         "insert",
	KeyCtrlUp:         "ctrl+up",
	KeyCtrlDown:       "ctrl+down",
	KeyCtrlRight:      "ctrl+right",
	KeyCtrlLeft:       "ctrl+left",
	KeyShiftUp:        "shift+up",
	KeyShiftDown:      "shift+down",
	KeyShiftRight:     "shift+right",
	KeyShiftLeft:      "shift+left",
	KeyCtrlShiftUp:    "ctrl+shift+up",
	KeyCtrlShiftDown:  "ctrl+shift+down",
	KeyCtrlShiftLeft:  "ctrl+shift+left",
	KeyCtrlShiftRight: "ctrl+shift+right",
	KeyF1:             "f1",
	KeyF2:             "f2",
	KeyF3:             "f3",
	KeyF4:             "f4",
	KeyF5:             "f5",
	KeyF6:             "f6",
	KeyF7:             "f7",
	KeyF8:             "f8",
	KeyF9:             "f9",
	KeyF10:            "f10",
	KeyF11:            "f11",
	KeyF12:            "f12",
	KeyF13:            "f13",
	KeyF14:            "f14",
	KeyF15:            "f15",
	KeyF16:            "f16",
	KeyF17:            "f17",
	KeyF18:            "f18",
	KeyF19:            "f19",
	KeyF20:            "f20",
}
// Sequence mappings.
//
// Maps raw terminal escape sequences to the Key they represent.
// NOTE(review): presumably consumed by detectSequence, which is not visible
// in this chunk — verify before relying on that.
var sequences = map[string]Key{
	// Arrow keys
	"\x1b[A":    {Type: KeyUp},
	"\x1b[B":    {Type: KeyDown},
	"\x1b[C":    {Type: KeyRight},
	"\x1b[D":    {Type: KeyLeft},
	"\x1b[1;2A": {Type: KeyShiftUp},
	"\x1b[1;2B": {Type: KeyShiftDown},
	"\x1b[1;2C": {Type: KeyShiftRight},
	"\x1b[1;2D": {Type: KeyShiftLeft},
	"\x1b[OA":   {Type: KeyShiftUp},    // DECCKM
	"\x1b[OB":   {Type: KeyShiftDown},  // DECCKM
	"\x1b[OC":   {Type: KeyShiftRight}, // DECCKM
	"\x1b[OD":   {Type: KeyShiftLeft},  // DECCKM
	"\x1b[a":    {Type: KeyShiftUp},    // urxvt
	"\x1b[b":    {Type: KeyShiftDown},  // urxvt
	"\x1b[c":    {Type: KeyShiftRight}, // urxvt
	"\x1b[d":    {Type: KeyShiftLeft},  // urxvt
	"\x1b[1;3A": {Type: KeyUp, Alt: true},
	"\x1b[1;3B": {Type: KeyDown, Alt: true},
	"\x1b[1;3C": {Type: KeyRight, Alt: true},
	"\x1b[1;3D": {Type: KeyLeft, Alt: true},

	"\x1b[1;4A": {Type: KeyShiftUp, Alt: true},
	"\x1b[1;4B": {Type: KeyShiftDown, Alt: true},
	"\x1b[1;4C": {Type: KeyShiftRight, Alt: true},
	"\x1b[1;4D": {Type: KeyShiftLeft, Alt: true},

	"\x1b[1;5A": {Type: KeyCtrlUp},
	"\x1b[1;5B": {Type: KeyCtrlDown},
	"\x1b[1;5C": {Type: KeyCtrlRight},
	"\x1b[1;5D": {Type: KeyCtrlLeft},
	"\x1b[Oa":   {Type: KeyCtrlUp, Alt: true},    // urxvt
	"\x1b[Ob":   {Type: KeyCtrlDown, Alt: true},  // urxvt
	"\x1b[Oc":   {Type: KeyCtrlRight, Alt: true}, // urxvt
	"\x1b[Od":   {Type: KeyCtrlLeft, Alt: true},  // urxvt
	"\x1b[1;6A": {Type: KeyCtrlShiftUp},
	"\x1b[1;6B": {Type: KeyCtrlShiftDown},
	"\x1b[1;6C": {Type: KeyCtrlShiftRight},
	"\x1b[1;6D": {Type: KeyCtrlShiftLeft},
	"\x1b[1;7A": {Type: KeyCtrlUp, Alt: true},
	"\x1b[1;7B": {Type: KeyCtrlDown, Alt: true},
	"\x1b[1;7C": {Type: KeyCtrlRight, Alt: true},
	"\x1b[1;7D": {Type: KeyCtrlLeft, Alt: true},
	"\x1b[1;8A": {Type: KeyCtrlShiftUp, Alt: true},
	"\x1b[1;8B": {Type: KeyCtrlShiftDown, Alt: true},
	"\x1b[1;8C": {Type: KeyCtrlShiftRight, Alt: true},
	"\x1b[1;8D": {Type: KeyCtrlShiftLeft, Alt: true},

	// Miscellaneous keys
	"\x1b[Z": {Type: KeyShiftTab},

	"\x1b[2~":   {Type: KeyInsert},
	"\x1b[3;2~": {Type: KeyInsert, Alt: true},

	"\x1b[3~":   {Type: KeyDelete},
	"\x1b[3;3~": {Type: KeyDelete, Alt: true},

	"\x1b[5~":   {Type: KeyPgUp},
	"\x1b[5;3~": {Type: KeyPgUp, Alt: true},
	"\x1b[5;5~": {Type: KeyCtrlPgUp},
	"\x1b[5^":   {Type: KeyCtrlPgUp}, // urxvt
	"\x1b[5;7~": {Type: KeyCtrlPgUp, Alt: true},

	"\x1b[6~":   {Type: KeyPgDown},
	"\x1b[6;3~": {Type: KeyPgDown, Alt: true},
	"\x1b[6;5~": {Type: KeyCtrlPgDown},
	"\x1b[6^":   {Type: KeyCtrlPgDown}, // urxvt
	"\x1b[6;7~": {Type: KeyCtrlPgDown, Alt: true},

	"\x1b[1~":   {Type: KeyHome},
	"\x1b[H":    {Type: KeyHome},                     // xterm, lxterm
	"\x1b[1;3H": {Type: KeyHome, Alt: true},          // xterm, lxterm
	"\x1b[1;5H": {Type: KeyCtrlHome},                 // xterm, lxterm
	"\x1b[1;7H": {Type: KeyCtrlHome, Alt: true},      // xterm, lxterm
	"\x1b[1;2H": {Type: KeyShiftHome},                // xterm, lxterm
	"\x1b[1;4H": {Type: KeyShiftHome, Alt: true},     // xterm, lxterm
	"\x1b[1;6H": {Type: KeyCtrlShiftHome},            // xterm, lxterm
	"\x1b[1;8H": {Type: KeyCtrlShiftHome, Alt: true}, // xterm, lxterm

	"\x1b[4~":   {Type: KeyEnd},
	"\x1b[F":    {Type: KeyEnd},                     // xterm, lxterm
	"\x1b[1;3F": {Type: KeyEnd, Alt: true},          // xterm, lxterm
	"\x1b[1;5F": {Type: KeyCtrlEnd},                 // xterm, lxterm
	"\x1b[1;7F": {Type: KeyCtrlEnd, Alt: true},      // xterm, lxterm
	"\x1b[1;2F": {Type: KeyShiftEnd},                // xterm, lxterm
	"\x1b[1;4F": {Type: KeyShiftEnd, Alt: true},     // xterm, lxterm
	"\x1b[1;6F": {Type: KeyCtrlShiftEnd},            // xterm, lxterm
	"\x1b[1;8F": {Type: KeyCtrlShiftEnd, Alt: true}, // xterm, lxterm

	"\x1b[7~": {Type: KeyHome},          // urxvt
	"\x1b[7^": {Type: KeyCtrlHome},      // urxvt
	"\x1b[7$": {Type: KeyShiftHome},     // urxvt
	"\x1b[7@": {Type: KeyCtrlShiftHome}, // urxvt

	"\x1b[8~": {Type: KeyEnd},          // urxvt
	"\x1b[8^": {Type: KeyCtrlEnd},      // urxvt
	"\x1b[8$": {Type: KeyShiftEnd},     // urxvt
	"\x1b[8@": {Type: KeyCtrlShiftEnd}, // urxvt

	// Function keys, Linux console
	"\x1b[[A": {Type: KeyF1}, // linux console
	"\x1b[[B": {Type: KeyF2}, // linux console
	"\x1b[[C": {Type: KeyF3}, // linux console
	"\x1b[[D": {Type: KeyF4}, // linux console
	"\x1b[[E": {Type: KeyF5}, // linux console

	// Function keys, X11
	"\x1bOP": {Type: KeyF1}, // vt100, xterm
	"\x1bOQ": {Type: KeyF2}, // vt100, xterm
	"\x1bOR": {Type: KeyF3}, // vt100, xterm
	"\x1bOS": {Type: KeyF4}, // vt100, xterm

	"\x1b[1;3P": {Type: KeyF1, Alt: true}, // vt100, xterm
	"\x1b[1;3Q": {Type: KeyF2, Alt: true}, // vt100, xterm
	"\x1b[1;3R": {Type: KeyF3, Alt: true}, // vt100, xterm
	"\x1b[1;3S": {Type: KeyF4, Alt: true}, // vt100, xterm

	"\x1b[11~": {Type: KeyF1}, // urxvt
	"\x1b[12~": {Type: KeyF2}, // urxvt
	"\x1b[13~": {Type: KeyF3}, // urxvt
	"\x1b[14~": {Type: KeyF4}, // urxvt

	"\x1b[15~": {Type: KeyF5}, // vt100, xterm, also urxvt

	"\x1b[15;3~": {Type: KeyF5, Alt: true}, // vt100, xterm, also urxvt

	"\x1b[17~": {Type: KeyF6},  // vt100, xterm, also urxvt
	"\x1b[18~": {Type: KeyF7},  // vt100, xterm, also urxvt
	"\x1b[19~": {Type: KeyF8},  // vt100, xterm, also urxvt
	"\x1b[20~": {Type: KeyF9},  // vt100, xterm, also urxvt
	"\x1b[21~": {Type: KeyF10}, // vt100, xterm, also urxvt

	"\x1b[17;3~": {Type: KeyF6, Alt: true},  // vt100, xterm
	"\x1b[18;3~": {Type: KeyF7, Alt: true},  // vt100, xterm
	"\x1b[19;3~": {Type: KeyF8, Alt: true},  // vt100, xterm
	"\x1b[20;3~": {Type: KeyF9, Alt: true},  // vt100, xterm
	"\x1b[21;3~": {Type: KeyF10, Alt: true}, // vt100, xterm

	"\x1b[23~": {Type: KeyF11}, // vt100, xterm, also urxvt
	"\x1b[24~": {Type: KeyF12}, // vt100, xterm, also urxvt

	"\x1b[23;3~": {Type: KeyF11, Alt: true}, // vt100, xterm
	"\x1b[24;3~": {Type: KeyF12, Alt: true}, // vt100, xterm

	"\x1b[1;2P": {Type: KeyF13},
	"\x1b[1;2Q": {Type: KeyF14},

	"\x1b[25~": {Type: KeyF13}, // vt100, xterm, also urxvt
	"\x1b[26~": {Type: KeyF14}, // vt100, xterm, also urxvt

	"\x1b[25;3~": {Type: KeyF13, Alt: true}, // vt100, xterm
	"\x1b[26;3~": {Type: KeyF14, Alt: true}, // vt100, xterm

	"\x1b[1;2R": {Type: KeyF15},
	"\x1b[1;2S": {Type: KeyF16},

	"\x1b[28~": {Type: KeyF15}, // vt100, xterm, also urxvt
	"\x1b[29~": {Type: KeyF16}, // vt100, xterm, also urxvt

	"\x1b[28;3~": {Type: KeyF15, Alt: true}, // vt100, xterm
	"\x1b[29;3~": {Type: KeyF16, Alt: true}, // vt100, xterm

	"\x1b[15;2~": {Type: KeyF17},
	"\x1b[17;2~": {Type: KeyF18},
	"\x1b[18;2~": {Type: KeyF19},
	"\x1b[19;2~": {Type: KeyF20},

	"\x1b[31~": {Type: KeyF17},
	"\x1b[32~": {Type: KeyF18},
	"\x1b[33~": {Type: KeyF19},
	"\x1b[34~": {Type: KeyF20},

	// Powershell sequences.
	"\x1bOA": {Type: KeyUp, Alt: false},
	"\x1bOB": {Type: KeyDown, Alt: false},
	"\x1bOC": {Type: KeyRight, Alt: false},
	"\x1bOD": {Type: KeyLeft, Alt: false},
}
// unknownInputByteMsg is reported by the input reader when an invalid
// utf-8 byte is detected on the input. Currently, it is not handled
// further by bubbletea. However, having this event makes it possible
// to troubleshoot invalid inputs.
type unknownInputByteMsg byte
func (u unknownInputByteMsg) String() string {
return fmt.Sprintf("?%#02x?", int(u))
}
// unknownCSISequenceMsg is reported by the input reader when an
// unrecognized CSI sequence is detected on the input. Currently, it
// is not handled further by bubbletea. However, having this event
// makes it possible to troubleshoot invalid inputs.
type unknownCSISequenceMsg []byte

// String renders the sequence payload (the bytes after "ESC [") for
// troubleshooting.
func (u unknownCSISequenceMsg) String() string {
	payload := []byte(u)[2:]
	return fmt.Sprintf("?CSI%+v?", payload)
}
// spaceRunes is a reusable single-space rune slice. It is not referenced in
// this part of the file — presumably used by code elsewhere (verify).
var spaceRunes = []rune{' '}
// readAnsiInputs reads keypress and mouse inputs from a TTY and produces messages
// containing information about the key or mouse events accordingly.
//
// It blocks on input.Read and returns only on a read error, or when ctx is
// canceled (cancellation is only observed while trying to deliver a message).
func readAnsiInputs(ctx context.Context, msgs chan<- Msg, input io.Reader) error {
	var buf [256]byte

	// Bytes carried over from the previous read when a message was split
	// across two reads.
	var leftOverFromPrevIteration []byte
loop:
	for {
		// Read and block.
		numBytes, err := input.Read(buf[:])
		if err != nil {
			return fmt.Errorf("error reading input: %w", err)
		}
		b := buf[:numBytes]
		if leftOverFromPrevIteration != nil {
			// Prepend the partial message from last time.
			b = append(leftOverFromPrevIteration, b...)
		}

		// If we had a short read (numBytes < len(buf)), we're sure that
		// the end of this read is an event boundary, so there is no doubt
		// if we are encountering the end of the buffer while parsing a message.
		// However, if we've succeeded in filling up the buffer, there may
		// be more data in the OS buffer ready to be read in, to complete
		// the last message in the input. In that case, we will retry with
		// the left over data in the next iteration.
		canHaveMoreData := numBytes == len(buf)

		var i, w int
		for i, w = 0, 0; i < len(b); i += w {
			var msg Msg
			w, msg = detectOneMsg(b[i:], canHaveMoreData)
			if w == 0 {
				// Expecting more bytes beyond the current buffer. Try waiting
				// for more input.
				leftOverFromPrevIteration = make([]byte, 0, len(b[i:])+len(buf))
				leftOverFromPrevIteration = append(leftOverFromPrevIteration, b[i:]...)
				continue loop
			}

			select {
			case msgs <- msg:
			case <-ctx.Done():
				err := ctx.Err()
				if err != nil {
					err = fmt.Errorf("found context error while reading input: %w", err)
				}
				return err
			}
		}
		leftOverFromPrevIteration = nil
	}
}
var (
	// unknownCSIRe matches a complete CSI escape sequence: ESC '[' followed
	// by parameter bytes (0x30-0x3f), intermediate bytes (0x20-0x2f) and a
	// final byte (0x40-0x7e).
	unknownCSIRe = regexp.MustCompile(`^\x1b\[[\x30-\x3f]*[\x20-\x2f]*[\x40-\x7e]`)

	// mouseSGRRegex captures the three numeric parameters and the final
	// 'M'/'m' byte of an SGR mouse report.
	mouseSGRRegex = regexp.MustCompile(`(\d+);(\d+);(\d+)([Mm])`)
)
// detectOneMsg parses the first message out of b and returns the number of
// bytes consumed along with the resulting message. A returned width of 0
// means b may hold an incomplete message and the caller should wait for more
// input (only possible when canHaveMoreData is true).
func detectOneMsg(b []byte, canHaveMoreData bool) (w int, msg Msg) {
	// Detect mouse events.
	// X10 mouse events have a length of 6 bytes
	const mouseEventX10Len = 6
	if len(b) >= mouseEventX10Len && b[0] == '\x1b' && b[1] == '[' {
		switch b[2] {
		case 'M':
			return mouseEventX10Len, MouseMsg(parseX10MouseEvent(b))
		case '<':
			if matchIndices := mouseSGRRegex.FindSubmatchIndex(b[3:]); matchIndices != nil {
				// SGR mouse events length is the length of the match plus the length of the escape sequence
				mouseEventSGRLen := matchIndices[1] + 3 //nolint:mnd
				return mouseEventSGRLen, MouseMsg(parseSGRMouseEvent(b))
			}
		}
	}

	// Detect focus events.
	var foundRF bool
	foundRF, w, msg = detectReportFocus(b)
	if foundRF {
		return w, msg
	}

	// Detect bracketed paste.
	var foundbp bool
	foundbp, w, msg = detectBracketedPaste(b)
	if foundbp {
		return w, msg
	}

	// Detect escape sequence and control characters other than NUL,
	// possibly with an escape character in front to mark the Alt
	// modifier.
	var foundSeq bool
	foundSeq, w, msg = detectSequence(b)
	if foundSeq {
		return w, msg
	}

	// No non-NUL control character or escape sequence.
	// If we are seeing at least an escape character, remember it for later below.
	alt := false
	i := 0
	if b[0] == '\x1b' {
		alt = true
		i++
	}

	// Are we seeing a standalone NUL? This is not handled by detectSequence().
	if i < len(b) && b[i] == 0 {
		return i + 1, KeyMsg{Type: keyNUL, Alt: alt}
	}

	// Find the longest sequence of runes that are not control
	// characters from this point.
	var runes []rune
	for rw := 0; i < len(b); i += rw {
		var r rune
		r, rw = utf8.DecodeRune(b[i:])
		if r == utf8.RuneError || r <= rune(keyUS) || r == rune(keyDEL) || r == ' ' {
			// Rune errors are handled below; control characters and spaces will
			// be handled by detectSequence in the next call to detectOneMsg.
			break
		}
		runes = append(runes, r)
		if alt {
			// We only support a single rune after an escape alt modifier.
			i += rw
			break
		}
	}
	if i >= len(b) && canHaveMoreData {
		// We have encountered the end of the input buffer. Alas, we can't
		// be sure whether the data in the remainder of the buffer is
		// complete (maybe there was a short read). Instead of sending anything
		// dumb to the message channel, do a short read. The outer loop will
		// handle this case by extending the buffer as necessary.
		return 0, nil
	}

	// If we found at least one rune, we report the bunch of them as
	// a single KeyRunes or KeySpace event.
	if len(runes) > 0 {
		k := Key{Type: KeyRunes, Runes: runes, Alt: alt}
		if len(runes) == 1 && runes[0] == ' ' {
			k.Type = KeySpace
		}
		return i, KeyMsg(k)
	}

	// We didn't find an escape sequence, nor a valid rune. Was this a
	// lone escape character at the end of the input?
	if alt && len(b) == 1 {
		return 1, KeyMsg(Key{Type: KeyEscape})
	}

	// The character at the current position is neither an escape
	// sequence, a valid rune start or a sole escape character. Report
	// it as an invalid byte.
	return 1, unknownInputByteMsg(b[0])
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/logging_test.go | logging_test.go | package tea
import (
"log"
"os"
"path/filepath"
"testing"
)
// TestLogToFile verifies that LogToFile opens the given path and wires the
// standard logger to it with the given prefix.
func TestLogToFile(t *testing.T) {
	path := filepath.Join(t.TempDir(), "log.txt")
	prefix := "logprefix"
	f, err := LogToFile(path, prefix)
	if err != nil {
		t.Error(err)
	}
	// Lmsgprefix moves the prefix from the start of the line to just before
	// the message.
	log.SetFlags(log.Lmsgprefix)
	log.Println("some test log")
	if err := f.Close(); err != nil {
		t.Error(err)
	}
	out, err := os.ReadFile(path)
	if err != nil {
		t.Error(err)
	}
	if string(out) != prefix+" some test log\n" {
		t.Fatalf("wrong log msg: %q", string(out))
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/tutorials/commands/main.go | tutorials/commands/main.go | package main
import (
"fmt"
"net/http"
"os"
"time"
tea "github.com/charmbracelet/bubbletea"
)
// url is the address polled by checkServer.
const url = "https://charm.sh/"

// model holds the HTTP status of the last check, or the error that occurred.
type model struct {
	status int
	err    error
}

// checkServer performs a GET against url and reports the result as either a
// statusMsg or an errMsg. It is used as a tea.Cmd.
func checkServer() tea.Msg {
	c := &http.Client{Timeout: 10 * time.Second}
	res, err := c.Get(url)
	if err != nil {
		return errMsg{err}
	}
	defer res.Body.Close() // nolint:errcheck

	return statusMsg(res.StatusCode)
}

// statusMsg carries the HTTP status code of a successful check.
type statusMsg int

// errMsg wraps an error produced by checkServer.
type errMsg struct{ err error }

// For messages that contain errors it's often handy to also implement the
// error interface on the message.
func (e errMsg) Error() string { return e.err.Error() }

// Init kicks off the server check as soon as the program starts.
func (m model) Init() tea.Cmd {
	return checkServer
}

// Update records the check result and quits; it also quits on ctrl+c.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case statusMsg:
		m.status = int(msg)
		return m, tea.Quit
	case errMsg:
		m.err = msg
		return m, tea.Quit
	case tea.KeyMsg:
		if msg.Type == tea.KeyCtrlC {
			return m, tea.Quit
		}
	}
	return m, nil
}

// View renders either the error, or the URL being checked plus its status.
func (m model) View() string {
	if m.err != nil {
		return fmt.Sprintf("\nWe had some trouble: %v\n\n", m.err)
	}
	s := fmt.Sprintf("Checking %s ... ", url)
	if m.status > 0 {
		s += fmt.Sprintf("%d %s!", m.status, http.StatusText(m.status))
	}
	return "\n" + s + "\n\n"
}

// main runs the program and exits non-zero on error.
func main() {
	if _, err := tea.NewProgram(model{}).Run(); err != nil {
		fmt.Printf("Uh oh, there was an error: %v\n", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/tutorials/basics/main.go | tutorials/basics/main.go | package main
import (
	"fmt"
	"os"
	"strings"

	tea "github.com/charmbracelet/bubbletea"
)
// model is the Bubble Tea model for the grocery-list tutorial.
type model struct {
	cursor   int              // index of the row the cursor is on
	choices  []string         // items to choose from
	selected map[int]struct{} // set of selected choice indexes
}

func initialModel() model {
	return model{
		choices: []string{"Buy carrots", "Buy celery", "Buy kohlrabi"},

		// A map which indicates which choices are selected. We're using
		// the map like a mathematical set. The keys refer to the indexes
		// of the `choices` slice, above.
		selected: make(map[int]struct{}),
	}
}

// Init sets the terminal window title.
func (m model) Init() tea.Cmd {
	return tea.SetWindowTitle("Grocery List")
}

// Update moves the cursor, toggles selection on enter/space, and quits on
// q/ctrl+c.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "ctrl+c", "q":
			return m, tea.Quit
		case "up", "k":
			if m.cursor > 0 {
				m.cursor--
			}
		case "down", "j":
			if m.cursor < len(m.choices)-1 {
				m.cursor++
			}
		case "enter", " ":
			// Toggle membership of the current row in the selected set.
			_, ok := m.selected[m.cursor]
			if ok {
				delete(m.selected, m.cursor)
			} else {
				m.selected[m.cursor] = struct{}{}
			}
		}
	}
	return m, nil
}
// View renders the grocery list with a ">" cursor on the active row and an
// "x" next to each selected item.
func (m model) View() string {
	// Use a strings.Builder instead of repeated string concatenation, which
	// is quadratic in the number of rows.
	var b strings.Builder
	b.WriteString("What should we buy at the market?\n\n")

	for i, choice := range m.choices {
		cursor := " " // marks the row the cursor is on
		if m.cursor == i {
			cursor = ">"
		}

		checked := " " // marks selected items
		if _, ok := m.selected[i]; ok {
			checked = "x"
		}

		fmt.Fprintf(&b, "%s [%s] %s\n", cursor, checked, choice)
	}

	b.WriteString("\nPress q to quit.\n")
	return b.String()
}
// main runs the grocery-list program and exits non-zero on error.
func main() {
	p := tea.NewProgram(initialModel())
	if _, err := p.Run(); err != nil {
		fmt.Printf("Alas, there's been an error: %v", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/textarea/main.go | examples/textarea/main.go | package main
// A simple program demonstrating the textarea component from the Bubbles
// component library.
import (
"fmt"
"log"
"github.com/charmbracelet/bubbles/textarea"
tea "github.com/charmbracelet/bubbletea"
)
// main starts the textarea example program.
func main() {
	p := tea.NewProgram(initialModel())

	if _, err := p.Run(); err != nil {
		log.Fatal(err)
	}
}

// errMsg is the message type used to report errors to the model.
type errMsg error

// model wraps a single textarea plus the last error seen.
type model struct {
	textarea textarea.Model
	err      error
}

func initialModel() model {
	ti := textarea.New()
	ti.Placeholder = "Once upon a time..."
	ti.Focus()

	return model{
		textarea: ti,
		err:      nil,
	}
}

// Init starts the cursor blinking.
func (m model) Init() tea.Cmd {
	return textarea.Blink
}

// Update blurs the textarea on esc, quits on ctrl+c, refocuses on any other
// key, and forwards every message to the textarea component.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmds []tea.Cmd
	var cmd tea.Cmd

	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.Type {
		case tea.KeyEsc:
			if m.textarea.Focused() {
				m.textarea.Blur()
			}
		case tea.KeyCtrlC:
			return m, tea.Quit
		default:
			if !m.textarea.Focused() {
				cmd = m.textarea.Focus()
				cmds = append(cmds, cmd)
			}
		}

	// We handle errors just like any other message
	case errMsg:
		m.err = msg
		return m, nil
	}

	m.textarea, cmd = m.textarea.Update(msg)
	cmds = append(cmds, cmd)
	return m, tea.Batch(cmds...)
}

// View renders the prompt, the textarea, and the quit hint.
func (m model) View() string {
	return fmt.Sprintf(
		"Tell me a story.\n\n%s\n\n%s",
		m.textarea.View(),
		"(ctrl+c to quit)",
	) + "\n\n"
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/chat/main.go | examples/chat/main.go | package main
// A simple program demonstrating the text area component from the Bubbles
// component library.
import (
"fmt"
"log"
"strings"
"github.com/charmbracelet/bubbles/textarea"
"github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// gap separates the viewport from the textarea in the layout.
const gap = "\n\n"

func main() {
	p := tea.NewProgram(initialModel())

	if _, err := p.Run(); err != nil {
		log.Fatal(err)
	}
}

// errMsg is the message type used to report errors to the model.
type (
	errMsg error
)

// model holds the message history viewport, the input textarea, and styling.
type model struct {
	viewport    viewport.Model
	messages    []string
	textarea    textarea.Model
	senderStyle lipgloss.Style
	err         error
}

func initialModel() model {
	ta := textarea.New()
	ta.Placeholder = "Send a message..."
	ta.Focus()

	ta.Prompt = "┃ "
	ta.CharLimit = 280

	ta.SetWidth(30)
	ta.SetHeight(3)

	// Remove cursor line styling
	ta.FocusedStyle.CursorLine = lipgloss.NewStyle()

	ta.ShowLineNumbers = false

	vp := viewport.New(30, 5)
	vp.SetContent(`Welcome to the chat room!
Type a message and press Enter to send.`)

	// Enter submits the message rather than inserting a newline.
	ta.KeyMap.InsertNewline.SetEnabled(false)

	return model{
		textarea:    ta,
		messages:    []string{},
		viewport:    vp,
		senderStyle: lipgloss.NewStyle().Foreground(lipgloss.Color("5")),
		err:         nil,
	}
}

// Init starts the cursor blinking.
func (m model) Init() tea.Cmd {
	return textarea.Blink
}

// Update resizes the layout on window changes, appends a message on enter,
// quits on esc/ctrl+c, and forwards everything to the two components.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var (
		tiCmd tea.Cmd
		vpCmd tea.Cmd
	)

	m.textarea, tiCmd = m.textarea.Update(msg)
	m.viewport, vpCmd = m.viewport.Update(msg)

	switch msg := msg.(type) {
	case tea.WindowSizeMsg:
		m.viewport.Width = msg.Width
		m.textarea.SetWidth(msg.Width)
		// The viewport takes whatever vertical space the textarea and the
		// gap don't use.
		m.viewport.Height = msg.Height - m.textarea.Height() - lipgloss.Height(gap)

		if len(m.messages) > 0 {
			// Wrap content before setting it.
			m.viewport.SetContent(lipgloss.NewStyle().Width(m.viewport.Width).Render(strings.Join(m.messages, "\n")))
		}
		m.viewport.GotoBottom()
	case tea.KeyMsg:
		switch msg.Type {
		case tea.KeyCtrlC, tea.KeyEsc:
			fmt.Println(m.textarea.Value())
			return m, tea.Quit
		case tea.KeyEnter:
			m.messages = append(m.messages, m.senderStyle.Render("You: ")+m.textarea.Value())
			m.viewport.SetContent(lipgloss.NewStyle().Width(m.viewport.Width).Render(strings.Join(m.messages, "\n")))
			m.textarea.Reset()
			m.viewport.GotoBottom()
		}

	// We handle errors just like any other message
	case errMsg:
		m.err = msg
		return m, nil
	}

	return m, tea.Batch(tiCmd, vpCmd)
}

// View stacks the viewport above the textarea with a gap in between.
func (m model) View() string {
	return fmt.Sprintf(
		"%s%s%s",
		m.viewport.View(),
		gap,
		m.textarea.View(),
	)
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/table-resize/main.go | examples/table-resize/main.go | package main
import (
"fmt"
"os"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
"github.com/charmbracelet/lipgloss/table"
)
// model wraps a lipgloss table that is resized to the terminal.
type model struct {
	table *table.Table
}

func (m model) Init() tea.Cmd { return nil }

// Update resizes the table on window-size changes and quits on q/ctrl+c.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmd tea.Cmd
	switch msg := msg.(type) {
	case tea.WindowSizeMsg:
		m.table = m.table.Width(msg.Width)
		m.table = m.table.Height(msg.Height)
	case tea.KeyMsg:
		switch msg.String() {
		case "q", "ctrl+c":
			return m, tea.Quit
		case "enter":
		}
	}
	return m, cmd
}

// View renders the table with a blank line above and below.
func (m model) View() string {
	return "\n" + m.table.String() + "\n"
}

// main builds the Pokémon table with per-cell styling and runs the program
// in the alternate screen.
func main() {
	re := lipgloss.NewRenderer(os.Stdout)
	baseStyle := re.NewStyle().Padding(0, 1)
	headerStyle := baseStyle.Foreground(lipgloss.Color("252")).Bold(true)
	selectedStyle := baseStyle.Foreground(lipgloss.Color("#01BE85")).Background(lipgloss.Color("#00432F"))
	// Bright per-type colors used on odd rows.
	typeColors := map[string]lipgloss.Color{
		"Bug":      lipgloss.Color("#D7FF87"),
		"Electric": lipgloss.Color("#FDFF90"),
		"Fire":     lipgloss.Color("#FF7698"),
		"Flying":   lipgloss.Color("#FF87D7"),
		"Grass":    lipgloss.Color("#75FBAB"),
		"Ground":   lipgloss.Color("#FF875F"),
		"Normal":   lipgloss.Color("#929292"),
		"Poison":   lipgloss.Color("#7D5AFC"),
		"Water":    lipgloss.Color("#00E2C7"),
	}
	// Dimmed variants used on even rows for a striped effect.
	dimTypeColors := map[string]lipgloss.Color{
		"Bug":      lipgloss.Color("#97AD64"),
		"Electric": lipgloss.Color("#FCFF5F"),
		"Fire":     lipgloss.Color("#BA5F75"),
		"Flying":   lipgloss.Color("#C97AB2"),
		"Grass":    lipgloss.Color("#59B980"),
		"Ground":   lipgloss.Color("#C77252"),
		"Normal":   lipgloss.Color("#727272"),
		"Poison":   lipgloss.Color("#634BD0"),
		"Water":    lipgloss.Color("#439F8E"),
	}

	headers := []string{"#", "NAME", "TYPE 1", "TYPE 2", "JAPANESE", "OFFICIAL ROM."}
	rows := [][]string{
		{"1", "Bulbasaur", "Grass", "Poison", "フシギダネ", "Bulbasaur"},
		{"2", "Ivysaur", "Grass", "Poison", "フシギソウ", "Ivysaur"},
		{"3", "Venusaur", "Grass", "Poison", "フシギバナ", "Venusaur"},
		{"4", "Charmander", "Fire", "", "ヒトカゲ", "Hitokage"},
		{"5", "Charmeleon", "Fire", "", "リザード", "Lizardo"},
		{"6", "Charizard", "Fire", "Flying", "リザードン", "Lizardon"},
		{"7", "Squirtle", "Water", "", "ゼニガメ", "Zenigame"},
		{"8", "Wartortle", "Water", "", "カメール", "Kameil"},
		{"9", "Blastoise", "Water", "", "カメックス", "Kamex"},
		{"10", "Caterpie", "Bug", "", "キャタピー", "Caterpie"},
		{"11", "Metapod", "Bug", "", "トランセル", "Trancell"},
		{"12", "Butterfree", "Bug", "Flying", "バタフリー", "Butterfree"},
		{"13", "Weedle", "Bug", "Poison", "ビードル", "Beedle"},
		{"14", "Kakuna", "Bug", "Poison", "コクーン", "Cocoon"},
		{"15", "Beedrill", "Bug", "Poison", "スピアー", "Spear"},
		{"16", "Pidgey", "Normal", "Flying", "ポッポ", "Poppo"},
		{"17", "Pidgeotto", "Normal", "Flying", "ピジョン", "Pigeon"},
		{"18", "Pidgeot", "Normal", "Flying", "ピジョット", "Pigeot"},
		{"19", "Rattata", "Normal", "", "コラッタ", "Koratta"},
		{"20", "Raticate", "Normal", "", "ラッタ", "Ratta"},
		{"21", "Spearow", "Normal", "Flying", "オニスズメ", "Onisuzume"},
		{"22", "Fearow", "Normal", "Flying", "オニドリル", "Onidrill"},
		{"23", "Ekans", "Poison", "", "アーボ", "Arbo"},
		{"24", "Arbok", "Poison", "", "アーボック", "Arbok"},
		{"25", "Pikachu", "Electric", "", "ピカチュウ", "Pikachu"},
		{"26", "Raichu", "Electric", "", "ライチュウ", "Raichu"},
		{"27", "Sandshrew", "Ground", "", "サンド", "Sand"},
		{"28", "Sandslash", "Ground", "", "サンドパン", "Sandpan"},
	}

	t := table.New().
		Headers(headers...).
		Rows(rows...).
		Border(lipgloss.NormalBorder()).
		BorderStyle(re.NewStyle().Foreground(lipgloss.Color("238"))).
		StyleFunc(func(row, col int) lipgloss.Style {
			// Row 0 is the header; data rows are offset by one.
			if row == 0 {
				return headerStyle
			}

			rowIndex := row - 1
			if rowIndex < 0 || rowIndex >= len(rows) {
				return baseStyle
			}

			if rows[rowIndex][1] == "Pikachu" {
				return selectedStyle
			}

			even := row%2 == 0

			switch col {
			case 2, 3: // Type 1 + 2
				c := typeColors
				if even {
					c = dimTypeColors
				}

				if col >= len(rows[rowIndex]) {
					return baseStyle
				}

				color, ok := c[rows[rowIndex][col]]
				if !ok {
					return baseStyle
				}
				return baseStyle.Foreground(color)
			}

			if even {
				return baseStyle.Foreground(lipgloss.Color("245"))
			}
			return baseStyle.Foreground(lipgloss.Color("252"))
		}).
		Border(lipgloss.ThickBorder())

	m := model{t}
	if _, err := tea.NewProgram(m, tea.WithAltScreen()).Run(); err != nil {
		fmt.Println("Error running program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/eyes/main.go | examples/eyes/main.go | // roughly converted to Go from https://github.com/dmtrKovalenko/esp32-smooth-eye-blinking/blob/main/src/main.cpp
package main
import (
"fmt"
"math"
"math/rand"
"strings"
"time"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
const (
	// Eye dimensions (corresponding to original EYE_WIDTH and EYE_HEIGHT)
	eyeWidth   = 15
	eyeHeight  = 12 // Increased height for taller eyes
	eyeSpacing = 40

	// Blink animation timing (matching original constants)
	blinkFrames = 20
	openTimeMin = 1000 // minimum eyes-open time, in milliseconds
	openTimeMax = 4000 // maximum eyes-open time, in milliseconds
)

// Characters for drawing the eyes
const (
	eyeChar = "●"
	bgChar  = " "
)

// model holds the terminal size, the eye placement, and blink-animation
// state.
type model struct {
	width        int
	height       int
	eyePositions [2]int // x coordinates of the two eye centers
	eyeY         int    // y coordinate of both eye centers
	isBlinking   bool
	blinkState   int // current frame index within the blink animation
	lastBlink    time.Time
	openTime     time.Duration // how long the eyes stay open before the next blink
}

// tickMsg drives the animation at a fixed frame rate.
type tickMsg time.Time

func main() {
	p := tea.NewProgram(initialModel(), tea.WithAltScreen())
	if _, err := p.Run(); err != nil {
		fmt.Printf("Error running program: %v\n", err)
	}
}

// initialModel builds a model with a random initial eyes-open duration.
func initialModel() model {
	m := model{
		width:      80,
		height:     24,
		isBlinking: false,
		blinkState: 0,
		lastBlink:  time.Now(),
		openTime:   time.Duration(rand.Intn(openTimeMax-openTimeMin)+openTimeMin) * time.Millisecond,
	}
	m.updateEyePositions()
	return m
}

// updateEyePositions recenters both eyes for the current terminal size.
func (m *model) updateEyePositions() {
	startX := (m.width - eyeSpacing) / 2
	m.eyeY = m.height / 2
	m.eyePositions[0] = startX
	m.eyePositions[1] = startX + eyeSpacing
}

func (m model) Init() tea.Cmd {
	return tea.Batch(
		tickCmd(),
		tea.EnterAltScreen,
	)
}

// tickCmd schedules the next animation frame (one tick every 50ms).
func tickCmd() tea.Cmd {
	return tea.Tick(50*time.Millisecond, func(t time.Time) tea.Msg {
		return tickMsg(t)
	})
}

// Update quits on ctrl+c/esc, tracks resizes, and advances the blink state
// machine on every tick.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		if msg.Type == tea.KeyCtrlC || msg.Type == tea.KeyEsc {
			return m, tea.Quit
		}

	case tea.WindowSizeMsg:
		m.width = msg.Width
		m.height = msg.Height
		m.updateEyePositions()

	case tickMsg:
		currentTime := time.Now()

		if !m.isBlinking && currentTime.Sub(m.lastBlink) >= m.openTime {
			m.isBlinking = true
			m.blinkState = 0
		}

		if m.isBlinking {
			m.blinkState++
			if m.blinkState >= blinkFrames {
				// Blink finished: schedule the next one at a random delay.
				m.isBlinking = false
				m.lastBlink = currentTime
				m.openTime = time.Duration(rand.Intn(openTimeMax-openTimeMin)+openTimeMin) * time.Millisecond

				// 10% chance of double blink (matching original logic)
				if rand.Intn(10) == 0 {
					m.openTime = 300 * time.Millisecond
				}
			}
		}
	}

	return m, tickCmd()
}

// View rasterizes both eyes onto a character canvas sized to the terminal,
// shrinking the eye height during a blink.
func (m model) View() string {
	// Create empty canvas
	canvas := make([][]string, m.height)
	for y := range canvas {
		canvas[y] = make([]string, m.width)
		for x := range canvas[y] {
			canvas[y][x] = bgChar
		}
	}

	// Calculate current eye height based on blink state
	currentHeight := eyeHeight
	if m.isBlinking {
		var blinkProgress float64
		if m.blinkState < blinkFrames/2 {
			// Closing eyes (with easing function from original)
			blinkProgress = float64(m.blinkState) / float64(blinkFrames/2)
			blinkProgress = 1.0 - (blinkProgress * blinkProgress)
		} else {
			// Opening eyes (with easing function from original)
			blinkProgress = float64(m.blinkState-blinkFrames/2) / float64(blinkFrames/2)
			blinkProgress = blinkProgress * (2.0 - blinkProgress)
		}
		currentHeight = int(math.Max(1, float64(eyeHeight)*blinkProgress))
	}

	// Draw both eyes
	for i := 0; i < 2; i++ {
		drawEllipse(canvas, m.eyePositions[i], m.eyeY, eyeWidth, currentHeight)
	}

	// Convert canvas to string
	var s strings.Builder
	for _, row := range canvas {
		for _, cell := range row {
			s.WriteString(cell)
		}
		s.WriteString("\n")
	}

	// Style output
	style := lipgloss.NewStyle().
		Foreground(lipgloss.Color("#F0F0F0"))

	return style.Render(s.String())
}
// drawEllipse rasterizes a filled ellipse with horizontal radius rx and
// vertical radius ry, centered at (x0, y0), onto canvas by writing eyeChar.
// Cells outside the canvas are skipped.
func drawEllipse(canvas [][]string, x0, y0, rx, ry int) {
	// Guard against an empty canvas and degenerate radii: the original code
	// read canvas[0] before checking the row bound (panicking when the
	// terminal height is 0) and divided by ry without checking it.
	if len(canvas) == 0 || len(canvas[0]) == 0 || rx < 0 || ry <= 0 {
		return
	}
	// Improved ellipse drawing algorithm with better angles
	for y := -ry; y <= ry; y++ {
		// Horizontal half-width at this scanline, from the standard ellipse
		// equation (x/rx)^2 + (y/ry)^2 = 1.
		width := int(float64(rx) * math.Sqrt(1.0-math.Pow(float64(y)/float64(ry), 2.0)))

		for x := -width; x <= width; x++ {
			// Calculate canvas position
			canvasX := x0 + x
			canvasY := y0 + y

			// Make sure we're within canvas bounds
			if canvasX >= 0 && canvasX < len(canvas[0]) && canvasY >= 0 && canvasY < len(canvas) {
				canvas[canvasY][canvasX] = eyeChar
			}
		}
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/focus-blur/main.go | examples/focus-blur/main.go | package main
// A simple program that handled losing and acquiring focus.
import (
"log"
tea "github.com/charmbracelet/bubbletea"
)
func main() {
p := tea.NewProgram(model{
// assume we start focused...
focused: true,
reporting: true,
}, tea.WithReportFocus())
if _, err := p.Run(); err != nil {
log.Fatal(err)
}
}
// model holds the UI state for the focus/blur demo.
type model struct {
	focused   bool // whether the terminal currently has focus
	reporting bool // whether focus reporting is believed enabled
}

// Init performs no initial I/O; focus reporting is enabled via the
// tea.WithReportFocus program option in main.
func (m model) Init() tea.Cmd {
	return nil
}
// Update reacts to focus/blur notifications and key presses.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch typed := msg.(type) {
	case tea.FocusMsg:
		m.focused = true
	case tea.BlurMsg:
		m.focused = false
	case tea.KeyMsg:
		pressed := typed.String()
		if pressed == "t" {
			m.reporting = !m.reporting
		} else if pressed == "ctrl+c" || pressed == "q" {
			return m, tea.Quit
		}
	}
	return m, nil
}
// View describes the reporting toggle and, when reporting is on, whether
// the terminal is focused or blurred.
func (m model) View() string {
	status := "disabled"
	if m.reporting {
		status = "enabled"
	}
	s := "Hi. Focus report is currently " + status + ".\n\n"
	if m.reporting {
		focusLine := "This program is currently blurred!"
		if m.focused {
			focusLine = "This program is currently focused!"
		}
		s += focusLine
	}
	return s + "\n\nTo quit sooner press ctrl-c, or t to toggle focus reporting...\n"
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/autocomplete/main.go | examples/autocomplete/main.go | package main
import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"

	"github.com/charmbracelet/bubbles/help"
	"github.com/charmbracelet/bubbles/key"
	"github.com/charmbracelet/bubbles/textinput"
	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"
)
func main() {
p := tea.NewProgram(initialModel())
if _, err := p.Run(); err != nil {
log.Fatal(err)
}
}
// gotReposSuccessMsg carries the decoded repository list from the GitHub API.
type gotReposSuccessMsg []repo

// gotReposErrMsg signals that fetching or decoding the repo list failed.
type gotReposErrMsg error

// repo is the subset of the GitHub repository JSON we care about.
type repo struct {
	Name string `json:"name"`
}

// reposURL lists the charmbracelet org's public repositories.
const reposURL = "https://api.github.com/orgs/charmbracelet/repos"
// getRepos fetches the charmbracelet repository list and returns either a
// gotReposSuccessMsg or a gotReposErrMsg. It runs as a tea.Cmd.
func getRepos() tea.Msg {
	req, err := http.NewRequest(http.MethodGet, reposURL, nil)
	if err != nil {
		return gotReposErrMsg(err)
	}
	req.Header.Add("Accept", "application/vnd.github+json")
	req.Header.Add("X-GitHub-Api-Version", "2022-11-28")
	// Use a bounded timeout so a stalled network call can't hang this
	// command forever (http.DefaultClient has no timeout).
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return gotReposErrMsg(err)
	}
	defer resp.Body.Close() // nolint: errcheck
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return gotReposErrMsg(err)
	}
	var repos []repo
	if err := json.Unmarshal(data, &repos); err != nil {
		return gotReposErrMsg(err)
	}
	return gotReposSuccessMsg(repos)
}
// model bundles the text input, its help view, and the help keymap.
type model struct {
	textInput textinput.Model
	help      help.Model
	keymap    keymap
}

// keymap is a stateless key.Map implementation used only to render help.
type keymap struct{}

// ShortHelp returns the one-line help entries. Part of the key.Map interface.
func (k keymap) ShortHelp() []key.Binding {
	return []key.Binding{
		key.NewBinding(key.WithKeys("tab"), key.WithHelp("tab", "complete")),
		key.NewBinding(key.WithKeys("ctrl+n"), key.WithHelp("ctrl+n", "next")),
		key.NewBinding(key.WithKeys("ctrl+p"), key.WithHelp("ctrl+p", "prev")),
		key.NewBinding(key.WithKeys("esc"), key.WithHelp("esc", "quit")),
	}
}

// FullHelp returns the expanded help as a single column. Part of key.Map.
func (k keymap) FullHelp() [][]key.Binding {
	return [][]key.Binding{k.ShortHelp()}
}
// initialModel builds the focused text input with suggestions enabled,
// plus a fresh help model and keymap.
func initialModel() model {
	accent := lipgloss.NewStyle().Foreground(lipgloss.Color("63"))
	ti := textinput.New()
	ti.Placeholder = "repository"
	ti.Prompt = "charmbracelet/"
	ti.PromptStyle = accent
	ti.Cursor.Style = accent
	ti.Focus()
	ti.CharLimit = 50
	ti.Width = 20
	ti.ShowSuggestions = true
	return model{textInput: ti, help: help.New(), keymap: keymap{}}
}

// Init kicks off the repo fetch and the cursor blink.
func (m model) Init() tea.Cmd {
	return tea.Batch(getRepos, textinput.Blink)
}
// Update quits on enter/ctrl+c/esc, installs fetched repo names as
// completion suggestions, and forwards every message to the text input.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch typed := msg.(type) {
	case tea.KeyMsg:
		if typed.Type == tea.KeyEnter || typed.Type == tea.KeyCtrlC || typed.Type == tea.KeyEsc {
			return m, tea.Quit
		}
	case gotReposSuccessMsg:
		var names []string
		for _, r := range typed {
			names = append(names, r.Name)
		}
		m.textInput.SetSuggestions(names)
	}
	var cmd tea.Cmd
	m.textInput, cmd = m.textInput.Update(msg)
	return m, cmd
}
// View renders the prompt, the input field, and the help footer.
func (m model) View() string {
	input := m.textInput.View()
	helpView := m.help.View(m.keymap)
	return fmt.Sprintf("Pick a Charm™ repo:\n\n %s\n\n%s\n\n", input, helpView)
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/tui-daemon-combo/main.go | examples/tui-daemon-combo/main.go | package main
import (
"flag"
"fmt"
"io"
"log"
"math/rand"
"os"
"time"
"github.com/charmbracelet/bubbles/spinner"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
"github.com/mattn/go-isatty"
)
var (
	// helpStyle renders dim help text.
	helpStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("241")).Render
	// mainStyle indents the whole UI by one column.
	mainStyle = lipgloss.NewStyle().MarginLeft(1)
)
// main parses flags and runs the program, disabling the renderer when in
// daemon mode or when stdout is not a TTY.
func main() {
	daemonMode := flag.Bool("d", false, "run as a daemon")
	showHelp := flag.Bool("h", false, "show help")
	flag.Parse()

	if *showHelp {
		flag.Usage()
		os.Exit(0)
	}

	var opts []tea.ProgramOption
	if *daemonMode || !isatty.IsTerminal(os.Stdout.Fd()) {
		// If we're in daemon mode don't render the TUI
		opts = append(opts, tea.WithoutRenderer())
	} else {
		// If we're in TUI mode, discard log output
		log.SetOutput(io.Discard)
	}

	if _, err := tea.NewProgram(newModel(), opts...).Run(); err != nil {
		fmt.Println("Error starting Bubble Tea program:", err)
		os.Exit(1)
	}
}
// result records one finished pretend job: how long it took and an emoji.
type result struct {
	duration time.Duration
	emoji    string
}

// model holds the spinner, a rolling window of recent results, and whether
// we're exiting.
type model struct {
	spinner  spinner.Model
	results  []result
	quitting bool
}

// newModel builds the initial model with a styled spinner and a fixed-size
// results window (zero-valued entries render as placeholder rows).
func newModel() model {
	const showLastResults = 5
	sp := spinner.New()
	sp.Style = lipgloss.NewStyle().Foreground(lipgloss.Color("206"))
	return model{
		spinner: sp,
		results: make([]result, showLastResults),
	}
}

// Init starts the spinner animation and the first pretend job.
func (m model) Init() tea.Cmd {
	log.Println("Starting work...")
	return tea.Batch(
		m.spinner.Tick,
		runPretendProcess,
	)
}
// Update quits on any key, animates the spinner, and records finished jobs
// while immediately starting the next one.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch typed := msg.(type) {
	case tea.KeyMsg:
		m.quitting = true
		return m, tea.Quit
	case spinner.TickMsg:
		var cmd tea.Cmd
		m.spinner, cmd = m.spinner.Update(typed)
		return m, cmd
	case processFinishedMsg:
		res := result{
			emoji:    randomEmoji(),
			duration: time.Duration(typed),
		}
		log.Printf("%s Job finished in %s", res.emoji, res.duration)
		// Slide the window: drop the oldest result, append the newest.
		m.results = append(m.results[1:], res)
		return m, runPretendProcess
	}
	return m, nil
}
// View shows the spinner, the recent results (dots for unfilled slots),
// and a help footer.
func (m model) View() string {
	s := "\n" + m.spinner.View() + " Doing some work...\n\n"
	for _, res := range m.results {
		if res.duration == 0 {
			// Placeholder row for a slot that hasn't been filled yet.
			s += "........................\n"
			continue
		}
		s += fmt.Sprintf("%s Job finished in %s\n", res.emoji, res.duration)
	}
	s += helpStyle("\nPress any key to exit\n")
	if m.quitting {
		s += "\n"
	}
	return mainStyle.Render(s)
}
// processFinishedMsg is sent when a pretend process completes; its value
// is the duration the process took.
type processFinishedMsg time.Duration

// runPretendProcess simulates a long-running process by sleeping a random
// 100–998ms and reporting that duration back as a message.
func runPretendProcess() tea.Msg {
	pause := time.Duration(rand.Int63n(899)+100) * time.Millisecond // nolint:gosec
	time.Sleep(pause)
	return processFinishedMsg(pause)
}
// randomEmoji picks one rune at random from a fixed set of food/animal
// emoji and returns it as a string.
func randomEmoji() string {
	choices := []rune("🍦🧋🍡🤠👾😭🦊🐯🦆🥨🎏🍔🍒🍥🎮📦🦁🐶🐸🍕🥐🧲🚒🥇🏆🌽")
	return string(choices[rand.Intn(len(choices))]) // nolint:gosec
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/list-simple/main.go | examples/list-simple/main.go | package main
import (
"fmt"
"io"
"os"
"strings"
"github.com/charmbracelet/bubbles/list"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// listHeight is the fixed height (in rows) of the list widget.
const listHeight = 14

var (
	titleStyle        = lipgloss.NewStyle().MarginLeft(2)
	itemStyle         = lipgloss.NewStyle().PaddingLeft(4)
	selectedItemStyle = lipgloss.NewStyle().PaddingLeft(2).Foreground(lipgloss.Color("170"))
	paginationStyle   = list.DefaultStyles().PaginationStyle.PaddingLeft(4)
	helpStyle         = list.DefaultStyles().HelpStyle.PaddingLeft(4).PaddingBottom(1)
	quitTextStyle     = lipgloss.NewStyle().Margin(1, 0, 2, 4)
)

// item is a plain-string list entry.
type item string

// FilterValue returns an empty string: filtering is disabled for this list.
func (i item) FilterValue() string { return "" }
// itemDelegate renders items as single, unspaced lines, highlighting the
// selected one with a "> " prefix.
type itemDelegate struct{}

// Height is the number of rows a single item occupies.
func (d itemDelegate) Height() int { return 1 }

// Spacing is the number of blank rows between items.
func (d itemDelegate) Spacing() int { return 0 }

// Update handles no messages; this delegate is render-only.
func (d itemDelegate) Update(_ tea.Msg, _ *list.Model) tea.Cmd { return nil }

// Render writes one numbered list row, styled according to selection.
func (d itemDelegate) Render(w io.Writer, m list.Model, index int, listItem list.Item) {
	it, ok := listItem.(item)
	if !ok {
		return
	}
	line := fmt.Sprintf("%d. %s", index+1, it)
	if index == m.Index() {
		line = selectedItemStyle.Render(strings.Join([]string{">", line}, " "))
	} else {
		line = itemStyle.Render(line)
	}
	fmt.Fprint(w, line)
}
// model wraps the list plus the user's final choice and quit state.
type model struct {
	list     list.Model
	choice   string
	quitting bool
}

// Init performs no initial commands.
func (m model) Init() tea.Cmd {
	return nil
}
// Update resizes the list on window changes, records the selection on
// enter, quits on q/ctrl+c, and forwards everything else to the list.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch typed := msg.(type) {
	case tea.WindowSizeMsg:
		m.list.SetWidth(typed.Width)
		return m, nil
	case tea.KeyMsg:
		switch typed.String() {
		case "q", "ctrl+c":
			m.quitting = true
			return m, tea.Quit
		case "enter":
			if it, ok := m.list.SelectedItem().(item); ok {
				m.choice = string(it)
			}
			return m, tea.Quit
		}
	}
	var cmd tea.Cmd
	m.list, cmd = m.list.Update(msg)
	return m, cmd
}
// View shows a farewell once a choice was made or the user quit,
// otherwise the list itself.
func (m model) View() string {
	switch {
	case m.choice != "":
		return quitTextStyle.Render(fmt.Sprintf("%s? Sounds good to me.", m.choice))
	case m.quitting:
		return quitTextStyle.Render("Not hungry? That’s cool.")
	default:
		return "\n" + m.list.View()
	}
}
// main builds the dinner list with the custom delegate and runs the program.
func main() {
	items := []list.Item{
		item("Ramen"),
		item("Tomato Soup"),
		item("Hamburgers"),
		item("Cheeseburgers"),
		item("Currywurst"),
		item("Okonomiyaki"),
		item("Pasta"),
		item("Fillet Mignon"),
		item("Caviar"),
		item("Just Wine"),
	}

	// Pre-resize width; Update adjusts it on the first WindowSizeMsg.
	const defaultWidth = 20

	l := list.New(items, itemDelegate{}, defaultWidth, listHeight)
	l.Title = "What do you want for dinner?"
	l.SetShowStatusBar(false)
	l.SetFilteringEnabled(false)
	l.Styles.Title = titleStyle
	l.Styles.PaginationStyle = paginationStyle
	l.Styles.HelpStyle = helpStyle

	m := model{list: l}

	if _, err := tea.NewProgram(m).Run(); err != nil {
		fmt.Println("Error running program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/progress-download/tui.go | examples/progress-download/tui.go | package main
import (
"strings"
"time"
"github.com/charmbracelet/bubbles/progress"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// helpStyle renders dim help text.
var helpStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#626262")).Render

const (
	padding  = 2  // horizontal padding around the progress bar
	maxWidth = 80 // cap on the progress bar's width
)

// progressMsg reports download progress as a ratio in [0, 1].
type progressMsg float64

// progressErrMsg reports a download failure.
type progressErrMsg struct{ err error }

// finalPause waits briefly so the final 100% frame is visible before the
// program exits.
func finalPause() tea.Cmd {
	return tea.Tick(time.Millisecond*750, func(_ time.Time) tea.Msg {
		return nil
	})
}

// model holds the progress writer driving the download, the progress bar,
// and any terminal error.
type model struct {
	pw       *progressWriter
	progress progress.Model
	err      error
}

// Init performs no initial commands; the download goroutine is started in
// main.
func (m model) Init() tea.Cmd {
	return nil
}
// Update resizes the bar, records errors, advances the bar on progress
// messages (quitting after a final pause at 100%), and animates frames.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch typed := msg.(type) {
	case tea.KeyMsg:
		return m, tea.Quit
	case tea.WindowSizeMsg:
		w := typed.Width - padding*2 - 4
		if w > maxWidth {
			w = maxWidth
		}
		m.progress.Width = w
		return m, nil
	case progressErrMsg:
		m.err = typed.err
		return m, tea.Quit
	case progressMsg:
		var cmds []tea.Cmd
		if typed >= 1.0 {
			cmds = append(cmds, tea.Sequence(finalPause(), tea.Quit))
		}
		cmds = append(cmds, m.progress.SetPercent(float64(typed)))
		return m, tea.Batch(cmds...)
	case progress.FrameMsg:
		// FrameMsg is sent when the progress bar wants to animate itself.
		pm, cmd := m.progress.Update(typed)
		m.progress = pm.(progress.Model)
		return m, cmd
	}
	return m, nil
}
// View shows the error, if any, otherwise the padded progress bar and help.
func (m model) View() string {
	if m.err != nil {
		return "Error downloading: " + m.err.Error() + "\n"
	}
	indent := strings.Repeat(" ", padding)
	out := "\n" + indent + m.progress.View() + "\n\n"
	out += indent + helpStyle("Press any key to quit")
	return out
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/progress-download/main.go | examples/progress-download/main.go | package main
import (
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"path/filepath"
"github.com/charmbracelet/bubbles/progress"
tea "github.com/charmbracelet/bubbletea"
)
// p is the running Bubble Tea program; the download goroutine uses it to
// send progress messages into the UI.
var p *tea.Program

// progressWriter copies the HTTP response body to a file while reporting
// progress ratios through onProgress.
type progressWriter struct {
	total      int      // expected byte count (Content-Length)
	downloaded int      // bytes written so far
	file       *os.File // destination file
	reader     io.Reader
	onProgress func(float64)
}

// Start runs the download to completion, reporting an error to the UI if
// the copy fails. Intended to run in its own goroutine.
func (pw *progressWriter) Start() {
	// TeeReader calls pw.Write() each time a new response is received
	_, err := io.Copy(pw.file, io.TeeReader(pw.reader, pw))
	if err != nil {
		p.Send(progressErrMsg{err})
	}
}
// Write implements io.Writer. It counts bytes as they stream through the
// TeeReader and reports the completion ratio. The parameter is named b
// rather than p to avoid shadowing the package-level *tea.Program p.
func (pw *progressWriter) Write(b []byte) (int, error) {
	pw.downloaded += len(b)
	if pw.total > 0 && pw.onProgress != nil {
		pw.onProgress(float64(pw.downloaded) / float64(pw.total))
	}
	return len(b), nil
}
// getResponse performs the GET and verifies a 200 OK response.
//
// A transport-level failure is fatal here (log.Fatal), matching the
// original example. On a non-200 status the response body is closed
// before returning the error, since the caller only closes the body of a
// successfully returned response — without this the connection leaks.
func getResponse(url string) (*http.Response, error) {
	resp, err := http.Get(url) // nolint:gosec
	if err != nil {
		log.Fatal(err)
	}
	if resp.StatusCode != http.StatusOK {
		// Last chance to release the connection: the caller never sees
		// resp on the error path.
		resp.Body.Close() // nolint:errcheck
		return nil, fmt.Errorf("receiving status of %d for url: %s", resp.StatusCode, url)
	}
	return resp, nil
}
// main validates the -url flag, performs the request, and runs the TUI
// while the download streams in a background goroutine.
func main() {
	url := flag.String("url", "", "url for the file to download")
	flag.Parse()
	if *url == "" {
		flag.Usage()
		os.Exit(1)
	}

	resp, err := getResponse(*url)
	if err != nil {
		fmt.Println("could not get response", err)
		os.Exit(1)
	}
	defer resp.Body.Close() // nolint:errcheck

	// Don't add TUI if the header doesn't include content size
	// it's impossible see progress without total
	if resp.ContentLength <= 0 {
		fmt.Println("can't parse content length, aborting download")
		os.Exit(1)
	}

	// Save under the last path segment of the URL.
	filename := filepath.Base(*url)
	file, err := os.Create(filename)
	if err != nil {
		fmt.Println("could not create file:", err)
		os.Exit(1)
	}
	defer file.Close() // nolint:errcheck

	pw := &progressWriter{
		total:  int(resp.ContentLength),
		file:   file,
		reader: resp.Body,
		onProgress: func(ratio float64) {
			// Forward progress ratios into the running program.
			p.Send(progressMsg(ratio))
		},
	}

	m := model{
		pw:       pw,
		progress: progress.New(progress.WithDefaultGradient()),
	}
	// Start Bubble Tea
	p = tea.NewProgram(m)

	// Start the download
	go pw.Start()

	if _, err := p.Run(); err != nil {
		fmt.Println("error running program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/split-editors/main.go | examples/split-editors/main.go | package main
import (
"fmt"
"os"
"github.com/charmbracelet/bubbles/help"
"github.com/charmbracelet/bubbles/key"
"github.com/charmbracelet/bubbles/textarea"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
const (
	initialInputs = 2 // editors shown at startup
	maxInputs     = 6 // upper bound on editor count
	minInputs     = 1 // lower bound on editor count
	helpHeight    = 5 // rows reserved for the help bar
)

var (
	cursorStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("212"))

	cursorLineStyle = lipgloss.NewStyle().
			Background(lipgloss.Color("57")).
			Foreground(lipgloss.Color("230"))

	placeholderStyle = lipgloss.NewStyle().
				Foreground(lipgloss.Color("238"))

	endOfBufferStyle = lipgloss.NewStyle().
				Foreground(lipgloss.Color("235"))

	focusedPlaceholderStyle = lipgloss.NewStyle().
				Foreground(lipgloss.Color("99"))

	focusedBorderStyle = lipgloss.NewStyle().
				Border(lipgloss.RoundedBorder()).
				BorderForeground(lipgloss.Color("238"))

	blurredBorderStyle = lipgloss.NewStyle().
				Border(lipgloss.HiddenBorder())
)

// keymap groups the app-level key bindings.
type keymap = struct {
	next, prev, add, remove, quit key.Binding
}
// newTextarea builds a blurred textarea configured with the shared styles.
// Line navigation is rebound to the bare arrow keys.
func newTextarea() textarea.Model {
	t := textarea.New()
	t.Prompt = ""
	t.Placeholder = "Type something"
	t.ShowLineNumbers = true
	t.Cursor.Style = cursorStyle
	t.FocusedStyle.Placeholder = focusedPlaceholderStyle
	t.BlurredStyle.Placeholder = placeholderStyle
	t.FocusedStyle.CursorLine = cursorLineStyle
	t.FocusedStyle.Base = focusedBorderStyle
	t.BlurredStyle.Base = blurredBorderStyle
	t.FocusedStyle.EndOfBuffer = endOfBufferStyle
	t.BlurredStyle.EndOfBuffer = endOfBufferStyle
	t.KeyMap.DeleteWordBackward.SetEnabled(false)
	t.KeyMap.LineNext = key.NewBinding(key.WithKeys("down"))
	t.KeyMap.LinePrevious = key.NewBinding(key.WithKeys("up"))
	t.Blur()
	return t
}
// model holds the window size, key bindings, help view, the editors, and
// the index of the focused editor.
type model struct {
	width  int
	height int
	keymap keymap
	help   help.Model
	inputs []textarea.Model
	focus  int
}

// newModel creates the initial editors and focuses the first one.
func newModel() model {
	m := model{
		inputs: make([]textarea.Model, initialInputs),
		help:   help.New(),
		keymap: keymap{
			next: key.NewBinding(
				key.WithKeys("tab"),
				key.WithHelp("tab", "next"),
			),
			prev: key.NewBinding(
				key.WithKeys("shift+tab"),
				key.WithHelp("shift+tab", "prev"),
			),
			add: key.NewBinding(
				key.WithKeys("ctrl+n"),
				key.WithHelp("ctrl+n", "add an editor"),
			),
			remove: key.NewBinding(
				key.WithKeys("ctrl+w"),
				key.WithHelp("ctrl+w", "remove an editor"),
			),
			quit: key.NewBinding(
				key.WithKeys("esc", "ctrl+c"),
				key.WithHelp("esc", "quit"),
			),
		},
	}
	for i := 0; i < initialInputs; i++ {
		m.inputs[i] = newTextarea()
	}
	m.inputs[m.focus].Focus()
	m.updateKeybindings()
	return m
}

// Init starts the cursor blink.
func (m model) Init() tea.Cmd {
	return textarea.Blink
}
// Update handles quitting, focus cycling, adding/removing editors, and
// window resizes, then relays the message to every textarea.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmds []tea.Cmd
	switch typed := msg.(type) {
	case tea.KeyMsg:
		switch {
		case key.Matches(typed, m.keymap.quit):
			for i := range m.inputs {
				m.inputs[i].Blur()
			}
			return m, tea.Quit
		case key.Matches(typed, m.keymap.next):
			m.inputs[m.focus].Blur()
			// Wrap forward.
			m.focus = (m.focus + 1) % len(m.inputs)
			cmds = append(cmds, m.inputs[m.focus].Focus())
		case key.Matches(typed, m.keymap.prev):
			m.inputs[m.focus].Blur()
			// Wrap backward.
			m.focus--
			if m.focus < 0 {
				m.focus = len(m.inputs) - 1
			}
			cmds = append(cmds, m.inputs[m.focus].Focus())
		case key.Matches(typed, m.keymap.add):
			m.inputs = append(m.inputs, newTextarea())
		case key.Matches(typed, m.keymap.remove):
			m.inputs = m.inputs[:len(m.inputs)-1]
			if m.focus > len(m.inputs)-1 {
				m.focus = len(m.inputs) - 1
			}
		}
	case tea.WindowSizeMsg:
		m.height = typed.Height
		m.width = typed.Width
	}

	m.updateKeybindings()
	m.sizeInputs()

	// Relay the message to every textarea so all stay up to date.
	for i := range m.inputs {
		updated, cmd := m.inputs[i].Update(msg)
		m.inputs[i] = updated
		cmds = append(cmds, cmd)
	}

	return m, tea.Batch(cmds...)
}
// sizeInputs divides the window width evenly among the editors and gives
// each the full height minus room for the help bar.
func (m *model) sizeInputs() {
	n := len(m.inputs)
	for i := range m.inputs {
		m.inputs[i].SetWidth(m.width / n)
		m.inputs[i].SetHeight(m.height - helpHeight)
	}
}

// updateKeybindings enables add/remove only while within the allowed
// editor-count range.
func (m *model) updateKeybindings() {
	n := len(m.inputs)
	m.keymap.add.SetEnabled(n < maxInputs)
	m.keymap.remove.SetEnabled(n > minInputs)
}
// View lays the editors out side by side with the help bar underneath.
func (m model) View() string {
	views := make([]string, 0, len(m.inputs))
	for i := range m.inputs {
		views = append(views, m.inputs[i].View())
	}
	help := m.help.ShortHelpView([]key.Binding{
		m.keymap.next,
		m.keymap.prev,
		m.keymap.add,
		m.keymap.remove,
		m.keymap.quit,
	})
	return lipgloss.JoinHorizontal(lipgloss.Top, views...) + "\n\n" + help
}
// main runs the editors demo in the alternate screen buffer.
func main() {
	p := tea.NewProgram(newModel(), tea.WithAltScreen())
	if _, err := p.Run(); err != nil {
		fmt.Println("Error while running program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/help/main.go | examples/help/main.go | package main
import (
"fmt"
"os"
"strings"
"github.com/charmbracelet/bubbles/help"
"github.com/charmbracelet/bubbles/key"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// keyMap defines a set of keybindings. To work for help it must satisfy
// key.Map. It could also very easily be a map[string]key.Binding.
type keyMap struct {
	Up    key.Binding
	Down  key.Binding
	Left  key.Binding
	Right key.Binding
	Help  key.Binding
	Quit  key.Binding
}

// ShortHelp returns keybindings to be shown in the mini help view. It's part
// of the key.Map interface.
func (k keyMap) ShortHelp() []key.Binding {
	return []key.Binding{k.Help, k.Quit}
}

// FullHelp returns keybindings for the expanded help view. It's part of the
// key.Map interface.
func (k keyMap) FullHelp() [][]key.Binding {
	return [][]key.Binding{
		{k.Up, k.Down, k.Left, k.Right}, // first column
		{k.Help, k.Quit},                // second column
	}
}

// keys is the application's default keymap: arrow/vim-style movement plus
// help and quit bindings.
var keys = keyMap{
	Up: key.NewBinding(
		key.WithKeys("up", "k"),
		key.WithHelp("↑/k", "move up"),
	),
	Down: key.NewBinding(
		key.WithKeys("down", "j"),
		key.WithHelp("↓/j", "move down"),
	),
	Left: key.NewBinding(
		key.WithKeys("left", "h"),
		key.WithHelp("←/h", "move left"),
	),
	Right: key.NewBinding(
		key.WithKeys("right", "l"),
		key.WithHelp("→/l", "move right"),
	),
	Help: key.NewBinding(
		key.WithKeys("?"),
		key.WithHelp("?", "toggle help"),
	),
	Quit: key.NewBinding(
		key.WithKeys("q", "esc", "ctrl+c"),
		key.WithHelp("q", "quit"),
	),
}
// model holds the keymap, the help bubble, the style for echoed input,
// the last direction key pressed, and the quit flag.
type model struct {
	keys       keyMap
	help       help.Model
	inputStyle lipgloss.Style
	lastKey    string
	quitting   bool
}
// newModel constructs the initial model with the default keymap, a help
// bubble, and the pink input style.
func newModel() model {
	pink := lipgloss.NewStyle().Foreground(lipgloss.Color("#FF75B7"))
	return model{
		keys:       keys,
		help:       help.New(),
		inputStyle: pink,
	}
}

// Init performs no initial commands.
func (m model) Init() tea.Cmd {
	return nil
}
// Update tracks the last direction key pressed, toggles the expanded
// help, and quits on the quit binding.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch typed := msg.(type) {
	case tea.WindowSizeMsg:
		// Give the help menu the width so it can truncate gracefully.
		m.help.Width = typed.Width
	case tea.KeyMsg:
		switch {
		case key.Matches(typed, m.keys.Up):
			m.lastKey = "↑"
		case key.Matches(typed, m.keys.Down):
			m.lastKey = "↓"
		case key.Matches(typed, m.keys.Left):
			m.lastKey = "←"
		case key.Matches(typed, m.keys.Right):
			m.lastKey = "→"
		case key.Matches(typed, m.keys.Help):
			m.help.ShowAll = !m.help.ShowAll
		case key.Matches(typed, m.keys.Quit):
			m.quitting = true
			return m, tea.Quit
		}
	}
	return m, nil
}
// View shows the last key pressed and pins the help view toward the
// bottom by padding with blank lines.
func (m model) View() string {
	if m.quitting {
		return "Bye!\n"
	}

	var status string
	if m.lastKey == "" {
		status = "Waiting for input..."
	} else {
		status = "You chose: " + m.inputStyle.Render(m.lastKey)
	}

	helpView := m.help.View(m.keys)
	// Clamp to zero: strings.Repeat panics on a negative count, which can
	// happen if the status or help view ever spans more than eight lines
	// (e.g. the full help wrapping in a narrow terminal).
	height := 8 - strings.Count(status, "\n") - strings.Count(helpView, "\n")
	if height < 0 {
		height = 0
	}

	return "\n" + status + strings.Repeat("\n", height) + helpView
}
// main optionally enables file logging (HELP_DEBUG) and runs the program.
func main() {
	if os.Getenv("HELP_DEBUG") != "" {
		f, err := tea.LogToFile("debug.log", "help")
		if err != nil {
			fmt.Println("Couldn't open a file for logging:", err)
			os.Exit(1)
		}
		defer f.Close() // nolint:errcheck
	}

	_, err := tea.NewProgram(newModel()).Run()
	if err != nil {
		fmt.Printf("Could not start program :(\n%v\n", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/cellbuffer/main.go | examples/cellbuffer/main.go | package main
// A simple example demonstrating how to draw and animate on a cellular grid.
// Note that the cellbuffer implementation in this example does not support
// double-width runes.
import (
"fmt"
"os"
"strings"
"time"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/harmonica"
)
const (
	fps       = 60   // animation frame rate
	frequency = 7.5  // spring angular frequency
	damping   = 0.15 // spring damping ratio
	asterisk  = "*"  // glyph used to mark filled cells
)
// drawEllipse rasterizes an ellipse outline centered at (xc, yc) with
// radii rx and ry onto the cell buffer using the midpoint ellipse
// algorithm: region 1 steps along x while the curve is shallow, region 2
// steps along y for the steep remainder. Each step plots the four
// symmetric points of the quadrant being traced.
func drawEllipse(cb *cellbuffer, xc, yc, rx, ry float64) {
	var (
		dx, dy, d1, d2 float64
		x              float64
		y              = ry
	)

	// Region 1: initial decision parameter.
	d1 = ry*ry - rx*rx*ry + 0.25*rx*rx
	dx = 2 * ry * ry * x
	dy = 2 * rx * rx * y

	for dx < dy {
		cb.set(int(x+xc), int(y+yc))
		cb.set(int(-x+xc), int(y+yc))
		cb.set(int(x+xc), int(-y+yc))
		cb.set(int(-x+xc), int(-y+yc))
		if d1 < 0 {
			x++
			dx = dx + (2 * ry * ry)
			d1 = d1 + dx + (ry * ry)
		} else {
			x++
			y--
			dx = dx + (2 * ry * ry)
			dy = dy - (2 * rx * rx)
			d1 = d1 + dx - dy + (ry * ry)
		}
	}

	// Region 2: decision parameter for the steep part of the curve.
	d2 = ((ry * ry) * ((x + 0.5) * (x + 0.5))) + ((rx * rx) * ((y - 1) * (y - 1))) - (rx * rx * ry * ry)

	for y >= 0 {
		cb.set(int(x+xc), int(y+yc))
		cb.set(int(-x+xc), int(y+yc))
		cb.set(int(x+xc), int(-y+yc))
		cb.set(int(-x+xc), int(-y+yc))
		if d2 > 0 {
			y--
			dy = dy - (2 * rx * rx)
			d2 = d2 + (rx * rx) - dy
		} else {
			y--
			x++
			dx = dx + (2 * ry * ry)
			dy = dy - (2 * rx * rx)
			d2 = d2 + dx - dy + (rx * rx)
		}
	}
}
// cellbuffer is a flat, row-major grid of single-width cells.
type cellbuffer struct {
	cells  []string
	stride int // row width; 0 means uninitialized
}

// init (re)allocates the buffer for a w×h grid and clears it. A zero
// width is ignored, avoiding a zero stride.
func (c *cellbuffer) init(w, h int) {
	if w == 0 {
		return
	}
	c.stride = w
	c.cells = make([]string, w*h)
	c.wipe()
}

// set marks the cell at (x, y) with an asterisk; out-of-bounds
// coordinates are silently ignored.
func (c cellbuffer) set(x, y int) {
	i := y*c.stride + x
	if i > len(c.cells)-1 || x < 0 || y < 0 || x >= c.width() || y >= c.height() {
		return
	}
	c.cells[i] = asterisk
}

// wipe resets every cell to a blank space.
func (c *cellbuffer) wipe() {
	for i := range c.cells {
		c.cells[i] = " "
	}
}

// width returns the number of columns.
func (c cellbuffer) width() int {
	return c.stride
}

// height returns the number of rows, rounding a partial last row up.
func (c cellbuffer) height() int {
	h := len(c.cells) / c.stride
	if len(c.cells)%c.stride != 0 {
		h++
	}
	return h
}

// ready reports whether the buffer has been sized yet.
func (c cellbuffer) ready() bool {
	return len(c.cells) > 0
}

// String joins the cells into newline-separated rows.
func (c cellbuffer) String() string {
	var b strings.Builder
	for i := 0; i < len(c.cells); i++ {
		if i > 0 && i%c.stride == 0 && i < len(c.cells)-1 {
			b.WriteRune('\n')
		}
		b.WriteString(c.cells[i])
	}
	return b.String()
}
// frameMsg signals that it's time to render the next animation frame.
type frameMsg struct{}

// animate schedules the next frame at the configured frame rate.
func animate() tea.Cmd {
	return tea.Tick(time.Second/fps, func(_ time.Time) tea.Msg {
		return frameMsg{}
	})
}

// model holds the canvas, the spring, the animation target, and the
// ellipse's current position and velocity.
type model struct {
	cells                cellbuffer
	spring               harmonica.Spring
	targetX, targetY     float64
	x, y                 float64
	xVelocity, yVelocity float64
}

// Init schedules the first animation frame.
func (m model) Init() tea.Cmd {
	return animate()
}
// Update quits on any key, (re)initializes the canvas on resize,
// retargets the spring on mouse events, and steps the animation each
// frame.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch typed := msg.(type) {
	case tea.KeyMsg:
		return m, tea.Quit
	case tea.WindowSizeMsg:
		if !m.cells.ready() {
			// First sizing: aim the ellipse at the screen center.
			m.targetX = float64(typed.Width) / 2
			m.targetY = float64(typed.Height) / 2
		}
		m.cells.init(typed.Width, typed.Height)
		return m, nil
	case tea.MouseMsg:
		if m.cells.ready() {
			m.targetX, m.targetY = float64(typed.X), float64(typed.Y)
		}
		return m, nil
	case frameMsg:
		if !m.cells.ready() {
			return m, nil
		}
		m.cells.wipe()
		// Advance the spring toward the target and redraw.
		m.x, m.xVelocity = m.spring.Update(m.x, m.xVelocity, m.targetX)
		m.y, m.yVelocity = m.spring.Update(m.y, m.yVelocity, m.targetY)
		drawEllipse(&m.cells, m.x, m.y, 16, 8)
		return m, animate()
	}
	return m, nil
}
// View renders the canvas contents.
func (m model) View() string {
	return m.cells.String()
}

// main wires up the spring and runs the program with mouse tracking in
// the alternate screen.
func main() {
	initial := model{
		spring: harmonica.NewSpring(harmonica.FPS(fps), frequency, damping),
	}
	program := tea.NewProgram(initial, tea.WithAltScreen(), tea.WithMouseCellMotion())
	if _, err := program.Run(); err != nil {
		fmt.Println("Uh oh:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/list-default/main.go | examples/list-default/main.go | package main
import (
"fmt"
"os"
"github.com/charmbracelet/bubbles/list"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// docStyle adds a margin around the whole view.
var docStyle = lipgloss.NewStyle().Margin(1, 2)

// item is a list entry with a title and a description.
type item struct {
	title, desc string
}

// Title returns the item's title line.
func (i item) Title() string { return i.title }

// Description returns the item's description line.
func (i item) Description() string { return i.desc }

// FilterValue is what the list's filter matches against.
func (i item) FilterValue() string { return i.title }

// model wraps the list bubble.
type model struct {
	list list.Model
}

// Init performs no initial commands.
func (m model) Init() tea.Cmd {
	return nil
}
// Update quits on ctrl+c, resizes the list to fit inside the document
// margins, and forwards everything to the list.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch typed := msg.(type) {
	case tea.KeyMsg:
		if typed.String() == "ctrl+c" {
			return m, tea.Quit
		}
	case tea.WindowSizeMsg:
		frameW, frameH := docStyle.GetFrameSize()
		m.list.SetSize(typed.Width-frameW, typed.Height-frameH)
	}

	var cmd tea.Cmd
	m.list, cmd = m.list.Update(msg)
	return m, cmd
}

// View renders the list inside the document margins.
func (m model) View() string {
	return docStyle.Render(m.list.View())
}
// main builds the "fave things" list with the default delegate and runs
// the program in the alternate screen.
func main() {
	items := []list.Item{
		item{title: "Raspberry Pi’s", desc: "I have ’em all over my house"},
		item{title: "Nutella", desc: "It's good on toast"},
		item{title: "Bitter melon", desc: "It cools you down"},
		item{title: "Nice socks", desc: "And by that I mean socks without holes"},
		item{title: "Eight hours of sleep", desc: "I had this once"},
		item{title: "Cats", desc: "Usually"},
		item{title: "Plantasia, the album", desc: "My plants love it too"},
		item{title: "Pour over coffee", desc: "It takes forever to make though"},
		item{title: "VR", desc: "Virtual reality...what is there to say?"},
		item{title: "Noguchi Lamps", desc: "Such pleasing organic forms"},
		item{title: "Linux", desc: "Pretty much the best OS"},
		item{title: "Business school", desc: "Just kidding"},
		item{title: "Pottery", desc: "Wet clay is a great feeling"},
		item{title: "Shampoo", desc: "Nothing like clean hair"},
		item{title: "Table tennis", desc: "It’s surprisingly exhausting"},
		item{title: "Milk crates", desc: "Great for packing in your extra stuff"},
		item{title: "Afternoon tea", desc: "Especially the tea sandwich part"},
		item{title: "Stickers", desc: "The thicker the vinyl the better"},
		item{title: "20° Weather", desc: "Celsius, not Fahrenheit"},
		item{title: "Warm light", desc: "Like around 2700 Kelvin"},
		item{title: "The vernal equinox", desc: "The autumnal equinox is pretty good too"},
		item{title: "Gaffer’s tape", desc: "Basically sticky fabric"},
		item{title: "Terrycloth", desc: "In other words, towel fabric"},
	}

	// Size 0×0 here; Update sets the real size on the first WindowSizeMsg.
	m := model{list: list.New(items, list.NewDefaultDelegate(), 0, 0)}
	m.list.Title = "My Fave Things"

	p := tea.NewProgram(m, tea.WithAltScreen())

	if _, err := p.Run(); err != nil {
		fmt.Println("Error running program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/progress-animated/main.go | examples/progress-animated/main.go | package main
// A simple example that shows how to render an animated progress bar. In this
// example we bump the progress by 25% every second, animating our
// progress bar to its new target state.
//
// It's also possible to render a progress bar in a more static fashion without
// transitions. For details on that approach see the progress-static example.
import (
"fmt"
"os"
"strings"
"time"
"github.com/charmbracelet/bubbles/progress"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
const (
	padding  = 2  // horizontal padding around the progress bar
	maxWidth = 80 // cap on the progress bar's width
)

// helpStyle renders dim help text.
var helpStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#626262")).Render
// main runs the animated progress demo with the default gradient bar.
func main() {
	initial := model{
		progress: progress.New(progress.WithDefaultGradient()),
	}
	if _, err := tea.NewProgram(initial).Run(); err != nil {
		fmt.Println("Oh no!", err)
		os.Exit(1)
	}
}
// tickMsg fires once per tickCmd interval and drives progress increments.
type tickMsg time.Time

// model wraps the animated progress bar.
type model struct {
	progress progress.Model
}

// Init schedules the first tick.
func (m model) Init() tea.Cmd {
	return tickCmd()
}
// Update quits on any key or at 100%, resizes the bar, bumps the progress
// 25% per tick, and animates frames.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch typed := msg.(type) {
	case tea.KeyMsg:
		return m, tea.Quit
	case tea.WindowSizeMsg:
		w := typed.Width - padding*2 - 4
		if w > maxWidth {
			w = maxWidth
		}
		m.progress.Width = w
		return m, nil
	case tickMsg:
		if m.progress.Percent() == 1.0 {
			return m, tea.Quit
		}
		// progress.Model.SetPercent would set the value explicitly instead
		// of incrementing it.
		return m, tea.Batch(tickCmd(), m.progress.IncrPercent(0.25))
	case progress.FrameMsg:
		// FrameMsg is sent when the progress bar wants to animate itself.
		pm, cmd := m.progress.Update(typed)
		m.progress = pm.(progress.Model)
		return m, cmd
	}
	return m, nil
}
// View renders the padded progress bar with a help footer.
func (m model) View() string {
	indent := strings.Repeat(" ", padding)
	return "\n" + indent + m.progress.View() + "\n\n" + indent + helpStyle("Press any key to quit")
}

// tickCmd schedules the next tick one second from now.
func tickCmd() tea.Cmd {
	return tea.Tick(time.Second*1, func(t time.Time) tea.Msg {
		return tickMsg(t)
	})
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/set-window-title/main.go | examples/set-window-title/main.go | package main
// A simple example illustrating how to set a window title.
import (
"fmt"
"os"
tea "github.com/charmbracelet/bubbletea"
)
type model struct{}
func (m model) Init() tea.Cmd {
return tea.SetWindowTitle("Bubble Tea Example")
}
// Update quits on any keypress and ignores every other message.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	if _, isKey := msg.(tea.KeyMsg); isKey {
		return m, tea.Quit
	}
	return m, nil
}
// View shows a static quit hint; the interesting part of this example is
// the window title set in Init.
func (m model) View() string {
	const hint = "\nPress any key to quit."
	return hint
}
// main runs the program and reports any error on exit.
func main() {
	p := tea.NewProgram(model{})
	if _, err := p.Run(); err != nil {
		fmt.Println("Uh oh:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/exec/main.go | examples/exec/main.go | package main
import (
"fmt"
"os"
"os/exec"
tea "github.com/charmbracelet/bubbletea"
)
type editorFinishedMsg struct{ err error }
// openEditor launches $EDITOR (falling back to vim) as a blocking external
// process; when it exits, an editorFinishedMsg carrying its error is sent
// back into the program.
func openEditor() tea.Cmd {
	editor, ok := os.LookupEnv("EDITOR")
	if !ok || editor == "" {
		editor = "vim"
	}
	onExit := func(err error) tea.Msg { return editorFinishedMsg{err} }
	return tea.ExecProcess(exec.Command(editor), onExit) //nolint:gosec
}
type model struct {
altscreenActive bool
err error
}
func (m model) Init() tea.Cmd {
return nil
}
// Update handles keys ('a' toggles the altscreen, 'e' opens the editor,
// 'q'/ctrl+c quits) and reacts to the editor process finishing.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "a":
			// Flip the flag first, then pick the matching command.
			m.altscreenActive = !m.altscreenActive
			cmd := tea.EnterAltScreen
			if !m.altscreenActive {
				cmd = tea.ExitAltScreen
			}
			return m, cmd
		case "e":
			return m, openEditor()
		case "ctrl+c", "q":
			return m, tea.Quit
		}
	case editorFinishedMsg:
		// The editor exited. Quit only if it failed; otherwise stay running.
		if msg.err != nil {
			m.err = msg.err
			return m, tea.Quit
		}
	}
	return m, nil
}
func (m model) View() string {
if m.err != nil {
return "Error: " + m.err.Error() + "\n"
}
return "Press 'e' to open your EDITOR.\nPress 'a' to toggle the altscreen\nPress 'q' to quit.\n"
}
func main() {
m := model{}
if _, err := tea.NewProgram(m).Run(); err != nil {
fmt.Println("Error running program:", err)
os.Exit(1)
}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/altscreen-toggle/main.go | examples/altscreen-toggle/main.go | package main
import (
"fmt"
"os"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
var (
keywordStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("204")).Background(lipgloss.Color("235"))
helpStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("241"))
)
type model struct {
altscreen bool
quitting bool
suspending bool
}
func (m model) Init() tea.Cmd {
return nil
}
// Update handles resume-after-suspend, quit keys, ctrl+z suspension, and the
// spacebar toggle between inline and altscreen rendering.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.ResumeMsg:
		// The process was resumed after a ctrl+z suspend.
		m.suspending = false
		return m, nil
	case tea.KeyMsg:
		switch msg.String() {
		case "q", "ctrl+c", "esc":
			m.quitting = true
			return m, tea.Quit
		case "ctrl+z":
			m.suspending = true
			return m, tea.Suspend
		case " ":
			// Toggle between the alternate screen buffer and inline mode.
			var cmd tea.Cmd
			if m.altscreen {
				cmd = tea.ExitAltScreen
			} else {
				cmd = tea.EnterAltScreen
			}
			m.altscreen = !m.altscreen
			return m, cmd
		}
	}
	return m, nil
}
// View renders the current screen: nothing while suspended, a farewell when
// quitting, otherwise the active mode label plus key help.
func (m model) View() string {
	switch {
	case m.suspending:
		return ""
	case m.quitting:
		return "Bye!\n"
	}

	const (
		altscreenMode = " altscreen mode "
		inlineMode    = " inline mode "
	)

	mode := inlineMode
	if m.altscreen {
		mode = altscreenMode
	}

	help := helpStyle.Render(" space: switch modes • ctrl-z: suspend • q: exit\n")
	return fmt.Sprintf("\n\n You're in %s\n\n\n", keywordStyle.Render(mode)) + help
}
func main() {
if _, err := tea.NewProgram(model{}).Run(); err != nil {
fmt.Println("Error running program:", err)
os.Exit(1)
}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/result/main.go | examples/result/main.go | package main
// A simple example that shows how to retrieve a value from a Bubble Tea
// program after the Bubble Tea has exited.
import (
"fmt"
"os"
"strings"
tea "github.com/charmbracelet/bubbletea"
)
var choices = []string{"Taro", "Coffee", "Lychee"}
type model struct {
cursor int
choice string
}
func (m model) Init() tea.Cmd {
return nil
}
// Update moves the cursor with j/k or the arrow keys (wrapping at both
// ends), records the selection on enter, and quits on q/esc/ctrl+c.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "ctrl+c", "q", "esc":
			return m, tea.Quit

		case "enter":
			// Record the selection on the model so main can read it back
			// after Run returns, then exit.
			m.choice = choices[m.cursor]
			return m, tea.Quit

		case "down", "j":
			m.cursor++
			if m.cursor >= len(choices) {
				m.cursor = 0 // wrap to the top
			}

		case "up", "k":
			m.cursor--
			if m.cursor < 0 {
				m.cursor = len(choices) - 1 // wrap to the bottom
			}
		}
	}

	return m, nil
}
// View lists the drink choices with a radio-button marker on the cursor row.
func (m model) View() string {
	var b strings.Builder
	b.WriteString("What kind of Bubble Tea would you like to order?\n\n")

	for i, choice := range choices {
		marker := "( ) "
		if i == m.cursor {
			marker = "(•) "
		}
		b.WriteString(marker)
		b.WriteString(choice)
		b.WriteString("\n")
	}

	b.WriteString("\n(press q to quit)\n")
	return b.String()
}
func main() {
p := tea.NewProgram(model{})
// Run returns the model as a tea.Model.
m, err := p.Run()
if err != nil {
fmt.Println("Oh no:", err)
os.Exit(1)
}
// Assert the final tea.Model to our local model and print the choice.
if m, ok := m.(model); ok && m.choice != "" {
fmt.Printf("\n---\nYou chose %s!\n", m.choice)
}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/spinners/main.go | examples/spinners/main.go | package main
import (
"fmt"
"os"
"github.com/charmbracelet/bubbles/spinner"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
var (
// Available spinners
spinners = []spinner.Spinner{
spinner.Line,
spinner.Dot,
spinner.MiniDot,
spinner.Jump,
spinner.Pulse,
spinner.Points,
spinner.Globe,
spinner.Moon,
spinner.Monkey,
}
textStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("252")).Render
spinnerStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("69"))
helpStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("241")).Render
)
func main() {
m := model{}
m.resetSpinner()
if _, err := tea.NewProgram(m).Run(); err != nil {
fmt.Println("could not run program:", err)
os.Exit(1)
}
}
type model struct {
index int
spinner spinner.Model
}
func (m model) Init() tea.Cmd {
return m.spinner.Tick
}
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) {
case tea.KeyMsg:
switch msg.String() {
case "ctrl+c", "q", "esc":
return m, tea.Quit
case "h", "left":
m.index--
if m.index < 0 {
m.index = len(spinners) - 1
}
m.resetSpinner()
return m, m.spinner.Tick
case "l", "right":
m.index++
if m.index >= len(spinners) {
m.index = 0
}
m.resetSpinner()
return m, m.spinner.Tick
default:
return m, nil
}
case spinner.TickMsg:
var cmd tea.Cmd
m.spinner, cmd = m.spinner.Update(msg)
return m, cmd
default:
return m, nil
}
}
func (m *model) resetSpinner() {
m.spinner = spinner.New()
m.spinner.Style = spinnerStyle
m.spinner.Spinner = spinners[m.index]
}
// View draws the active spinner, a label, and key help. Spinner index 1
// gets no gap before the label — presumably that spinner's frames carry
// their own trailing space; confirm against the bubbles spinner definitions.
func (m model) View() string {
	gap := " "
	if m.index == 1 {
		gap = ""
	}

	out := fmt.Sprintf("\n %s%s%s\n\n", m.spinner.View(), gap, textStyle("Spinning..."))
	out += helpStyle("h/l, ←/→: change spinner • q: exit\n")
	return out
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/views/main.go | examples/views/main.go | package main
// An example demonstrating an application with multiple views.
//
// Note that this example was produced before the Bubbles progress component
// was available (github.com/charmbracelet/bubbles/progress) and thus, we're
// implementing a progress bar from scratch here.
import (
"fmt"
"math"
"strconv"
"strings"
"time"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
"github.com/fogleman/ease"
"github.com/lucasb-eyer/go-colorful"
)
const (
progressBarWidth = 71
progressFullChar = "█"
progressEmptyChar = "░"
dotChar = " • "
)
// General stuff for styling the view
var (
keywordStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("211"))
subtleStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("241"))
ticksStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("79"))
checkboxStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("212"))
progressEmpty = subtleStyle.Render(progressEmptyChar)
dotStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("236")).Render(dotChar)
mainStyle = lipgloss.NewStyle().MarginLeft(2)
// Gradient colors we'll use for the progress bar
ramp = makeRampStyles("#B14FFF", "#00FFA3", progressBarWidth)
)
func main() {
initialModel := model{0, false, 10, 0, 0, false, false}
p := tea.NewProgram(initialModel)
if _, err := p.Run(); err != nil {
fmt.Println("could not start program:", err)
}
}
type (
tickMsg struct{}
frameMsg struct{}
)
func tick() tea.Cmd {
return tea.Tick(time.Second, func(time.Time) tea.Msg {
return tickMsg{}
})
}
func frame() tea.Cmd {
return tea.Tick(time.Second/60, func(time.Time) tea.Msg {
return frameMsg{}
})
}
type model struct {
Choice int
Chosen bool
Ticks int
Frames int
Progress float64
Loaded bool
Quitting bool
}
func (m model) Init() tea.Cmd {
return tick()
}
// Main update function.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
// Make sure these keys always quit
if msg, ok := msg.(tea.KeyMsg); ok {
k := msg.String()
if k == "q" || k == "esc" || k == "ctrl+c" {
m.Quitting = true
return m, tea.Quit
}
}
// Hand off the message and model to the appropriate update function for the
// appropriate view based on the current state.
if !m.Chosen {
return updateChoices(msg, m)
}
return updateChosen(msg, m)
}
// The main view, which just calls the appropriate sub-view
func (m model) View() string {
var s string
if m.Quitting {
return "\n See you later!\n\n"
}
if !m.Chosen {
s = choicesView(m)
} else {
s = chosenView(m)
}
return mainStyle.Render("\n" + s + "\n\n")
}
// Sub-update functions
// Update loop for the first view where you're choosing a task.
// updateChoices is the update loop for the first view, where the user picks
// a task from the menu. It also drives the one-second auto-quit countdown.
func updateChoices(msg tea.Msg, m model) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "j", "down":
			m.Choice++
			if m.Choice > 3 {
				m.Choice = 3 // clamp to the last menu entry
			}
		case "k", "up":
			m.Choice--
			if m.Choice < 0 {
				m.Choice = 0 // clamp to the first menu entry
			}
		case "enter":
			// Lock in the choice and switch to the 60fps animation loop.
			m.Chosen = true
			return m, frame()
		}

	case tickMsg:
		// One tick per second; quit once the countdown reaches zero.
		if m.Ticks == 0 {
			m.Quitting = true
			return m, tea.Quit
		}
		m.Ticks--
		return m, tick()
	}

	return m, nil
}
// Update loop for the second view after a choice has been made
func updateChosen(msg tea.Msg, m model) (tea.Model, tea.Cmd) {
switch msg.(type) {
case frameMsg:
if !m.Loaded {
m.Frames++
m.Progress = ease.OutBounce(float64(m.Frames) / float64(100))
if m.Progress >= 1 {
m.Progress = 1
m.Loaded = true
m.Ticks = 3
return m, tick()
}
return m, frame()
}
case tickMsg:
if m.Loaded {
if m.Ticks == 0 {
m.Quitting = true
return m, tea.Quit
}
m.Ticks--
return m, tick()
}
}
return m, nil
}
// Sub-views
// The first view, where you're choosing a task
func choicesView(m model) string {
c := m.Choice
tpl := "What to do today?\n\n"
tpl += "%s\n\n"
tpl += "Program quits in %s seconds\n\n"
tpl += subtleStyle.Render("j/k, up/down: select") + dotStyle +
subtleStyle.Render("enter: choose") + dotStyle +
subtleStyle.Render("q, esc: quit")
choices := fmt.Sprintf(
"%s\n%s\n%s\n%s",
checkbox("Plant carrots", c == 0),
checkbox("Go to the market", c == 1),
checkbox("Read something", c == 2),
checkbox("See friends", c == 3),
)
return fmt.Sprintf(tpl, choices, ticksStyle.Render(strconv.Itoa(m.Ticks)))
}
// The second view, after a task has been chosen
func chosenView(m model) string {
var msg string
switch m.Choice {
case 0:
msg = fmt.Sprintf("Carrot planting?\n\nCool, we'll need %s and %s...", keywordStyle.Render("libgarden"), keywordStyle.Render("vegeutils"))
case 1:
msg = fmt.Sprintf("A trip to the market?\n\nOkay, then we should install %s and %s...", keywordStyle.Render("marketkit"), keywordStyle.Render("libshopping"))
case 2:
msg = fmt.Sprintf("Reading time?\n\nOkay, cool, then we’ll need a library. Yes, an %s.", keywordStyle.Render("actual library"))
default:
msg = fmt.Sprintf("It’s always good to see friends.\n\nFetching %s and %s...", keywordStyle.Render("social-skills"), keywordStyle.Render("conversationutils"))
}
label := "Downloading..."
if m.Loaded {
label = fmt.Sprintf("Downloaded. Exiting in %s seconds...", ticksStyle.Render(strconv.Itoa(m.Ticks)))
}
return msg + "\n\n" + label + "\n" + progressbar(m.Progress) + "%"
}
// checkbox renders a single checkbox row; a checked box is highlighted with
// the checkbox style.
func checkbox(label string, checked bool) string {
	if !checked {
		return fmt.Sprintf("[ ] %s", label)
	}
	return checkboxStyle.Render("[x] " + label)
}
// progressbar renders the gradient progress bar for a completion value in
// [0, 1], followed by the rounded percentage number (the caller appends the
// "%" sign).
func progressbar(percent float64) string {
	w := float64(progressBarWidth)

	fullSize := int(math.Round(w * percent))

	// Build the filled portion with a strings.Builder: the original
	// `fullCells +=` concatenation in a loop re-copies the string on every
	// iteration (quadratic); one gradient-styled cell is appended per step.
	var full strings.Builder
	for i := 0; i < fullSize; i++ {
		full.WriteString(ramp[i].Render(progressFullChar))
	}

	emptySize := int(w) - fullSize
	emptyCells := strings.Repeat(progressEmpty, emptySize)

	return fmt.Sprintf("%s%s %3.0f", full.String(), emptyCells, math.Round(percent*100))
}
// Utils
// Generate a blend of colors.
// makeRampStyles generates a color gradient of `steps` entries, blending
// from colorA to colorB (hex strings, e.g. "#B14FFF"), returning one
// lipgloss style per step with its foreground set to the blended color.
func makeRampStyles(colorA, colorB string, steps float64) (s []lipgloss.Style) {
	// Hex parse errors are ignored; the inputs are compile-time constants
	// in this program.
	cA, _ := colorful.Hex(colorA)
	cB, _ := colorful.Hex(colorB)
	for i := 0.0; i < steps; i++ {
		// BlendLuv interpolates in Luv color space for a smoother ramp.
		c := cA.BlendLuv(cB, i/steps)
		s = append(s, lipgloss.NewStyle().Foreground(lipgloss.Color(colorToHex(c))))
	}
	return
}
// Convert a colorful.Color to a hexadecimal format.
// colorToHex converts a colorful.Color to a hexadecimal "#rrggbb" string.
func colorToHex(c colorful.Color) string {
	return fmt.Sprintf("#%s%s%s", colorFloatToHex(c.R), colorFloatToHex(c.G), colorFloatToHex(c.B))
}
// Helper function for converting colors to hex. Assumes a value between 0 and
// 1.
// colorFloatToHex converts a single color channel in [0, 1] to a
// two-character lowercase hex byte (the fraction is truncated, matching the
// original int64 conversion).
func colorFloatToHex(f float64) string {
	hex := strconv.FormatInt(int64(f*255), 16)
	if len(hex) == 1 {
		return "0" + hex
	}
	return hex
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/send-msg/main.go | examples/send-msg/main.go | package main
// A simple example that shows how to send messages to a Bubble Tea program
// from outside the program using Program.Send(Msg).
import (
"fmt"
"math/rand"
"os"
"strings"
"time"
"github.com/charmbracelet/bubbles/spinner"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
var (
spinnerStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("63"))
helpStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("241")).Margin(1, 0)
dotStyle = helpStyle.UnsetMargins()
durationStyle = dotStyle
appStyle = lipgloss.NewStyle().Margin(1, 2, 0, 2)
)
type resultMsg struct {
duration time.Duration
food string
}
func (r resultMsg) String() string {
if r.duration == 0 {
return dotStyle.Render(strings.Repeat(".", 30))
}
return fmt.Sprintf("🍔 Ate %s %s", r.food,
durationStyle.Render(r.duration.String()))
}
type model struct {
spinner spinner.Model
results []resultMsg
quitting bool
}
func newModel() model {
const numLastResults = 5
s := spinner.New()
s.Style = spinnerStyle
return model{
spinner: s,
results: make([]resultMsg, numLastResults),
}
}
func (m model) Init() tea.Cmd {
return m.spinner.Tick
}
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) {
case tea.KeyMsg:
m.quitting = true
return m, tea.Quit
case resultMsg:
m.results = append(m.results[1:], msg)
return m, nil
case spinner.TickMsg:
var cmd tea.Cmd
m.spinner, cmd = m.spinner.Update(msg)
return m, cmd
default:
return m, nil
}
}
func (m model) View() string {
var s string
if m.quitting {
s += "That’s all for today!"
} else {
s += m.spinner.View() + " Eating food..."
}
s += "\n\n"
for _, res := range m.results {
s += res.String() + "\n"
}
if !m.quitting {
s += helpStyle.Render("Press any key to exit")
}
if m.quitting {
s += "\n"
}
return appStyle.Render(s)
}
func main() {
p := tea.NewProgram(newModel())
// Simulate activity
go func() {
for {
pause := time.Duration(rand.Int63n(899)+100) * time.Millisecond // nolint:gosec
time.Sleep(pause)
// Send the Bubble Tea program a message from outside the
// tea.Program. This will block until it is ready to receive
// messages.
p.Send(resultMsg{food: randomFood(), duration: pause})
}
}()
if _, err := p.Run(); err != nil {
fmt.Println("Error running program:", err)
os.Exit(1)
}
}
func randomFood() string {
food := []string{
"an apple", "a pear", "a gherkin", "a party gherkin",
"a kohlrabi", "some spaghetti", "tacos", "a currywurst", "some curry",
"a sandwich", "some peanut butter", "some cashews", "some ramen",
}
return food[rand.Intn(len(food))] // nolint:gosec
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/composable-views/main.go | examples/composable-views/main.go | package main
import (
"fmt"
"log"
"time"
"github.com/charmbracelet/bubbles/spinner"
"github.com/charmbracelet/bubbles/timer"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
/*
This example assumes an existing understanding of commands and messages. If you
haven't already read our tutorials on the basics of Bubble Tea and working
with commands, we recommend reading those first.
Find them at:
https://github.com/charmbracelet/bubbletea/tree/master/tutorials/commands
https://github.com/charmbracelet/bubbletea/tree/master/tutorials/basics
*/
// sessionState is used to track which model is focused
type sessionState uint
const (
defaultTime = time.Minute
timerView sessionState = iota
spinnerView
)
var (
// Available spinners
spinners = []spinner.Spinner{
spinner.Line,
spinner.Dot,
spinner.MiniDot,
spinner.Jump,
spinner.Pulse,
spinner.Points,
spinner.Globe,
spinner.Moon,
spinner.Monkey,
}
modelStyle = lipgloss.NewStyle().
Width(15).
Height(5).
Align(lipgloss.Center, lipgloss.Center).
BorderStyle(lipgloss.HiddenBorder())
focusedModelStyle = lipgloss.NewStyle().
Width(15).
Height(5).
Align(lipgloss.Center, lipgloss.Center).
BorderStyle(lipgloss.NormalBorder()).
BorderForeground(lipgloss.Color("69"))
spinnerStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("69"))
helpStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("241"))
)
type mainModel struct {
state sessionState
timer timer.Model
spinner spinner.Model
index int
}
func newModel(timeout time.Duration) mainModel {
m := mainModel{state: timerView}
m.timer = timer.New(timeout)
m.spinner = spinner.New()
return m
}
func (m mainModel) Init() tea.Cmd {
// start the timer and spinner on program start
return tea.Batch(m.timer.Init(), m.spinner.Tick)
}
func (m mainModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
var cmd tea.Cmd
var cmds []tea.Cmd
switch msg := msg.(type) {
case tea.KeyMsg:
switch msg.String() {
case "ctrl+c", "q":
return m, tea.Quit
case "tab":
if m.state == timerView {
m.state = spinnerView
} else {
m.state = timerView
}
case "n":
if m.state == timerView {
m.timer = timer.New(defaultTime)
cmds = append(cmds, m.timer.Init())
} else {
m.Next()
m.resetSpinner()
cmds = append(cmds, m.spinner.Tick)
}
}
switch m.state {
// update whichever model is focused
case spinnerView:
m.spinner, cmd = m.spinner.Update(msg)
cmds = append(cmds, cmd)
default:
m.timer, cmd = m.timer.Update(msg)
cmds = append(cmds, cmd)
}
case spinner.TickMsg:
m.spinner, cmd = m.spinner.Update(msg)
cmds = append(cmds, cmd)
case timer.TickMsg:
m.timer, cmd = m.timer.Update(msg)
cmds = append(cmds, cmd)
}
return m, tea.Batch(cmds...)
}
func (m mainModel) View() string {
var s string
model := m.currentFocusedModel()
if m.state == timerView {
s += lipgloss.JoinHorizontal(lipgloss.Top, focusedModelStyle.Render(fmt.Sprintf("%4s", m.timer.View())), modelStyle.Render(m.spinner.View()))
} else {
s += lipgloss.JoinHorizontal(lipgloss.Top, modelStyle.Render(fmt.Sprintf("%4s", m.timer.View())), focusedModelStyle.Render(m.spinner.View()))
}
s += helpStyle.Render(fmt.Sprintf("\ntab: focus next • n: new %s • q: exit\n", model))
return s
}
func (m mainModel) currentFocusedModel() string {
if m.state == timerView {
return "timer"
}
return "spinner"
}
// Next advances to the following spinner, wrapping back to the first one
// after the last.
func (m *mainModel) Next() {
	m.index = (m.index + 1) % len(spinners)
}
func (m *mainModel) resetSpinner() {
m.spinner = spinner.New()
m.spinner.Style = spinnerStyle
m.spinner.Spinner = spinners[m.index]
}
func main() {
p := tea.NewProgram(newModel(defaultTime))
if _, err := p.Run(); err != nil {
log.Fatal(err)
}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/timer/main.go | examples/timer/main.go | package main
import (
"fmt"
"os"
"time"
"github.com/charmbracelet/bubbles/help"
"github.com/charmbracelet/bubbles/key"
"github.com/charmbracelet/bubbles/timer"
tea "github.com/charmbracelet/bubbletea"
)
const timeout = time.Second * 5
type model struct {
timer timer.Model
keymap keymap
help help.Model
quitting bool
}
type keymap struct {
start key.Binding
stop key.Binding
reset key.Binding
quit key.Binding
}
func (m model) Init() tea.Cmd {
return m.timer.Init()
}
// Update routes timer tick and start/stop messages to the timer component,
// quits on timeout or the quit key, resets the countdown on the reset key,
// and toggles the timer on start/stop.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case timer.TickMsg:
		var cmd tea.Cmd
		m.timer, cmd = m.timer.Update(msg)
		return m, cmd

	case timer.StartStopMsg:
		var cmd tea.Cmd
		m.timer, cmd = m.timer.Update(msg)
		// Only one of start/stop should appear in the help at a time,
		// depending on whether the timer is currently running.
		m.keymap.stop.SetEnabled(m.timer.Running())
		m.keymap.start.SetEnabled(!m.timer.Running())
		return m, cmd

	case timer.TimeoutMsg:
		m.quitting = true
		return m, tea.Quit

	case tea.KeyMsg:
		switch {
		case key.Matches(msg, m.keymap.quit):
			m.quitting = true
			return m, tea.Quit
		case key.Matches(msg, m.keymap.reset):
			// Restart the countdown from the full timeout.
			m.timer.Timeout = timeout
		case key.Matches(msg, m.keymap.start, m.keymap.stop):
			return m, m.timer.Toggle()
		}
	}

	return m, nil
}
func (m model) helpView() string {
return "\n" + m.help.ShortHelpView([]key.Binding{
m.keymap.start,
m.keymap.stop,
m.keymap.reset,
m.keymap.quit,
})
}
func (m model) View() string {
// For a more detailed timer view you could read m.timer.Timeout to get
// the remaining time as a time.Duration and skip calling m.timer.View()
// entirely.
s := m.timer.View()
if m.timer.Timedout() {
s = "All done!"
}
s += "\n"
if !m.quitting {
s = "Exiting in " + s
s += m.helpView()
}
return s
}
func main() {
m := model{
timer: timer.NewWithInterval(timeout, time.Millisecond),
keymap: keymap{
start: key.NewBinding(
key.WithKeys("s"),
key.WithHelp("s", "start"),
),
stop: key.NewBinding(
key.WithKeys("s"),
key.WithHelp("s", "stop"),
),
reset: key.NewBinding(
key.WithKeys("r"),
key.WithHelp("r", "reset"),
),
quit: key.NewBinding(
key.WithKeys("q", "ctrl+c"),
key.WithHelp("q", "quit"),
),
},
help: help.New(),
}
m.keymap.start.SetEnabled(false)
if _, err := tea.NewProgram(m).Run(); err != nil {
fmt.Println("Uh oh, we encountered an error:", err)
os.Exit(1)
}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/fullscreen/main.go | examples/fullscreen/main.go | package main
// A simple program that opens the alternate screen buffer then counts down
// from 5 and then exits.
import (
"fmt"
"log"
"time"
tea "github.com/charmbracelet/bubbletea"
)
type model int
type tickMsg time.Time
func main() {
p := tea.NewProgram(model(5), tea.WithAltScreen())
if _, err := p.Run(); err != nil {
log.Fatal(err)
}
}
func (m model) Init() tea.Cmd {
return tick()
}
func (m model) Update(message tea.Msg) (tea.Model, tea.Cmd) {
switch msg := message.(type) {
case tea.KeyMsg:
switch msg.String() {
case "q", "esc", "ctrl+c":
return m, tea.Quit
}
case tickMsg:
m--
if m <= 0 {
return m, tea.Quit
}
return m, tick()
}
return m, nil
}
func (m model) View() string {
return fmt.Sprintf("\n\n Hi. This program will exit in %d seconds...", m)
}
func tick() tea.Cmd {
return tea.Tick(time.Second, func(t time.Time) tea.Msg {
return tickMsg(t)
})
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/list-fancy/randomitems.go | examples/list-fancy/randomitems.go | package main
import (
"math/rand"
"sync"
)
type randomItemGenerator struct {
titles []string
descs []string
titleIndex int
descIndex int
mtx *sync.Mutex
shuffle *sync.Once
}
func (r *randomItemGenerator) reset() {
r.mtx = &sync.Mutex{}
r.shuffle = &sync.Once{}
r.titles = []string{
"Artichoke",
"Baking Flour",
"Bananas",
"Barley",
"Bean Sprouts",
"Bitter Melon",
"Black Cod",
"Blood Orange",
"Brown Sugar",
"Cashew Apple",
"Cashews",
"Cat Food",
"Coconut Milk",
"Cucumber",
"Curry Paste",
"Currywurst",
"Dill",
"Dragonfruit",
"Dried Shrimp",
"Eggs",
"Fish Cake",
"Furikake",
"Garlic",
"Gherkin",
"Ginger",
"Granulated Sugar",
"Grapefruit",
"Green Onion",
"Hazelnuts",
"Heavy whipping cream",
"Honey Dew",
"Horseradish",
"Jicama",
"Kohlrabi",
"Leeks",
"Lentils",
"Licorice Root",
"Meyer Lemons",
"Milk",
"Molasses",
"Muesli",
"Nectarine",
"Niagamo Root",
"Nopal",
"Nutella",
"Oat Milk",
"Oatmeal",
"Olives",
"Papaya",
"Party Gherkin",
"Peppers",
"Persian Lemons",
"Pickle",
"Pineapple",
"Plantains",
"Pocky",
"Powdered Sugar",
"Quince",
"Radish",
"Ramps",
"Star Anise",
"Sweet Potato",
"Tamarind",
"Unsalted Butter",
"Watermelon",
"Weißwurst",
"Yams",
"Yeast",
"Yuzu",
"Snow Peas",
}
r.descs = []string{
"A little weird",
"Bold flavor",
"Can’t get enough",
"Delectable",
"Expensive",
"Expired",
"Exquisite",
"Fresh",
"Gimme",
"In season",
"Kind of spicy",
"Looks fresh",
"Looks good to me",
"Maybe not",
"My favorite",
"Oh my",
"On sale",
"Organic",
"Questionable",
"Really fresh",
"Refreshing",
"Salty",
"Scrumptious",
"Delectable",
"Slightly sweet",
"Smells great",
"Tasty",
"Too ripe",
"At last",
"What?",
"Wow",
"Yum",
"Maybe",
"Sure, why not?",
}
r.shuffle.Do(func() {
shuf := func(x []string) {
rand.Shuffle(len(x), func(i, j int) { x[i], x[j] = x[j], x[i] })
}
shuf(r.titles)
shuf(r.descs)
})
}
// next returns the next title/description pair, advancing both cursors
// independently and wrapping each around its own (differently sized) list,
// so pairings drift over time. The generator is lazily initialized on first
// use.
//
// NOTE(review): the nil check below happens before the mutex exists, so a
// concurrent *first* call could race on reset — confirm callers serialize
// initial use.
func (r *randomItemGenerator) next() item {
	if r.mtx == nil {
		r.reset()
	}

	r.mtx.Lock()
	defer r.mtx.Unlock()

	i := item{
		title:       r.titles[r.titleIndex],
		description: r.descs[r.descIndex],
	}

	r.titleIndex++
	if r.titleIndex >= len(r.titles) {
		r.titleIndex = 0
	}

	r.descIndex++
	if r.descIndex >= len(r.descs) {
		r.descIndex = 0
	}

	return i
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/list-fancy/delegate.go | examples/list-fancy/delegate.go | package main
import (
"github.com/charmbracelet/bubbles/key"
"github.com/charmbracelet/bubbles/list"
tea "github.com/charmbracelet/bubbletea"
)
func newItemDelegate(keys *delegateKeyMap) list.DefaultDelegate {
d := list.NewDefaultDelegate()
d.UpdateFunc = func(msg tea.Msg, m *list.Model) tea.Cmd {
var title string
if i, ok := m.SelectedItem().(item); ok {
title = i.Title()
} else {
return nil
}
switch msg := msg.(type) {
case tea.KeyMsg:
switch {
case key.Matches(msg, keys.choose):
return m.NewStatusMessage(statusMessageStyle("You chose " + title))
case key.Matches(msg, keys.remove):
index := m.Index()
m.RemoveItem(index)
if len(m.Items()) == 0 {
keys.remove.SetEnabled(false)
}
return m.NewStatusMessage(statusMessageStyle("Deleted " + title))
}
}
return nil
}
help := []key.Binding{keys.choose, keys.remove}
d.ShortHelpFunc = func() []key.Binding {
return help
}
d.FullHelpFunc = func() [][]key.Binding {
return [][]key.Binding{help}
}
return d
}
type delegateKeyMap struct {
choose key.Binding
remove key.Binding
}
// Additional short help entries. This satisfies the help.KeyMap interface and
// is entirely optional.
func (d delegateKeyMap) ShortHelp() []key.Binding {
return []key.Binding{
d.choose,
d.remove,
}
}
// Additional full help entries. This satisfies the help.KeyMap interface and
// is entirely optional.
func (d delegateKeyMap) FullHelp() [][]key.Binding {
return [][]key.Binding{
{
d.choose,
d.remove,
},
}
}
// newDelegateKeyMap builds the key bindings used by the list item delegate.
func newDelegateKeyMap() *delegateKeyMap {
	d := delegateKeyMap{
		choose: key.NewBinding(
			key.WithKeys("enter"),
			key.WithHelp("enter", "choose"),
		),
		remove: key.NewBinding(
			key.WithKeys("x", "backspace"),
			key.WithHelp("x", "delete"),
		),
	}
	return &d
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/list-fancy/main.go | examples/list-fancy/main.go | package main
import (
"fmt"
"math/rand"
"os"
"time"
"github.com/charmbracelet/bubbles/key"
"github.com/charmbracelet/bubbles/list"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
var (
appStyle = lipgloss.NewStyle().Padding(1, 2)
titleStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#FFFDF5")).
Background(lipgloss.Color("#25A065")).
Padding(0, 1)
statusMessageStyle = lipgloss.NewStyle().
Foreground(lipgloss.AdaptiveColor{Light: "#04B575", Dark: "#04B575"}).
Render
)
type item struct {
title string
description string
}
func (i item) Title() string { return i.title }
func (i item) Description() string { return i.description }
func (i item) FilterValue() string { return i.title }
type listKeyMap struct {
toggleSpinner key.Binding
toggleTitleBar key.Binding
toggleStatusBar key.Binding
togglePagination key.Binding
toggleHelpMenu key.Binding
insertItem key.Binding
}
func newListKeyMap() *listKeyMap {
return &listKeyMap{
insertItem: key.NewBinding(
key.WithKeys("a"),
key.WithHelp("a", "add item"),
),
toggleSpinner: key.NewBinding(
key.WithKeys("s"),
key.WithHelp("s", "toggle spinner"),
),
toggleTitleBar: key.NewBinding(
key.WithKeys("T"),
key.WithHelp("T", "toggle title"),
),
toggleStatusBar: key.NewBinding(
key.WithKeys("S"),
key.WithHelp("S", "toggle status"),
),
togglePagination: key.NewBinding(
key.WithKeys("P"),
key.WithHelp("P", "toggle pagination"),
),
toggleHelpMenu: key.NewBinding(
key.WithKeys("H"),
key.WithHelp("H", "toggle help"),
),
}
}
type model struct {
list list.Model
itemGenerator *randomItemGenerator
keys *listKeyMap
delegateKeys *delegateKeyMap
}
func newModel() model {
var (
itemGenerator randomItemGenerator
delegateKeys = newDelegateKeyMap()
listKeys = newListKeyMap()
)
// Make initial list of items
const numItems = 24
items := make([]list.Item, numItems)
for i := 0; i < numItems; i++ {
items[i] = itemGenerator.next()
}
// Setup list
delegate := newItemDelegate(delegateKeys)
groceryList := list.New(items, delegate, 0, 0)
groceryList.Title = "Groceries"
groceryList.Styles.Title = titleStyle
groceryList.AdditionalFullHelpKeys = func() []key.Binding {
return []key.Binding{
listKeys.toggleSpinner,
listKeys.insertItem,
listKeys.toggleTitleBar,
listKeys.toggleStatusBar,
listKeys.togglePagination,
listKeys.toggleHelpMenu,
}
}
return model{
list: groceryList,
keys: listKeys,
delegateKeys: delegateKeys,
itemGenerator: &itemGenerator,
}
}
func (m model) Init() tea.Cmd {
return nil
}
// Update routes incoming messages: it resizes the list on window-size
// changes, handles the app-level key bindings (unless the list is actively
// filtering), and otherwise delegates to the embedded list component.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmds []tea.Cmd
	switch msg := msg.(type) {
	case tea.WindowSizeMsg:
		// Fit the list inside the app frame (padding/borders).
		h, v := appStyle.GetFrameSize()
		m.list.SetSize(msg.Width-h, msg.Height-v)
	case tea.KeyMsg:
		// Don't match any of the keys below if we're actively filtering.
		if m.list.FilterState() == list.Filtering {
			break
		}
		switch {
		case key.Matches(msg, m.keys.toggleSpinner):
			cmd := m.list.ToggleSpinner()
			return m, cmd
		case key.Matches(msg, m.keys.toggleTitleBar):
			// Title bar, filter UI, and filtering are toggled as one unit.
			v := !m.list.ShowTitle()
			m.list.SetShowTitle(v)
			m.list.SetShowFilter(v)
			m.list.SetFilteringEnabled(v)
			return m, nil
		case key.Matches(msg, m.keys.toggleStatusBar):
			m.list.SetShowStatusBar(!m.list.ShowStatusBar())
			return m, nil
		case key.Matches(msg, m.keys.togglePagination):
			m.list.SetShowPagination(!m.list.ShowPagination())
			return m, nil
		case key.Matches(msg, m.keys.toggleHelpMenu):
			m.list.SetShowHelp(!m.list.ShowHelp())
			return m, nil
		case key.Matches(msg, m.keys.insertItem):
			// Re-enable "remove" now that there is at least one item again.
			m.delegateKeys.remove.SetEnabled(true)
			newItem := m.itemGenerator.next()
			insCmd := m.list.InsertItem(0, newItem)
			statusCmd := m.list.NewStatusMessage(statusMessageStyle("Added " + newItem.Title()))
			return m, tea.Batch(insCmd, statusCmd)
		}
	}
	// This will also call our delegate's update function.
	newListModel, cmd := m.list.Update(msg)
	m.list = newListModel
	cmds = append(cmds, cmd)
	return m, tea.Batch(cmds...)
}
// View renders the list wrapped in the application's outer style.
func (m model) View() string {
	return appStyle.Render(m.list.View())
}
// main seeds math/rand and runs the program in the alternate screen.
// NOTE(review): rand.Seed is deprecated as of Go 1.20 — presumably this
// example targets an older toolchain; confirm before modernizing.
func main() {
	rand.Seed(time.Now().UTC().UnixNano())
	if _, err := tea.NewProgram(newModel(), tea.WithAltScreen()).Run(); err != nil {
		fmt.Println("Error running program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/package-manager/packages.go | examples/package-manager/packages.go | package main
import (
"fmt"
"math/rand"
)
var packages = []string{
"vegeutils",
"libgardening",
"currykit",
"spicerack",
"fullenglish",
"eggy",
"bad-kitty",
"chai",
"hojicha",
"libtacos",
"babys-monads",
"libpurring",
"currywurst-devel",
"xmodmeow",
"licorice-utils",
"cashew-apple",
"rock-lobster",
"standmixer",
"coffee-CUPS",
"libesszet",
"zeichenorientierte-benutzerschnittstellen",
"schnurrkit",
"old-socks-devel",
"jalapeño",
"molasses-utils",
"xkohlrabi",
"party-gherkin",
"snow-peas",
"libyuzu",
}
// getPackages returns a shuffled copy of the demo package list with a
// random version suffix (e.g. "-1.2.3") appended to each name.
//
// Bug fix: the original wrote `pkgs := packages`, which merely aliases the
// package-level slice, making the following copy(pkgs, packages) a no-op
// self-copy. The shuffle and suffix-append therefore mutated the global
// `packages` slice, and every subsequent call stacked another version
// suffix onto each name. Allocating a fresh slice first keeps the global
// data pristine.
func getPackages() []string {
	pkgs := make([]string, len(packages))
	copy(pkgs, packages)

	rand.Shuffle(len(pkgs), func(i, j int) {
		pkgs[i], pkgs[j] = pkgs[j], pkgs[i]
	})

	for k := range pkgs {
		pkgs[k] += fmt.Sprintf("-%d.%d.%d", rand.Intn(10), rand.Intn(10), rand.Intn(10)) //nolint:gosec
	}
	return pkgs
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/package-manager/main.go | examples/package-manager/main.go | package main
import (
"fmt"
"math/rand"
"os"
"strings"
"time"
"github.com/charmbracelet/bubbles/progress"
"github.com/charmbracelet/bubbles/spinner"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// model holds the state of the fake package installer: the package queue,
// the index of the package currently "installing", the terminal size, and
// the spinner/progress widgets.
type model struct {
	packages []string
	index    int
	width    int
	height   int
	spinner  spinner.Model
	progress progress.Model
	done     bool
}

// Shared styles: the highlighted package name, the final "done" banner,
// and the green check mark printed per installed package.
var (
	currentPkgNameStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("211"))
	doneStyle           = lipgloss.NewStyle().Margin(1, 2)
	checkMark           = lipgloss.NewStyle().Foreground(lipgloss.Color("42")).SetString("✓")
)
// newModel builds the initial installer model: a shuffled package list, a
// gradient progress bar without a percentage readout, and a purple spinner.
func newModel() model {
	sp := spinner.New()
	sp.Style = lipgloss.NewStyle().Foreground(lipgloss.Color("63"))

	bar := progress.New(
		progress.WithDefaultGradient(),
		progress.WithWidth(40),
		progress.WithoutPercentage(),
	)

	return model{
		packages: getPackages(),
		spinner:  sp,
		progress: bar,
	}
}
// Init kicks off the first download and starts the spinner ticking.
func (m model) Init() tea.Cmd {
	return tea.Batch(downloadAndInstall(m.packages[m.index]), m.spinner.Tick)
}
// Update advances the install queue on installedPkgMsg, tracks terminal
// size, quits on ctrl+c/esc/q, and forwards tick/frame messages to the
// spinner and progress widgets.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.WindowSizeMsg:
		m.width, m.height = msg.Width, msg.Height
	case tea.KeyMsg:
		switch msg.String() {
		case "ctrl+c", "esc", "q":
			return m, tea.Quit
		}
	case installedPkgMsg:
		pkg := m.packages[m.index]
		if m.index >= len(m.packages)-1 {
			// Everything's been installed. We're done!
			m.done = true
			return m, tea.Sequence(
				tea.Printf("%s %s", checkMark, pkg), // print the last success message
				tea.Quit, // exit the program
			)
		}
		// Update progress bar
		m.index++
		progressCmd := m.progress.SetPercent(float64(m.index) / float64(len(m.packages)))
		return m, tea.Batch(
			progressCmd,
			tea.Printf("%s %s", checkMark, pkg), // print success message above our program
			downloadAndInstall(m.packages[m.index]), // download the next package
		)
	case spinner.TickMsg:
		var cmd tea.Cmd
		m.spinner, cmd = m.spinner.Update(msg)
		return m, cmd
	case progress.FrameMsg:
		newModel, cmd := m.progress.Update(msg)
		if newModel, ok := newModel.(progress.Model); ok {
			m.progress = newModel
		}
		return m, cmd
	}
	return m, nil
}
// View renders one status line — spinner, "Installing <pkg>", a gap, the
// progress bar, and an i/n counter — truncating the package name to fit
// the terminal width. When done it renders the final banner instead.
func (m model) View() string {
	n := len(m.packages)
	w := lipgloss.Width(fmt.Sprintf("%d", n))
	if m.done {
		return doneStyle.Render(fmt.Sprintf("Done! Installed %d packages.\n", n))
	}
	pkgCount := fmt.Sprintf(" %*d/%*d", w, m.index, w, n)
	spin := m.spinner.View() + " "
	prog := m.progress.View()
	// Width left for the "Installing <name>" text after fixed-width parts.
	cellsAvail := max(0, m.width-lipgloss.Width(spin+prog+pkgCount))
	pkgName := currentPkgNameStyle.Render(m.packages[m.index])
	info := lipgloss.NewStyle().MaxWidth(cellsAvail).Render("Installing " + pkgName)
	// Pad the middle so the progress bar and counter stay right-aligned.
	cellsRemaining := max(0, m.width-lipgloss.Width(spin+info+prog+pkgCount))
	gap := strings.Repeat(" ", cellsRemaining)
	return spin + info + gap + prog + pkgCount
}
// installedPkgMsg signals that the named package finished "installing".
type installedPkgMsg string
// downloadAndInstall returns a command that simulates installing pkg by
// sleeping a random interval, then reports completion as installedPkgMsg.
func downloadAndInstall(pkg string) tea.Cmd {
	// This is where you'd do i/o stuff to download and install packages. In
	// our case we're just pausing for a moment to simulate the process.
	d := time.Millisecond * time.Duration(rand.Intn(500)) //nolint:gosec
	return tea.Tick(d, func(t time.Time) tea.Msg {
		return installedPkgMsg(pkg)
	})
}
// main runs the installer demo, printing any runtime error before exiting.
func main() {
	if _, err := tea.NewProgram(newModel()).Run(); err != nil {
		fmt.Println("Error running program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/paginator/main.go | examples/paginator/main.go | package main
// A simple program demonstrating the paginator component from the Bubbles
// component library.
import (
"fmt"
"log"
"strings"
"github.com/charmbracelet/bubbles/paginator"
"github.com/charmbracelet/lipgloss"
tea "github.com/charmbracelet/bubbletea"
)
// newModel creates 100 demo items and a dot-style paginator that shows
// ten items per page with adaptive light/dark dot colors.
func newModel() model {
	items := make([]string, 0, 100)
	for n := 1; n <= 100; n++ {
		items = append(items, fmt.Sprintf("Item %d", n))
	}

	pg := paginator.New()
	pg.Type = paginator.Dots
	pg.PerPage = 10
	pg.ActiveDot = lipgloss.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "235", Dark: "252"}).Render("•")
	pg.InactiveDot = lipgloss.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "250", Dark: "238"}).Render("•")
	pg.SetTotalPages(len(items))

	return model{
		paginator: pg,
		items:     items,
	}
}
// model pairs the full item list with the paginator that windows it.
type model struct {
	items     []string
	paginator paginator.Model
}
// Init implements tea.Model; no startup command is needed.
func (m model) Init() tea.Cmd {
	return nil
}
// Update quits on q/esc/ctrl+c and forwards everything else (including
// page-navigation keys) to the paginator component.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmd tea.Cmd
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "q", "esc", "ctrl+c":
			return m, tea.Quit
		}
	}
	m.paginator, cmd = m.paginator.Update(msg)
	return m, cmd
}
// View renders the items on the current page, the paginator dots, and a
// short help line.
func (m model) View() string {
	var b strings.Builder
	b.WriteString("\n  Paginator Example\n\n")
	// GetSliceBounds converts the current page into [start, end) indices.
	start, end := m.paginator.GetSliceBounds(len(m.items))
	for _, item := range m.items[start:end] {
		b.WriteString("  • " + item + "\n\n")
	}
	b.WriteString("  " + m.paginator.View())
	b.WriteString("\n\n  h/l ←/→ page • q: quit\n")
	return b.String()
}
// main runs the paginator demo.
func main() {
	p := tea.NewProgram(newModel())
	if _, err := p.Run(); err != nil {
		log.Fatal(err)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/pager/main.go | examples/pager/main.go | package main
// An example program demonstrating the pager component from the Bubbles
// component library.
import (
"fmt"
"os"
"strings"
"github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// Header/footer box styles. The title box opens its right edge ("├") and
// the info box its left edge ("┤") so each visually joins the rule line.
var (
	titleStyle = func() lipgloss.Style {
		b := lipgloss.RoundedBorder()
		b.Right = "├"
		return lipgloss.NewStyle().BorderStyle(b).Padding(0, 1)
	}()
	infoStyle = func() lipgloss.Style {
		b := lipgloss.RoundedBorder()
		b.Left = "┤"
		return titleStyle.BorderStyle(b)
	}()
)

// model holds the raw document text and the viewport that scrolls it.
// ready flips to true once the first WindowSizeMsg arrives.
type model struct {
	content  string
	ready    bool
	viewport viewport.Model
}
// Init implements tea.Model; no startup command is needed.
func (m model) Init() tea.Cmd {
	return nil
}
// Update quits on q/esc/ctrl+c, lazily creates the viewport on the first
// WindowSizeMsg (resizing it thereafter), and forwards all messages to the
// viewport for scrolling.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var (
		cmd  tea.Cmd
		cmds []tea.Cmd
	)
	switch msg := msg.(type) {
	case tea.KeyMsg:
		if k := msg.String(); k == "ctrl+c" || k == "q" || k == "esc" {
			return m, tea.Quit
		}
	case tea.WindowSizeMsg:
		headerHeight := lipgloss.Height(m.headerView())
		footerHeight := lipgloss.Height(m.footerView())
		verticalMarginHeight := headerHeight + footerHeight
		if !m.ready {
			// Since this program is using the full size of the viewport we
			// need to wait until we've received the window dimensions before
			// we can initialize the viewport. The initial dimensions come in
			// quickly, though asynchronously, which is why we wait for them
			// here.
			m.viewport = viewport.New(msg.Width, msg.Height-verticalMarginHeight)
			m.viewport.YPosition = headerHeight
			m.viewport.SetContent(m.content)
			m.ready = true
		} else {
			m.viewport.Width = msg.Width
			m.viewport.Height = msg.Height - verticalMarginHeight
		}
	}
	// Handle keyboard and mouse events in the viewport
	m.viewport, cmd = m.viewport.Update(msg)
	cmds = append(cmds, cmd)
	return m, tea.Batch(cmds...)
}
// View renders header, viewport, and footer — or a placeholder until the
// first WindowSizeMsg has sized the viewport.
func (m model) View() string {
	if !m.ready {
		return "\n  Initializing..."
	}
	return fmt.Sprintf("%s\n%s\n%s", m.headerView(), m.viewport.View(), m.footerView())
}
// headerView renders the title box followed by a horizontal rule that
// fills the remaining viewport width.
func (m model) headerView() string {
	title := titleStyle.Render("Mr. Pager")
	fill := strings.Repeat("─", max(0, m.viewport.Width-lipgloss.Width(title)))
	return lipgloss.JoinHorizontal(lipgloss.Center, title, fill)
}
// footerView renders a horizontal rule followed by the scroll-percentage
// box, right-aligned to the viewport width.
func (m model) footerView() string {
	pct := infoStyle.Render(fmt.Sprintf("%3.f%%", m.viewport.ScrollPercent()*100))
	fill := strings.Repeat("─", max(0, m.viewport.Width-lipgloss.Width(pct)))
	return lipgloss.JoinHorizontal(lipgloss.Center, fill, pct)
}
// main loads artichoke.md from the working directory and runs the pager
// fullscreen with mouse-wheel support.
func main() {
	// Load some text for our viewport
	content, err := os.ReadFile("artichoke.md")
	if err != nil {
		fmt.Println("could not load file:", err)
		os.Exit(1)
	}
	p := tea.NewProgram(
		model{content: string(content)},
		tea.WithAltScreen(),       // use the full size of the terminal in its "alternate screen buffer"
		tea.WithMouseCellMotion(), // turn on mouse support so we can track the mouse wheel
	)
	if _, err := p.Run(); err != nil {
		fmt.Println("could not run program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/prevent-quit/main.go | examples/prevent-quit/main.go | package main
// A program demonstrating how to use the WithFilter option to intercept events.
import (
"fmt"
"log"
"github.com/charmbracelet/bubbles/help"
"github.com/charmbracelet/bubbles/key"
"github.com/charmbracelet/bubbles/textarea"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// Styles for the "[yn]" choice hint, the "Changes saved!" text, and the
// bordered quit-confirmation box.
var (
	choiceStyle   = lipgloss.NewStyle().PaddingLeft(1).Foreground(lipgloss.Color("241"))
	saveTextStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("170"))
	quitViewStyle = lipgloss.NewStyle().Padding(1).Border(lipgloss.RoundedBorder()).BorderForeground(lipgloss.Color("170"))
)
// main runs the editor with a message filter that can veto quit requests.
func main() {
	p := tea.NewProgram(initialModel(), tea.WithFilter(filter))
	if _, err := p.Run(); err != nil {
		log.Fatal(err)
	}
}
// filter intercepts outgoing messages: it swallows tea.QuitMsg while the
// model still has unsaved changes (returning nil), which lets the model
// show its confirmation prompt instead of exiting. All other messages
// pass through untouched.
func filter(teaModel tea.Model, msg tea.Msg) tea.Msg {
	if _, isQuit := msg.(tea.QuitMsg); isQuit && teaModel.(model).hasChanges {
		return nil
	}
	return msg
}
// model holds the text area, help widget, key bindings, transient status
// text, a dirty flag (hasChanges), and whether the quit prompt is showing.
type model struct {
	textarea   textarea.Model
	help       help.Model
	keymap     keymap
	saveText   string
	hasChanges bool
	quitting   bool
}

// keymap groups the two app-level bindings: save and quit.
type keymap struct {
	save key.Binding
	quit key.Binding
}
// initialModel builds a focused text area plus the ctrl+s/esc bindings.
func initialModel() model {
	ti := textarea.New()
	ti.Placeholder = "Only the best words"
	ti.Focus()
	return model{
		textarea: ti,
		help:     help.New(),
		keymap: keymap{
			save: key.NewBinding(
				key.WithKeys("ctrl+s"),
				key.WithHelp("ctrl+s", "save"),
			),
			quit: key.NewBinding(
				key.WithKeys("esc", "ctrl+c"),
				key.WithHelp("esc", "quit"),
			),
		},
	}
}
// Init starts the text-area cursor blinking.
func (m model) Init() tea.Cmd {
	return textarea.Blink
}
// Update dispatches to the quit-confirmation handler while the prompt is
// showing, otherwise to the normal editor handler.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	if m.quitting {
		return m.updatePromptView(msg)
	}
	return m.updateTextView(msg)
}
// updateTextView handles messages while editing: ctrl+s "saves" (clears
// the dirty flag), quit keys switch to the confirmation prompt, and any
// typed rune marks the buffer dirty. All messages then flow to the
// textarea component.
func (m model) updateTextView(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmds []tea.Cmd
	var cmd tea.Cmd
	switch msg := msg.(type) {
	case tea.KeyMsg:
		m.saveText = ""
		switch {
		case key.Matches(msg, m.keymap.save):
			m.saveText = "Changes saved!"
			m.hasChanges = false
		case key.Matches(msg, m.keymap.quit):
			m.quitting = true
			return m, tea.Quit
		case msg.Type == tea.KeyRunes:
			m.saveText = "" // NOTE(review): redundant — already cleared above
			m.hasChanges = true
			fallthrough
		default:
			// Re-focus the textarea if something blurred it.
			if !m.textarea.Focused() {
				cmd = m.textarea.Focus()
				cmds = append(cmds, cmd)
			}
		}
	}
	m.textarea, cmd = m.textarea.Update(msg)
	cmds = append(cmds, cmd)
	return m, tea.Batch(cmds...)
}
// updatePromptView handles keys while the quit-confirmation prompt is
// showing: "y" (or a quit key) discards changes and quits; any other key
// returns to editing.
func (m model) updatePromptView(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		// For simplicity's sake, we'll treat any key besides "y" as "no"
		if key.Matches(msg, m.keymap.quit) || msg.String() == "y" {
			m.hasChanges = false
			return m, tea.Quit
		}
		m.quitting = false
	}
	return m, nil
}
// View renders the quit-confirmation box while quitting with unsaved
// changes, a farewell line when quitting cleanly, or the editor plus
// status text and help otherwise.
func (m model) View() string {
	if m.quitting {
		if m.hasChanges {
			text := lipgloss.JoinHorizontal(lipgloss.Top, "You have unsaved changes. Quit without saving?", choiceStyle.Render("[yn]"))
			return quitViewStyle.Render(text)
		}
		return "Very important, thank you\n"
	}
	helpView := m.help.ShortHelpView([]key.Binding{
		m.keymap.save,
		m.keymap.quit,
	})
	return fmt.Sprintf(
		"\nType some important things.\n\n%s\n\n %s\n %s",
		m.textarea.View(),
		saveTextStyle.Render(m.saveText),
		helpView,
	) + "\n\n"
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/tabs/main.go | examples/tabs/main.go | package main
import (
"fmt"
"os"
"strings"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// model holds the tab labels, the per-tab body text, and the index of the
// currently selected tab.
type model struct {
	Tabs       []string
	TabContent []string
	activeTab  int
}
// Init implements tea.Model; no startup command is needed.
func (m model) Init() tea.Cmd {
	return nil
}
// Update moves the active tab left/right (clamped to the valid range) and
// quits on ctrl+c or q.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch keypress := msg.String(); keypress {
		case "ctrl+c", "q":
			return m, tea.Quit
		case "right", "l", "n", "tab":
			m.activeTab = min(m.activeTab+1, len(m.Tabs)-1)
			return m, nil
		case "left", "h", "p", "shift+tab":
			m.activeTab = max(m.activeTab-1, 0)
			return m, nil
		}
	}
	return m, nil
}
// tabBorderWithBottom returns a rounded border whose bottom-left, bottom,
// and bottom-right runes are replaced with the given strings.
func tabBorderWithBottom(left, middle, right string) lipgloss.Border {
	b := lipgloss.RoundedBorder()
	b.BottomLeft, b.Bottom, b.BottomRight = left, middle, right
	return b
}
// Tab styling: inactive tabs keep a closed bottom edge while the active
// tab's bottom opens into the content window below it.
var (
	inactiveTabBorder = tabBorderWithBottom("┴", "─", "┴")
	activeTabBorder   = tabBorderWithBottom("┘", " ", "└")
	docStyle          = lipgloss.NewStyle().Padding(1, 2, 1, 2)
	highlightColor    = lipgloss.AdaptiveColor{Light: "#874BFD", Dark: "#7D56F4"}
	inactiveTabStyle  = lipgloss.NewStyle().Border(inactiveTabBorder, true).BorderForeground(highlightColor).Padding(0, 1)
	activeTabStyle    = inactiveTabStyle.Border(activeTabBorder, true)
	windowStyle       = lipgloss.NewStyle().BorderForeground(highlightColor).Padding(2, 0).Align(lipgloss.Center).Border(lipgloss.NormalBorder()).UnsetBorderTop()
)
// View renders the tab row — adjusting the corner runes of the first,
// last, and active tabs so they join the window border — followed by the
// active tab's content in a bordered window of matching width.
func (m model) View() string {
	doc := strings.Builder{}
	var renderedTabs []string
	for i, t := range m.Tabs {
		var style lipgloss.Style
		isFirst, isLast, isActive := i == 0, i == len(m.Tabs)-1, i == m.activeTab
		if isActive {
			style = activeTabStyle
		} else {
			style = inactiveTabStyle
		}
		border, _, _, _, _ := style.GetBorder()
		// Pick corner runes so edge tabs meet the window border cleanly.
		if isFirst && isActive {
			border.BottomLeft = "│"
		} else if isFirst && !isActive {
			border.BottomLeft = "├"
		} else if isLast && isActive {
			border.BottomRight = "│"
		} else if isLast && !isActive {
			border.BottomRight = "┤"
		}
		style = style.Border(border)
		renderedTabs = append(renderedTabs, style.Render(t))
	}
	row := lipgloss.JoinHorizontal(lipgloss.Top, renderedTabs...)
	doc.WriteString(row)
	doc.WriteString("\n")
	doc.WriteString(windowStyle.Width((lipgloss.Width(row) - windowStyle.GetHorizontalFrameSize())).Render(m.TabContent[m.activeTab]))
	return docStyle.Render(doc.String())
}
// main seeds the model with five tabs and runs the program.
func main() {
	tabs := []string{"Lip Gloss", "Blush", "Eye Shadow", "Mascara", "Foundation"}
	tabContent := []string{"Lip Gloss Tab", "Blush Tab", "Eye Shadow Tab", "Mascara Tab", "Foundation Tab"}
	m := model{Tabs: tabs, TabContent: tabContent}
	if _, err := tea.NewProgram(m).Run(); err != nil {
		fmt.Println("Error running program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/mouse/main.go | examples/mouse/main.go | package main
// A simple program that opens the alternate screen buffer and displays mouse
// coordinates and events.
import (
"log"
tea "github.com/charmbracelet/bubbletea"
)
// main runs the mouse demo with all-motion tracking enabled.
func main() {
	p := tea.NewProgram(model{}, tea.WithMouseAllMotion())
	if _, err := p.Run(); err != nil {
		log.Fatal(err)
	}
}
// model stores the most recent mouse event (currently unused by View).
type model struct {
	mouseEvent tea.MouseEvent
}
// Init implements tea.Model; no startup command is needed.
func (m model) Init() tea.Cmd {
	return nil
}
// Update quits on q/esc/ctrl+c and prints the coordinates and description
// of every mouse event above the program.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		if s := msg.String(); s == "ctrl+c" || s == "q" || s == "esc" {
			return m, tea.Quit
		}
	case tea.MouseMsg:
		return m, tea.Printf("(X: %d, Y: %d) %s", msg.X, msg.Y, tea.MouseEvent(msg))
	}
	return m, nil
}
// View shows a static instruction line; mouse events are printed via Update.
func (m model) View() string {
	s := "Do mouse stuff. When you're done press q to quit.\n"
	return s
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/sequence/main.go | examples/sequence/main.go | package main
// A simple example illustrating how to run a series of commands in order.
import (
"fmt"
"os"
"time"
tea "github.com/charmbracelet/bubbletea"
)
// model is stateless; this example only demonstrates command sequencing.
type model struct{}
// Init demonstrates nesting: Sequence runs its children in order while
// Batch runs its children concurrently, so the printed labels show how
// the two compose. The outer sequence ends with Quit.
func (m model) Init() tea.Cmd {
	return tea.Sequence(
		tea.Batch(
			tea.Sequence(
				SleepPrintln("1-1-1", 1000),
				SleepPrintln("1-1-2", 1000),
			),
			tea.Batch(
				SleepPrintln("1-2-1", 1500),
				SleepPrintln("1-2-2", 1250),
			),
		),
		tea.Println("2"),
		tea.Sequence(
			tea.Batch(
				SleepPrintln("3-1-1", 500),
				SleepPrintln("3-1-2", 1000),
			),
			tea.Sequence(
				SleepPrintln("3-2-1", 750),
				SleepPrintln("3-2-2", 500),
			),
		),
		tea.Quit,
	)
}
// SleepPrintln returns a command that sleeps for the given number of
// milliseconds and then prints s by delegating to a tea.Println command.
// (Fixes the misspelled "milisecond" parameter name and adds a doc
// comment for this exported function; callers are unaffected.)
func SleepPrintln(s string, milliseconds int) tea.Cmd {
	printCmd := tea.Println(s)
	return func() tea.Msg {
		time.Sleep(time.Duration(milliseconds) * time.Millisecond)
		return printCmd()
	}
}
// Update quits on any key press and ignores every other message.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	if _, pressed := msg.(tea.KeyMsg); pressed {
		return m, tea.Quit
	}
	return m, nil
}
// View is empty; all output goes through the Println commands.
func (m model) View() string {
	return ""
}
// main runs the sequencing demo.
func main() {
	if _, err := tea.NewProgram(model{}).Run(); err != nil {
		fmt.Println("Uh oh:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/simple/main_test.go | examples/simple/main_test.go | package main
import (
"bytes"
"io"
"regexp"
"testing"
"time"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/x/exp/teatest"
)
// TestApp runs the countdown program end-to-end via teatest: after one
// tick the model should have decremented from 10 to 9, keystrokes and
// stray messages must be ignored, and the final output must mention the
// countdown text.
func TestApp(t *testing.T) {
	m := model(10)
	tm := teatest.NewTestModel(
		t, m,
		teatest.WithInitialTermSize(70, 30),
	)
	t.Cleanup(func() {
		if err := tm.Quit(); err != nil {
			t.Fatal(err)
		}
	})

	// Let one tick elapse so the countdown has decremented exactly once.
	time.Sleep(time.Second + time.Millisecond*200)

	tm.Type("I'm typing things, but it'll be ignored by my program")
	tm.Send("ignored msg")
	tm.Send(tea.KeyMsg{
		Type: tea.KeyEnter,
	})

	if err := tm.Quit(); err != nil {
		t.Fatal(err)
	}

	out := readBts(t, tm.FinalOutput(t))
	if !regexp.MustCompile(`This program will exit in \d+ seconds`).Match(out) {
		t.Fatalf("output does not match the given regular expression: %s", string(out))
	}
	teatest.RequireEqualOutput(t, out)

	// Fix: compare and report the *final* model. The original printed the
	// untouched initial model (always 10) and its message claimed the
	// expectation was 10 while the check was for 9, making any failure
	// output misleading.
	if fm := tm.FinalModel(t).(model); fm != 9 {
		t.Errorf("expected final model to be 9, was %d", fm)
	}
}
// TestAppInteractive drives the countdown interactively: it observes the
// intermediate "9 seconds" and "7 seconds" frames, then quits and checks
// the final model value.
func TestAppInteractive(t *testing.T) {
	m := model(10)
	tm := teatest.NewTestModel(
		t, m,
		teatest.WithInitialTermSize(70, 30),
	)

	// Let one tick elapse so the countdown reads 9.
	time.Sleep(time.Second + time.Millisecond*200)
	tm.Send("ignored msg")

	if bts := readBts(t, tm.Output()); !bytes.Contains(bts, []byte("This program will exit in 9 seconds")) {
		t.Fatalf("output does not match: expected %q", string(bts))
	}

	teatest.WaitFor(t, tm.Output(), func(out []byte) bool {
		return bytes.Contains(out, []byte("This program will exit in 7 seconds"))
	}, teatest.WithDuration(5*time.Second))

	tm.Send(tea.KeyMsg{
		Type: tea.KeyEnter,
	})

	if err := tm.Quit(); err != nil {
		t.Fatal(err)
	}

	// Fix: report the *final* model on failure. The original printed the
	// untouched initial model m (always 10), which made the failure
	// message useless for diagnosing a wrong final value.
	if fm := tm.FinalModel(t).(model); fm != 7 {
		t.Errorf("expected final model to be 7, was %d", fm)
	}
}
func readBts(tb testing.TB, r io.Reader) []byte {
tb.Helper()
bts, err := io.ReadAll(r)
if err != nil {
tb.Fatal(err)
}
return bts
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/simple/main.go | examples/simple/main.go | package main
// A simple program that counts down from 5 and then exits.
import (
"fmt"
"log"
"os"
"time"
tea "github.com/charmbracelet/bubbletea"
)
// main optionally enables file logging via $BUBBLETEA_LOG, then runs the
// countdown starting from 5.
func main() {
	// Log to a file. Useful in debugging since you can't really log to stdout.
	// Not required.
	logfilePath := os.Getenv("BUBBLETEA_LOG")
	if logfilePath != "" {
		if _, err := tea.LogToFile(logfilePath, "simple"); err != nil {
			log.Fatal(err)
		}
	}

	// Initialize our program
	p := tea.NewProgram(model(5))
	if _, err := p.Run(); err != nil {
		log.Fatal(err)
	}
}
// A model can be more or less any type of data. It holds all the data for a
// program, so often it's a struct. For this simple example, however, all
// we'll need is a simple integer: the seconds remaining in the countdown.
type model int
// Init optionally returns an initial command we should run. In this case we
// want to start the timer.
func (m model) Init() tea.Cmd {
	return tick
}
// Update is called when messages are received. The idea is that you inspect the
// message and send back an updated model accordingly. You can also return
// a command, which is a function that performs I/O and returns a message.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "ctrl+c", "q":
			return m, tea.Quit
		case "ctrl+z":
			return m, tea.Suspend
		}
	case tickMsg:
		// Count down; quit at zero, otherwise schedule the next tick.
		m--
		if m <= 0 {
			return m, tea.Quit
		}
		return m, tick
	}
	return m, nil
}
// View returns a string based on data in the model. That string which will be
// rendered to the terminal.
func (m model) View() string {
	return fmt.Sprintf("Hi. This program will exit in %d seconds.\n\nTo quit sooner press ctrl-c, or press ctrl-z to suspend...\n", m)
}
// Messages are events that we respond to in our Update function. This
// particular one indicates that the timer has ticked.
type tickMsg time.Time
// tick blocks for one second and then emits a tickMsg. Bubble Tea runs
// commands like this off the main loop, so the sleep doesn't block the UI.
func tick() tea.Msg {
	time.Sleep(time.Second)
	return tickMsg{}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/stopwatch/main.go | examples/stopwatch/main.go | package main
import (
"fmt"
"os"
"time"
"github.com/charmbracelet/bubbles/help"
"github.com/charmbracelet/bubbles/key"
"github.com/charmbracelet/bubbles/stopwatch"
tea "github.com/charmbracelet/bubbletea"
)
// model wraps the stopwatch widget with its key bindings and help view.
type model struct {
	stopwatch stopwatch.Model
	keymap    keymap
	help      help.Model
	quitting  bool
}

// keymap defines the start/stop ("s"), reset ("r"), and quit bindings.
type keymap struct {
	start key.Binding
	stop  key.Binding
	reset key.Binding
	quit  key.Binding
}
// Init delegates startup to the stopwatch component.
func (m model) Init() tea.Cmd {
	return m.stopwatch.Init()
}
// View renders the elapsed time, with a label and help while running.
func (m model) View() string {
	// Note: you could further customize the time output by getting the
	// duration from m.stopwatch.Elapsed(), which returns a time.Duration, and
	// skip m.stopwatch.View() altogether.
	s := m.stopwatch.View() + "\n"
	if !m.quitting {
		s = "Elapsed: " + s
		s += m.helpView()
	}
	return s
}
// helpView renders the single-line key help for the four bindings.
func (m model) helpView() string {
	return "\n" + m.help.ShortHelpView([]key.Binding{
		m.keymap.start,
		m.keymap.stop,
		m.keymap.reset,
		m.keymap.quit,
	})
}
// Update handles quit/reset/toggle keys and forwards everything else to
// the stopwatch component.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch {
		case key.Matches(msg, m.keymap.quit):
			m.quitting = true
			return m, tea.Quit
		case key.Matches(msg, m.keymap.reset):
			return m, m.stopwatch.Reset()
		case key.Matches(msg, m.keymap.start, m.keymap.stop):
			// Enable the binding matching the state we're about to toggle
			// into: Running() is read before Toggle() takes effect.
			m.keymap.stop.SetEnabled(!m.stopwatch.Running())
			m.keymap.start.SetEnabled(m.stopwatch.Running())
			return m, m.stopwatch.Toggle()
		}
	}
	var cmd tea.Cmd
	m.stopwatch, cmd = m.stopwatch.Update(msg)
	return m, cmd
}
// main wires up a millisecond-resolution stopwatch with its key bindings
// ("start" begins disabled since the stopwatch starts running) and runs it.
func main() {
	m := model{
		stopwatch: stopwatch.NewWithInterval(time.Millisecond),
		keymap: keymap{
			start: key.NewBinding(
				key.WithKeys("s"),
				key.WithHelp("s", "start"),
			),
			stop: key.NewBinding(
				key.WithKeys("s"),
				key.WithHelp("s", "stop"),
			),
			reset: key.NewBinding(
				key.WithKeys("r"),
				key.WithHelp("r", "reset"),
			),
			quit: key.NewBinding(
				key.WithKeys("ctrl+c", "q"),
				key.WithHelp("q", "quit"),
			),
		},
		help: help.New(),
	}
	m.keymap.start.SetEnabled(false)
	if _, err := tea.NewProgram(m).Run(); err != nil {
		fmt.Println("Oh no, it didn't work:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/realtime/main.go | examples/realtime/main.go | package main
// A simple example that shows how to send activity to Bubble Tea in real-time
// through a channel.
import (
"fmt"
"math/rand"
"os"
"time"
"github.com/charmbracelet/bubbles/spinner"
tea "github.com/charmbracelet/bubbletea"
)
// A message used to indicate that activity has occurred. In the real world (for
// example, chat) this would contain actual data.
type responseMsg struct{}
// Simulate a process that sends events at an irregular interval in real time.
// In this case, we'll send events on the channel at a random interval between
// 100 to 1000 milliseconds. As a command, Bubble Tea will run this
// asynchronously.
//
// NOTE(review): the loop never returns, so this goroutine lives for the
// whole program — fine for a demo, but real code would want a ctx/done
// channel to stop it.
func listenForActivity(sub chan struct{}) tea.Cmd {
	return func() tea.Msg {
		for {
			time.Sleep(time.Millisecond * time.Duration(rand.Int63n(900)+100)) // nolint:gosec
			sub <- struct{}{}
		}
	}
}
// A command that waits for the activity on a channel: it blocks on a
// single receive and converts it into a responseMsg.
func waitForActivity(sub chan struct{}) tea.Cmd {
	return func() tea.Msg {
		return responseMsg(<-sub)
	}
}
// model tracks the activity channel, a response counter, and the spinner.
type model struct {
	sub       chan struct{} // where we'll receive activity notifications
	responses int           // how many responses we've received
	spinner   spinner.Model
	quitting  bool
}
// Init starts the spinner, the background event generator, and the first
// single-event wait.
func (m model) Init() tea.Cmd {
	return tea.Batch(
		m.spinner.Tick,
		listenForActivity(m.sub), // generate activity
		waitForActivity(m.sub),   // wait for activity
	)
}
// Update counts each responseMsg and re-arms the wait command; any key
// quits, and spinner ticks drive the spinner animation.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg.(type) {
	case tea.KeyMsg:
		m.quitting = true
		return m, tea.Quit
	case responseMsg:
		m.responses++                    // record external activity
		return m, waitForActivity(m.sub) // wait for next event
	case spinner.TickMsg:
		var cmd tea.Cmd
		m.spinner, cmd = m.spinner.Update(msg)
		return m, cmd
	default:
		return m, nil
	}
}
// View shows the spinner, the event count, and an exit hint.
func (m model) View() string {
	s := fmt.Sprintf("\n %s Events received: %d\n\n Press any key to exit\n", m.spinner.View(), m.responses)
	if m.quitting {
		s += "\n"
	}
	return s
}
// main creates the shared activity channel and runs the program.
func main() {
	p := tea.NewProgram(model{
		sub:     make(chan struct{}),
		spinner: spinner.New(),
	})
	if _, err := p.Run(); err != nil {
		fmt.Println("could not start program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/debounce/main.go | examples/debounce/main.go | package main
// This example illustrates how to debounce commands.
//
// When the user presses a key we increment the "tag" value on the model and,
// after a short delay, we include that tag value in the message produced
// by the Tick command.
//
// In a subsequent Update, if the tag in the Msg matches current tag on the
// model's state we know that the debouncing is complete and we can proceed as
// normal. If not, we simply ignore the inbound message.
import (
"fmt"
"os"
"time"
tea "github.com/charmbracelet/bubbletea"
)
// debounceDuration is how long input must be quiet before we act.
const debounceDuration = time.Second

// exitMsg carries the tag value captured when its timer was scheduled.
type exitMsg int

// model counts key presses; tag also serves as the debounce generation.
type model struct {
	tag int
}
// Init implements tea.Model; no startup command is needed.
func (m model) Init() tea.Cmd {
	return nil
}
// Update bumps the tag on every key press and schedules a delayed exitMsg
// carrying that tag; only the exitMsg whose tag still matches (i.e. the
// last one scheduled) triggers quit — the debounce.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		// Increment the tag on the model...
		m.tag++
		return m, tea.Tick(debounceDuration, func(_ time.Time) tea.Msg {
			// ...and include a copy of that tag value in the message.
			return exitMsg(m.tag)
		})
	case exitMsg:
		// If the tag in the message doesn't match the tag on the model then we
		// know that this message was not the last one sent and another is on
		// the way. If that's the case we know, we can ignore this message.
		// Otherwise, the debounce timeout has passed and this message is a
		// valid debounced one.
		if int(msg) == m.tag {
			return m, tea.Quit
		}
	}
	return m, nil
}
// View shows the press count and explains how to exit.
func (m model) View() string {
	return fmt.Sprintf("Key presses: %d", m.tag) +
		"\nTo exit press any key, then wait for one second without pressing anything."
}
// main runs the debounce demo.
func main() {
	if _, err := tea.NewProgram(model{}).Run(); err != nil {
		fmt.Println("uh oh:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/spinner/main.go | examples/spinner/main.go | package main
// A simple program demonstrating the spinner component from the Bubbles
// component library.
import (
"fmt"
"os"
"github.com/charmbracelet/bubbles/spinner"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// errMsg wraps an error so it can travel through the update loop.
type errMsg error

// model holds the spinner, a quitting flag, and the last error received.
type model struct {
	spinner  spinner.Model
	quitting bool
	err      error
}
// initialModel returns a model whose spinner uses the "dot" animation in
// pink (ANSI 205).
func initialModel() model {
	sp := spinner.New()
	sp.Spinner = spinner.Dot
	sp.Style = lipgloss.NewStyle().Foreground(lipgloss.Color("205"))
	return model{spinner: sp}
}
// Init starts the spinner ticking.
func (m model) Init() tea.Cmd {
	return m.spinner.Tick
}
// Update quits on q/esc/ctrl+c, records any errMsg, and forwards all
// other messages (notably spinner ticks) to the spinner.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "q", "esc", "ctrl+c":
			m.quitting = true
			return m, tea.Quit
		default:
			return m, nil
		}
	case errMsg:
		m.err = msg
		return m, nil
	default:
		var cmd tea.Cmd
		m.spinner, cmd = m.spinner.Update(msg)
		return m, cmd
	}
}
// View shows the error if one occurred, otherwise the spinner and hint.
func (m model) View() string {
	if m.err != nil {
		return m.err.Error()
	}
	str := fmt.Sprintf("\n\n   %s Loading forever...press q to quit\n\n", m.spinner.View())
	if m.quitting {
		return str + "\n"
	}
	return str
}
// main runs the spinner demo.
func main() {
	p := tea.NewProgram(initialModel())
	if _, err := p.Run(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/window-size/main.go | examples/window-size/main.go | package main
// A simple program that queries and displays the window-size.
import (
"log"
tea "github.com/charmbracelet/bubbletea"
)
// main runs the window-size demo.
func main() {
	p := tea.NewProgram(model{})
	if _, err := p.Run(); err != nil {
		log.Fatal(err)
	}
}
// model is stateless; results are printed via tea.Printf in Update.
type model struct{}
// Init implements tea.Model; no startup command is needed.
func (m model) Init() tea.Cmd {
	return nil
}
// Update quits on q/esc/ctrl+c; any other key requests the window size,
// and the resulting WindowSizeMsg is printed above the program.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		if s := msg.String(); s == "ctrl+c" || s == "q" || s == "esc" {
			return m, tea.Quit
		}
		return m, tea.WindowSize()
	case tea.WindowSizeMsg:
		return m, tea.Printf("%dx%d", msg.Width, msg.Height)
	}
	return m, nil
}
// View shows a static instruction line; sizes are printed via Update.
func (m model) View() string {
	s := "When you're done press q to quit. Press any other key to query the window-size.\n"
	return s
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/glamour/main.go | examples/glamour/main.go | package main
import (
"fmt"
"os"
"github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/glamour"
"github.com/charmbracelet/lipgloss"
)
const content = `
# Today’s Menu
## Appetizers
| Name | Price | Notes |
| --- | --- | --- |
| Tsukemono | $2 | Just an appetizer |
| Tomato Soup | $4 | Made with San Marzano tomatoes |
| Okonomiyaki | $4 | Takes a few minutes to make |
| Curry | $3 | We can add squash if you’d like |
## Seasonal Dishes
| Name | Price | Notes |
| --- | --- | --- |
| Steamed bitter melon | $2 | Not so bitter |
| Takoyaki | $3 | Fun to eat |
| Winter squash | $3 | Today it's pumpkin |
## Desserts
| Name | Price | Notes |
| --- | --- | --- |
| Dorayaki | $4 | Looks good on rabbits |
| Banana Split | $5 | A classic |
| Cream Puff | $3 | Pretty creamy! |
All our dishes are made in-house by Karen, our chef. Most of our ingredients
are from our garden or the fish market down the street.
Some famous people that have eaten here lately:
* [x] René Redzepi
* [x] David Chang
* [ ] Jiro Ono (maybe some day)
Bon appétit!
`
// helpStyle renders the dimmed help line at the bottom of the UI.
var helpStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("241")).Render

// example wraps a viewport that scrolls the glamour-rendered markdown.
type example struct {
	viewport viewport.Model
}

// newExample builds the viewport, renders the markdown menu into it, and
// returns the ready-to-run model. It returns an error if glamour fails to
// initialize or render.
func newExample() (*example, error) {
	const width = 78

	vp := viewport.New(width, 20)
	vp.Style = lipgloss.NewStyle().
		BorderStyle(lipgloss.RoundedBorder()).
		BorderForeground(lipgloss.Color("62")).
		PaddingRight(2)

	// We need to adjust the width of the glamour render from our main width
	// to account for a few things:
	//
	//  * The viewport border width
	//  * The viewport padding
	//  * The viewport margins
	//  * The gutter glamour applies to the left side of the content
	//
	const glamourGutter = 2
	glamourRenderWidth := width - vp.Style.GetHorizontalFrameSize() - glamourGutter

	renderer, err := glamour.NewTermRenderer(
		glamour.WithAutoStyle(),
		glamour.WithWordWrap(glamourRenderWidth),
	)
	if err != nil {
		return nil, err
	}

	str, err := renderer.Render(content)
	if err != nil {
		return nil, err
	}

	vp.SetContent(str)

	return &example{
		viewport: vp,
	}, nil
}

// Init performs no initial I/O.
func (e example) Init() tea.Cmd {
	return nil
}
// Update quits on q/ctrl+c/esc and forwards every other key press to the
// viewport so it can handle scrolling. Non-key messages are ignored.
func (e example) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	keyMsg, ok := msg.(tea.KeyMsg)
	if !ok {
		return e, nil
	}
	switch keyMsg.String() {
	case "q", "ctrl+c", "esc":
		return e, tea.Quit
	}
	var cmd tea.Cmd
	e.viewport, cmd = e.viewport.Update(keyMsg)
	return e, cmd
}
// View renders the scrollable viewport followed by the help line.
func (e example) View() string {
	return e.viewport.View() + e.helpView()
}

// helpView renders the key-binding hints.
func (e example) helpView() string {
	return helpStyle("\n  ↑/↓: Navigate • q: Quit\n")
}

// main builds the model and runs the program, exiting non-zero on failure.
func main() {
	model, err := newExample()
	if err != nil {
		fmt.Println("Could not initialize Bubble Tea model:", err)
		os.Exit(1)
	}

	if _, err := tea.NewProgram(model).Run(); err != nil {
		fmt.Println("Bummer, there's been an error:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/file-picker/main.go | examples/file-picker/main.go | package main
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/charmbracelet/bubbles/filepicker"
tea "github.com/charmbracelet/bubbletea"
)
// model holds the file picker plus the currently selected file, a quitting
// flag used to blank the final frame, and the last selection error.
type model struct {
	filepicker   filepicker.Model
	selectedFile string
	quitting     bool
	err          error
}

// clearErrorMsg tells Update to drop the currently displayed error.
type clearErrorMsg struct{}

// clearErrorAfter returns a command that emits clearErrorMsg after t,
// so error banners disappear on their own.
func clearErrorAfter(t time.Duration) tea.Cmd {
	return tea.Tick(t, func(_ time.Time) tea.Msg {
		return clearErrorMsg{}
	})
}

// Init delegates startup (reading the initial directory) to the file picker.
func (m model) Init() tea.Cmd {
	return m.filepicker.Init()
}
// Update handles quit keys, error expiry, and selection events. The file
// picker must be updated *before* the DidSelect checks, since those inspect
// the state produced by this message.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "ctrl+c", "q":
			m.quitting = true
			return m, tea.Quit
		}
	case clearErrorMsg:
		// The error banner's timer fired; hide the message.
		m.err = nil
	}

	var cmd tea.Cmd
	m.filepicker, cmd = m.filepicker.Update(msg)

	// Did the user select a file?
	if didSelect, path := m.filepicker.DidSelectFile(msg); didSelect {
		// Get the path of the selected file.
		m.selectedFile = path
	}

	// Did the user select a disabled file?
	// This is only necessary to display an error to the user.
	if didSelect, path := m.filepicker.DidSelectDisabledFile(msg); didSelect {
		// Let's clear the selectedFile and display an error.
		m.err = errors.New(path + " is not valid.")
		m.selectedFile = ""
		return m, tea.Batch(cmd, clearErrorAfter(2*time.Second))
	}

	return m, cmd
}
// View shows a status line (error, prompt, or current selection) above the
// file picker. It renders nothing once the program is quitting.
func (m model) View() string {
	if m.quitting {
		return ""
	}
	var b strings.Builder
	b.WriteString("\n ")
	switch {
	case m.err != nil:
		b.WriteString(m.filepicker.Styles.DisabledFile.Render(m.err.Error()))
	case m.selectedFile == "":
		b.WriteString("Pick a file:")
	default:
		b.WriteString("Selected file: " + m.filepicker.Styles.Selected.Render(m.selectedFile))
	}
	b.WriteString("\n\n" + m.filepicker.View() + "\n")
	return b.String()
}
// main configures the file picker, runs the program, and prints the final
// selection. Fixes over the original: the error from Run is no longer
// discarded, the type assertion on the returned model is checked (it would
// panic if Run returned early with the original *model), and the final
// message uses the returned model's styles instead of the stale pre-Run copy.
func main() {
	fp := filepicker.New()
	fp.AllowedTypes = []string{".mod", ".sum", ".go", ".txt", ".md"}
	fp.CurrentDirectory, _ = os.UserHomeDir()

	m := model{
		filepicker: fp,
	}
	tm, err := tea.NewProgram(&m).Run()
	if err != nil {
		fmt.Println("Error running program:", err)
		os.Exit(1)
	}
	mm, ok := tm.(model)
	if !ok {
		fmt.Println("Error: unexpected model type returned from program")
		os.Exit(1)
	}
	fmt.Println("\n You selected: " + mm.filepicker.Styles.Selected.Render(mm.selectedFile) + "\n")
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/pipe/main.go | examples/pipe/main.go | package main
// An example illustrating how to pipe in data to a Bubble Tea application.
// More so, this serves as proof that Bubble Tea will automatically listen for
// keystrokes when input is not a TTY, such as when data is piped or redirected
// in.
import (
"bufio"
"fmt"
"io"
"os"
"strings"
"github.com/charmbracelet/bubbles/textinput"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// main reads all piped-in text from stdin and hands it to the Bubble Tea
// program as the initial text-input value.
//
// Fix: the original loop only broke out on io.EOF; any other read error fell
// through, wrote a zero rune to the builder, and looped forever. Non-EOF
// errors now terminate the program with a message.
func main() {
	stat, err := os.Stdin.Stat()
	if err != nil {
		panic(err)
	}
	// Require either a pipe or redirected input; bail out on an empty TTY.
	if stat.Mode()&os.ModeNamedPipe == 0 && stat.Size() == 0 {
		fmt.Println("Try piping in some text.")
		os.Exit(1)
	}

	reader := bufio.NewReader(os.Stdin)
	var b strings.Builder

	for {
		r, _, err := reader.ReadRune()
		if err != nil {
			if err == io.EOF {
				break
			}
			// A genuine read failure: report it instead of looping forever.
			fmt.Println("Error getting input:", err)
			os.Exit(1)
		}
		if _, err := b.WriteRune(r); err != nil {
			fmt.Println("Error getting input:", err)
			os.Exit(1)
		}
	}

	model := newModel(strings.TrimSpace(b.String()))

	if _, err := tea.NewProgram(model).Run(); err != nil {
		fmt.Println("Couldn't start program:", err)
		os.Exit(1)
	}
}
// model wraps a single pre-populated text input.
type model struct {
	userInput textinput.Model
}

// newModel builds a focused text input seeded with initialValue, with the
// cursor placed at the end of the text.
func newModel(initialValue string) (m model) {
	i := textinput.New()
	i.Prompt = ""
	i.Cursor.Style = lipgloss.NewStyle().Foreground(lipgloss.Color("63"))
	i.Width = 48
	i.SetValue(initialValue)
	i.CursorEnd()
	i.Focus()
	m.userInput = i
	return
}
// Init starts the cursor blinking.
func (m model) Init() tea.Cmd {
	return textinput.Blink
}

// Update quits on ctrl+c/esc/enter and otherwise forwards the message to the
// text input component.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	if key, ok := msg.(tea.KeyMsg); ok {
		switch key.Type {
		case tea.KeyCtrlC, tea.KeyEscape, tea.KeyEnter:
			return m, tea.Quit
		}
	}

	var cmd tea.Cmd
	m.userInput, cmd = m.userInput.Update(msg)
	return m, cmd
}

// View echoes the piped-in text inside the editable input.
func (m model) View() string {
	return fmt.Sprintf(
		"\nYou piped in: %s\n\nPress ^C to exit",
		m.userInput.View(),
	)
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/progress-static/main.go | examples/progress-static/main.go | package main
// A simple example that shows how to render a progress bar in a "pure"
// fashion. In this example we bump the progress by 25% every second,
// maintaining the progress state on our top level model using the progress bar
// model's ViewAs method only for rendering.
//
// The signature for ViewAs is:
//
// func (m Model) ViewAs(percent float64) string
//
// So it takes a float between 0 and 1, and renders the progress bar
// accordingly. When using the progress bar in this "pure" fashion and there's
// no need to call an Update method.
//
// The progress bar is also able to animate itself, however. For details see
// the progress-animated example.
import (
"fmt"
"os"
"strings"
"time"
"github.com/charmbracelet/bubbles/progress"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
const (
padding = 2
maxWidth = 80
)
var helpStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#626262")).Render
// main runs the static progress bar demo.
func main() {
	prog := progress.New(progress.WithScaledGradient("#FF7CCB", "#FDFF8C"))

	if _, err := tea.NewProgram(model{progress: prog}).Run(); err != nil {
		fmt.Println("Oh no!", err)
		os.Exit(1)
	}
}

// tickMsg carries the time of each one-second tick.
type tickMsg time.Time

// model tracks the completion fraction (0..1) itself; the progress component
// is used purely for rendering via ViewAs.
type model struct {
	percent  float64
	progress progress.Model
}

// Init schedules the first tick.
func (m model) Init() tea.Cmd {
	return tickCmd()
}
// Update quits on any key press, resizes the bar on window changes, and
// advances the completion fraction by 25% per tick, quitting at 100%.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		// Any key exits the demo.
		return m, tea.Quit

	case tea.WindowSizeMsg:
		width := msg.Width - padding*2 - 4
		if width > maxWidth {
			width = maxWidth
		}
		m.progress.Width = width
		return m, nil

	case tickMsg:
		m.percent = m.percent + 0.25
		if m.percent > 1.0 {
			m.percent = 1.0
			return m, tea.Quit
		}
		return m, tickCmd()
	}
	return m, nil
}
// View renders the bar at the current percentage plus a help line.
func (m model) View() string {
	pad := strings.Repeat(" ", padding)
	return "\n" +
		pad + m.progress.ViewAs(m.percent) + "\n\n" +
		pad + helpStyle("Press any key to quit")
}

// tickCmd emits a tickMsg once per second.
func tickCmd() tea.Cmd {
	return tea.Tick(time.Second, func(t time.Time) tea.Msg {
		return tickMsg(t)
	})
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/table/main.go | examples/table/main.go | package main
import (
"fmt"
"os"
"github.com/charmbracelet/bubbles/table"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// baseStyle draws the border around the whole table.
var baseStyle = lipgloss.NewStyle().
	BorderStyle(lipgloss.NormalBorder()).
	BorderForeground(lipgloss.Color("240"))

// model wraps the table component.
type model struct {
	table table.Model
}

// Init performs no initial I/O.
func (m model) Init() tea.Cmd { return nil }

// Update toggles table focus on esc, quits on q/ctrl+c, prints the selected
// city on enter, and forwards everything else to the table for navigation.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmd tea.Cmd
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "esc":
			if m.table.Focused() {
				m.table.Blur()
			} else {
				m.table.Focus()
			}
		case "q", "ctrl+c":
			return m, tea.Quit
		case "enter":
			// Column 1 is the city name.
			return m, tea.Batch(
				tea.Printf("Let's go to %s!", m.table.SelectedRow()[1]),
			)
		}
	}
	m.table, cmd = m.table.Update(msg)
	return m, cmd
}

// View renders the bordered table.
func (m model) View() string {
	return baseStyle.Render(m.table.View()) + "\n"
}
// main builds a table of the world's most populous cities, styles it, and
// runs the program.
func main() {
	// Column layout: rank, city, country, population.
	columns := []table.Column{
		{Title: "Rank", Width: 4},
		{Title: "City", Width: 10},
		{Title: "Country", Width: 10},
		{Title: "Population", Width: 10},
	}

	// Static demo data: top-100 cities by population.
	rows := []table.Row{
		{"1", "Tokyo", "Japan", "37,274,000"},
		{"2", "Delhi", "India", "32,065,760"},
		{"3", "Shanghai", "China", "28,516,904"},
		{"4", "Dhaka", "Bangladesh", "22,478,116"},
		{"5", "São Paulo", "Brazil", "22,429,800"},
		{"6", "Mexico City", "Mexico", "22,085,140"},
		{"7", "Cairo", "Egypt", "21,750,020"},
		{"8", "Beijing", "China", "21,333,332"},
		{"9", "Mumbai", "India", "20,961,472"},
		{"10", "Osaka", "Japan", "19,059,856"},
		{"11", "Chongqing", "China", "16,874,740"},
		{"12", "Karachi", "Pakistan", "16,839,950"},
		{"13", "Istanbul", "Turkey", "15,636,243"},
		{"14", "Kinshasa", "DR Congo", "15,628,085"},
		{"15", "Lagos", "Nigeria", "15,387,639"},
		{"16", "Buenos Aires", "Argentina", "15,369,919"},
		{"17", "Kolkata", "India", "15,133,888"},
		{"18", "Manila", "Philippines", "14,406,059"},
		{"19", "Tianjin", "China", "14,011,828"},
		{"20", "Guangzhou", "China", "13,964,637"},
		{"21", "Rio De Janeiro", "Brazil", "13,634,274"},
		{"22", "Lahore", "Pakistan", "13,541,764"},
		{"23", "Bangalore", "India", "13,193,035"},
		{"24", "Shenzhen", "China", "12,831,330"},
		{"25", "Moscow", "Russia", "12,640,818"},
		{"26", "Chennai", "India", "11,503,293"},
		{"27", "Bogota", "Colombia", "11,344,312"},
		{"28", "Paris", "France", "11,142,303"},
		{"29", "Jakarta", "Indonesia", "11,074,811"},
		{"30", "Lima", "Peru", "11,044,607"},
		{"31", "Bangkok", "Thailand", "10,899,698"},
		{"32", "Hyderabad", "India", "10,534,418"},
		{"33", "Seoul", "South Korea", "9,975,709"},
		{"34", "Nagoya", "Japan", "9,571,596"},
		{"35", "London", "United Kingdom", "9,540,576"},
		{"36", "Chengdu", "China", "9,478,521"},
		{"37", "Nanjing", "China", "9,429,381"},
		{"38", "Tehran", "Iran", "9,381,546"},
		{"39", "Ho Chi Minh City", "Vietnam", "9,077,158"},
		{"40", "Luanda", "Angola", "8,952,496"},
		{"41", "Wuhan", "China", "8,591,611"},
		{"42", "Xi An Shaanxi", "China", "8,537,646"},
		{"43", "Ahmedabad", "India", "8,450,228"},
		{"44", "Kuala Lumpur", "Malaysia", "8,419,566"},
		{"45", "New York City", "United States", "8,177,020"},
		{"46", "Hangzhou", "China", "8,044,878"},
		{"47", "Surat", "India", "7,784,276"},
		{"48", "Suzhou", "China", "7,764,499"},
		{"49", "Hong Kong", "Hong Kong", "7,643,256"},
		{"50", "Riyadh", "Saudi Arabia", "7,538,200"},
		{"51", "Shenyang", "China", "7,527,975"},
		{"52", "Baghdad", "Iraq", "7,511,920"},
		{"53", "Dongguan", "China", "7,511,851"},
		{"54", "Foshan", "China", "7,497,263"},
		{"55", "Dar Es Salaam", "Tanzania", "7,404,689"},
		{"56", "Pune", "India", "6,987,077"},
		{"57", "Santiago", "Chile", "6,856,939"},
		{"58", "Madrid", "Spain", "6,713,557"},
		{"59", "Haerbin", "China", "6,665,951"},
		{"60", "Toronto", "Canada", "6,312,974"},
		{"61", "Belo Horizonte", "Brazil", "6,194,292"},
		{"62", "Khartoum", "Sudan", "6,160,327"},
		{"63", "Johannesburg", "South Africa", "6,065,354"},
		{"64", "Singapore", "Singapore", "6,039,577"},
		{"65", "Dalian", "China", "5,930,140"},
		{"66", "Qingdao", "China", "5,865,232"},
		{"67", "Zhengzhou", "China", "5,690,312"},
		{"68", "Ji Nan Shandong", "China", "5,663,015"},
		{"69", "Barcelona", "Spain", "5,658,472"},
		{"70", "Saint Petersburg", "Russia", "5,535,556"},
		{"71", "Abidjan", "Ivory Coast", "5,515,790"},
		{"72", "Yangon", "Myanmar", "5,514,454"},
		{"73", "Fukuoka", "Japan", "5,502,591"},
		{"74", "Alexandria", "Egypt", "5,483,605"},
		{"75", "Guadalajara", "Mexico", "5,339,583"},
		{"76", "Ankara", "Turkey", "5,309,690"},
		{"77", "Chittagong", "Bangladesh", "5,252,842"},
		{"78", "Addis Ababa", "Ethiopia", "5,227,794"},
		{"79", "Melbourne", "Australia", "5,150,766"},
		{"80", "Nairobi", "Kenya", "5,118,844"},
		{"81", "Hanoi", "Vietnam", "5,067,352"},
		{"82", "Sydney", "Australia", "5,056,571"},
		{"83", "Monterrey", "Mexico", "5,036,535"},
		{"84", "Changsha", "China", "4,809,887"},
		{"85", "Brasilia", "Brazil", "4,803,877"},
		{"86", "Cape Town", "South Africa", "4,800,954"},
		{"87", "Jiddah", "Saudi Arabia", "4,780,740"},
		{"88", "Urumqi", "China", "4,710,203"},
		{"89", "Kunming", "China", "4,657,381"},
		{"90", "Changchun", "China", "4,616,002"},
		{"91", "Hefei", "China", "4,496,456"},
		{"92", "Shantou", "China", "4,490,411"},
		{"93", "Xinbei", "Taiwan", "4,470,672"},
		{"94", "Kabul", "Afghanistan", "4,457,882"},
		{"95", "Ningbo", "China", "4,405,292"},
		{"96", "Tel Aviv", "Israel", "4,343,584"},
		{"97", "Yaounde", "Cameroon", "4,336,670"},
		{"98", "Rome", "Italy", "4,297,877"},
		{"99", "Shijiazhuang", "China", "4,285,135"},
		{"100", "Montreal", "Canada", "4,276,526"},
	}

	// Build a focused, 7-row-tall table over the data above.
	t := table.New(
		table.WithColumns(columns),
		table.WithRows(rows),
		table.WithFocused(true),
		table.WithHeight(7),
	)

	// Customize header and selected-row styling on top of the defaults.
	s := table.DefaultStyles()
	s.Header = s.Header.
		BorderStyle(lipgloss.NormalBorder()).
		BorderForeground(lipgloss.Color("240")).
		BorderBottom(true).
		Bold(false)
	s.Selected = s.Selected.
		Foreground(lipgloss.Color("229")).
		Background(lipgloss.Color("57")).
		Bold(false)
	t.SetStyles(s)

	m := model{t}
	if _, err := tea.NewProgram(m).Run(); err != nil {
		fmt.Println("Error running program:", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/suspend/main.go | examples/suspend/main.go | package main
import (
"errors"
"fmt"
"os"
tea "github.com/charmbracelet/bubbletea"
)
// model tracks whether the program is quitting or suspending so View can
// blank the screen during those transitions.
type model struct {
	quitting   bool
	suspending bool
}

// Init performs no initial I/O.
func (m model) Init() tea.Cmd {
	return nil
}

// Update handles resume notifications plus the quit (q/esc), interrupt
// (ctrl+c), and suspend (ctrl+z) keys.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.ResumeMsg:
		// Control returned after a suspend (e.g. the user ran `fg`).
		m.suspending = false
		return m, nil
	case tea.KeyMsg:
		switch msg.String() {
		case "q", "esc":
			m.quitting = true
			return m, tea.Quit
		case "ctrl+c":
			m.quitting = true
			return m, tea.Interrupt
		case "ctrl+z":
			m.suspending = true
			return m, tea.Suspend
		}
	}
	return m, nil
}

// View renders the usage hint, or nothing mid-suspend/mid-quit.
func (m model) View() string {
	if m.suspending || m.quitting {
		return ""
	}
	return "\nPress ctrl-z to suspend, ctrl+c to interrupt, q, or esc to exit\n"
}

// main runs the program; an interrupt exits with the conventional 130.
func main() {
	if _, err := tea.NewProgram(model{}).Run(); err != nil {
		fmt.Println("Error running program:", err)
		if errors.Is(err, tea.ErrInterrupted) {
			os.Exit(130)
		}
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/textinput/main.go | examples/textinput/main.go | package main
// A simple program demonstrating the text input component from the Bubbles
// component library.
import (
"fmt"
"log"
"github.com/charmbracelet/bubbles/textinput"
tea "github.com/charmbracelet/bubbletea"
)
// main runs the text input demo.
func main() {
	p := tea.NewProgram(initialModel())
	if _, err := p.Run(); err != nil {
		log.Fatal(err)
	}
}

// errMsg wraps errors delivered as messages.
type (
	errMsg error
)

// model holds the single text input and the last error received.
type model struct {
	textInput textinput.Model
	err       error
}

// initialModel builds a focused input with a placeholder and size limits.
func initialModel() model {
	ti := textinput.New()
	ti.Placeholder = "Pikachu"
	ti.Focus()
	ti.CharLimit = 156
	ti.Width = 20

	return model{
		textInput: ti,
		err:       nil,
	}
}

// Init starts the cursor blinking.
func (m model) Init() tea.Cmd {
	return textinput.Blink
}

// Update quits on enter/ctrl+c/esc, records errors, and forwards all other
// messages to the text input.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmd tea.Cmd

	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.Type {
		case tea.KeyEnter, tea.KeyCtrlC, tea.KeyEsc:
			return m, tea.Quit
		}

	// We handle errors just like any other message
	case errMsg:
		m.err = msg
		return m, nil
	}

	m.textInput, cmd = m.textInput.Update(msg)
	return m, cmd
}

// View renders the question, the input, and the quit hint.
func (m model) View() string {
	return fmt.Sprintf(
		"What’s your favorite Pokémon?\n\n%s\n\n%s",
		m.textInput.View(),
		"(esc to quit)",
	) + "\n"
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/credit-card-form/main.go | examples/credit-card-form/main.go | package main
import (
"fmt"
"log"
"strconv"
"strings"
"github.com/charmbracelet/bubbles/textinput"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// main runs the credit card form demo.
func main() {
	p := tea.NewProgram(initialModel())

	if _, err := p.Run(); err != nil {
		log.Fatal(err)
	}
}

// errMsg wraps errors delivered as messages.
type (
	errMsg error
)

// Indexes of the three inputs in model.inputs.
const (
	ccn = iota
	exp
	cvv
)

// Palette for the form labels and the continue hint.
const (
	hotPink  = lipgloss.Color("#FF06B7")
	darkGray = lipgloss.Color("#767676")
)

var (
	inputStyle    = lipgloss.NewStyle().Foreground(hotPink)
	continueStyle = lipgloss.NewStyle().Foreground(darkGray)
)

// model holds the three inputs, the index of the focused one, and the last
// error received.
type model struct {
	inputs  []textinput.Model
	focused int
	err     error
}
// Validator functions to ensure valid input
// ccnValidator validates a card number typed as four space-separated groups
// of four digits ("1234 5678 ..."): at most 19 characters (16 digits plus 3
// spaces), a space at every 5th position, digits elsewhere, and the whole
// thing numeric once spaces are stripped. It is called on each keystroke
// with the prospective value.
func ccnValidator(s string) error {
	if len(s) > 19 {
		return fmt.Errorf("CCN is too long")
	}
	var last byte
	if len(s) > 0 {
		last = s[len(s)-1]
	}
	atGroupBoundary := len(s)%5 == 0
	isDigit := last >= '0' && last <= '9'
	switch {
	case len(s) == 0, !atGroupBoundary && !isDigit:
		return fmt.Errorf("CCN is invalid")
	case atGroupBoundary && last != ' ':
		return fmt.Errorf("CCN must separate groups with spaces")
	}
	// With spaces removed, the remainder must be a plain integer.
	digits := strings.ReplaceAll(s, " ", "")
	_, err := strconv.ParseInt(digits, 10, 64)
	return err
}
// expValidator validates an expiry typed as MM/YY: everything except the
// separator must be numeric, and once three or more characters are present
// there must be exactly one "/" at index 2.
func expValidator(s string) error {
	if _, err := strconv.ParseInt(strings.ReplaceAll(s, "/", ""), 10, 64); err != nil {
		return fmt.Errorf("EXP is invalid")
	}
	// There should be only one slash and it should be in the 2nd index (3rd character)
	if len(s) >= 3 {
		if strings.Index(s, "/") != 2 || strings.LastIndex(s, "/") != 2 {
			return fmt.Errorf("EXP is invalid")
		}
	}
	return nil
}
// cvvValidator validates the CVV: it need only be numeric, since the input's
// CharLimit already caps it at three characters.
func cvvValidator(s string) error {
	if _, err := strconv.ParseInt(s, 10, 64); err != nil {
		return err
	}
	return nil
}
// initialModel builds the three validated inputs (card number, expiry, CVV)
// with the first one focused.
func initialModel() model {
	var inputs []textinput.Model = make([]textinput.Model, 3)
	inputs[ccn] = textinput.New()
	inputs[ccn].Placeholder = "4505 **** **** 1234"
	inputs[ccn].Focus()
	inputs[ccn].CharLimit = 20
	inputs[ccn].Width = 30
	inputs[ccn].Prompt = ""
	inputs[ccn].Validate = ccnValidator

	inputs[exp] = textinput.New()
	inputs[exp].Placeholder = "MM/YY "
	inputs[exp].CharLimit = 5
	inputs[exp].Width = 5
	inputs[exp].Prompt = ""
	inputs[exp].Validate = expValidator

	inputs[cvv] = textinput.New()
	inputs[cvv].Placeholder = "XXX"
	inputs[cvv].CharLimit = 3
	inputs[cvv].Width = 5
	inputs[cvv].Prompt = ""
	inputs[cvv].Validate = cvvValidator

	return model{
		inputs:  inputs,
		focused: 0,
		err:     nil,
	}
}

// Init starts the cursor blinking.
func (m model) Init() tea.Cmd {
	return textinput.Blink
}
// Update cycles focus with tab/shift+tab (and ctrl+n/ctrl+p), quits on
// ctrl+c/esc, submits on enter from the last field, then re-applies the
// focus state and forwards the message to every input.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmds []tea.Cmd = make([]tea.Cmd, len(m.inputs))

	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.Type {
		case tea.KeyEnter:
			// Enter on the last field submits; elsewhere it advances focus.
			if m.focused == len(m.inputs)-1 {
				return m, tea.Quit
			}
			m.nextInput()
		case tea.KeyCtrlC, tea.KeyEsc:
			return m, tea.Quit
		case tea.KeyShiftTab, tea.KeyCtrlP:
			m.prevInput()
		case tea.KeyTab, tea.KeyCtrlN:
			m.nextInput()
		}
		// Blur everything, then focus only the current field.
		for i := range m.inputs {
			m.inputs[i].Blur()
		}
		m.inputs[m.focused].Focus()

	// We handle errors just like any other message
	case errMsg:
		m.err = msg
		return m, nil
	}

	for i := range m.inputs {
		m.inputs[i], cmds[i] = m.inputs[i].Update(msg)
	}
	return m, tea.Batch(cmds...)
}
// View lays out the labeled fields and the continue hint using a fixed
// template; the raw string's whitespace is part of the rendered output.
func (m model) View() string {
	return fmt.Sprintf(
		` Total: $21.50:
%s
%s
%s %s
%s %s
%s
`,
		inputStyle.Width(30).Render("Card Number"),
		m.inputs[ccn].View(),
		inputStyle.Width(6).Render("EXP"),
		inputStyle.Width(6).Render("CVV"),
		m.inputs[exp].View(),
		m.inputs[cvv].View(),
		continueStyle.Render("Continue ->"),
	) + "\n"
}
// nextInput moves focus to the following field, wrapping to the first.
func (m *model) nextInput() {
	m.focused = (m.focused + 1) % len(m.inputs)
}

// prevInput moves focus to the preceding field, wrapping to the last.
func (m *model) prevInput() {
	// Adding len(m.inputs)-1 before taking the modulus is the same as
	// decrementing and wrapping negative values to the end.
	m.focused = (m.focused + len(m.inputs) - 1) % len(m.inputs)
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/textinputs/main.go | examples/textinputs/main.go | package main
// A simple example demonstrating the use of multiple text input components
// from the Bubbles component library.
import (
"fmt"
"os"
"strings"
"github.com/charmbracelet/bubbles/cursor"
"github.com/charmbracelet/bubbles/textinput"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// Styles for focused/blurred inputs, the cursor, help text, and the submit
// button in both states.
var (
	focusedStyle        = lipgloss.NewStyle().Foreground(lipgloss.Color("205"))
	blurredStyle        = lipgloss.NewStyle().Foreground(lipgloss.Color("240"))
	cursorStyle         = focusedStyle
	noStyle             = lipgloss.NewStyle()
	helpStyle           = blurredStyle
	cursorModeHelpStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("244"))

	focusedButton = focusedStyle.Render("[ Submit ]")
	blurredButton = fmt.Sprintf("[ %s ]", blurredStyle.Render("Submit"))
)

// model holds the inputs, which element is focused (len(inputs) means the
// submit button), and the shared cursor mode.
type model struct {
	focusIndex int
	inputs     []textinput.Model
	cursorMode cursor.Mode
}

// initialModel builds the nickname, email, and password inputs, focusing the
// first one.
func initialModel() model {
	m := model{
		inputs: make([]textinput.Model, 3),
	}

	var t textinput.Model
	for i := range m.inputs {
		t = textinput.New()
		t.Cursor.Style = cursorStyle
		t.CharLimit = 32

		switch i {
		case 0:
			t.Placeholder = "Nickname"
			t.Focus()
			t.PromptStyle = focusedStyle
			t.TextStyle = focusedStyle
		case 1:
			t.Placeholder = "Email"
			t.CharLimit = 64
		case 2:
			t.Placeholder = "Password"
			// Mask the password as it is typed.
			t.EchoMode = textinput.EchoPassword
			t.EchoCharacter = '•'
		}

		m.inputs[i] = t
	}

	return m
}
// Init starts the cursor blinking.
func (m model) Init() tea.Cmd {
	return textinput.Blink
}

// Update quits on ctrl+c/esc, cycles the cursor mode on ctrl+r, moves focus
// with tab/shift+tab/up/down/enter (enter on the submit button quits), and
// otherwise forwards the message to every input.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "ctrl+c", "esc":
			return m, tea.Quit

		// Change cursor mode
		case "ctrl+r":
			m.cursorMode++
			if m.cursorMode > cursor.CursorHide {
				m.cursorMode = cursor.CursorBlink
			}
			// Apply the new mode to every input's cursor.
			cmds := make([]tea.Cmd, len(m.inputs))
			for i := range m.inputs {
				cmds[i] = m.inputs[i].Cursor.SetMode(m.cursorMode)
			}
			return m, tea.Batch(cmds...)

		// Set focus to next input
		case "tab", "shift+tab", "enter", "up", "down":
			s := msg.String()

			// Did the user press enter while the submit button was focused?
			// If so, exit.
			if s == "enter" && m.focusIndex == len(m.inputs) {
				return m, tea.Quit
			}

			// Cycle indexes
			if s == "up" || s == "shift+tab" {
				m.focusIndex--
			} else {
				m.focusIndex++
			}

			// Wrap around; index len(m.inputs) is the submit button.
			if m.focusIndex > len(m.inputs) {
				m.focusIndex = 0
			} else if m.focusIndex < 0 {
				m.focusIndex = len(m.inputs)
			}

			cmds := make([]tea.Cmd, len(m.inputs))
			for i := 0; i <= len(m.inputs)-1; i++ {
				if i == m.focusIndex {
					// Set focused state
					cmds[i] = m.inputs[i].Focus()
					m.inputs[i].PromptStyle = focusedStyle
					m.inputs[i].TextStyle = focusedStyle
					continue
				}
				// Remove focused state
				m.inputs[i].Blur()
				m.inputs[i].PromptStyle = noStyle
				m.inputs[i].TextStyle = noStyle
			}

			return m, tea.Batch(cmds...)
		}
	}

	// Handle character input and blinking
	cmd := m.updateInputs(msg)

	return m, cmd
}
// updateInputs forwards msg to every input and batches their commands.
func (m *model) updateInputs(msg tea.Msg) tea.Cmd {
	cmds := make([]tea.Cmd, len(m.inputs))

	// Only text inputs with Focus() set will respond, so it's safe to simply
	// update all of them here without any further logic.
	for i := range m.inputs {
		m.inputs[i], cmds[i] = m.inputs[i].Update(msg)
	}

	return tea.Batch(cmds...)
}

// View stacks the inputs, the submit button, and the cursor-mode help line.
func (m model) View() string {
	var b strings.Builder

	for i := range m.inputs {
		b.WriteString(m.inputs[i].View())
		if i < len(m.inputs)-1 {
			b.WriteRune('\n')
		}
	}

	// Highlight the button only while it holds focus.
	button := &blurredButton
	if m.focusIndex == len(m.inputs) {
		button = &focusedButton
	}
	fmt.Fprintf(&b, "\n\n%s\n\n", *button)

	b.WriteString(helpStyle.Render("cursor mode is "))
	b.WriteString(cursorModeHelpStyle.Render(m.cursorMode.String()))
	b.WriteString(helpStyle.Render(" (ctrl+r to change style)"))

	return b.String()
}

// main runs the program, exiting non-zero on failure.
func main() {
	if _, err := tea.NewProgram(initialModel()).Run(); err != nil {
		fmt.Printf("could not start program: %s\n", err)
		os.Exit(1)
	}
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
charmbracelet/bubbletea | https://github.com/charmbracelet/bubbletea/blob/f9233d51192293dadda7184a4de347738606c328/examples/http/main.go | examples/http/main.go | package main
// A simple program that makes a GET request and prints the response status.
import (
"fmt"
"log"
"net/http"
"time"
tea "github.com/charmbracelet/bubbletea"
)
// url is the endpoint whose status we check.
const url = "https://charm.sh/"

// model holds the HTTP status once received, or the error that occurred.
type model struct {
	status int
	err    error
}

// statusMsg carries the HTTP status code from checkServer.
type statusMsg int

// errMsg wraps a request error so it satisfies both tea.Msg and error.
type errMsg struct{ error }

func (e errMsg) Error() string { return e.error.Error() }

// main runs the status-check program.
func main() {
	p := tea.NewProgram(model{})
	if _, err := p.Run(); err != nil {
		log.Fatal(err)
	}
}

// Init kicks off the HTTP request immediately.
func (m model) Init() tea.Cmd {
	return checkServer
}
// Update quits on q/ctrl+c/esc, quits after recording a received status, and
// stores any request error for display.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		if s := msg.String(); s == "q" || s == "ctrl+c" || s == "esc" {
			return m, tea.Quit
		}
	case statusMsg:
		// Request finished: show the status and exit.
		m.status = int(msg)
		return m, tea.Quit
	case errMsg:
		m.err = msg
	}
	return m, nil
}
// View shows the in-flight message, then either the error or the resolved
// status line.
func (m model) View() string {
	s := fmt.Sprintf("Checking %s...", url)
	if m.err != nil {
		s += fmt.Sprintf("something went wrong: %s", m.err)
	} else if m.status != 0 {
		s += fmt.Sprintf("%d %s", m.status, http.StatusText(m.status))
	}
	return s + "\n"
}
// checkServer performs a GET against url with a 10-second timeout and
// returns either the status code or the request error as a message.
func checkServer() tea.Msg {
	client := &http.Client{Timeout: 10 * time.Second}
	res, err := client.Get(url)
	if err != nil {
		return errMsg{err}
	}
	defer res.Body.Close() // nolint:errcheck
	return statusMsg(res.StatusCode)
}
| go | MIT | f9233d51192293dadda7184a4de347738606c328 | 2026-01-07T08:35:58.083951Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/.github/ci/modelslist.go | .github/ci/modelslist.go | package main
import (
"fmt"
"html/template"
"io/ioutil"
"os"
"github.com/microcosm-cc/bluemonday"
"gopkg.in/yaml.v3"
)
var modelPageTemplate string = `
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LocalAI models</title>
<link href="https://cdnjs.cloudflare.com/ajax/libs/flowbite/2.3.0/flowbite.min.css" rel="stylesheet" />
<script src="https://cdn.jsdelivr.net/npm/vanilla-lazyload@19.1.3/dist/lazyload.min.js"></script>
<link
rel="stylesheet"
href="https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.8.0/build/styles/default.min.css"
/>
<script
defer
src="https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.8.0/build/highlight.min.js"
></script>
<script
defer
src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js"
></script>
<script
defer
src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"
></script>
<script
defer
src="https://cdn.jsdelivr.net/npm/dompurify@3.0.6/dist/purify.min.js"
></script>
<link href="/static/general.css" rel="stylesheet" />
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700&family=Roboto:wght@400;500&display=swap" rel="stylesheet">
<link
href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700,900&display=swap"
rel="stylesheet" />
<link
rel="stylesheet"
href="https://cdn.jsdelivr.net/npm/tw-elements/css/tw-elements.min.css" />
<script src="https://cdn.tailwindcss.com/3.3.0"></script>
<script>
tailwind.config = {
darkMode: "class",
theme: {
fontFamily: {
sans: ["Roboto", "sans-serif"],
body: ["Roboto", "sans-serif"],
mono: ["ui-monospace", "monospace"],
},
},
corePlugins: {
preflight: false,
},
};
</script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.1.1/css/all.min.css">
<script src="https://unpkg.com/htmx.org@1.9.12" integrity="sha384-ujb1lZYygJmzgSwoxRggbCHcjc0rB2XoQrxeTUQyRjrOnlCoYta87iKBWq3EsdM2" crossorigin="anonymous"></script>
</head>
<body class="bg-gray-900 text-gray-200">
<div class="flex flex-col min-h-screen">
<nav class="bg-gray-800 shadow-lg">
<div class="container mx-auto px-4 py-4">
<div class="flex items-center justify-between">
<div class="flex items-center">
<a href="/" class="text-white text-xl font-bold"><img src="https://github.com/mudler/LocalAI/assets/2420543/0966aa2a-166e-4f99-a3e5-6c915fc997dd" alt="LocalAI Logo" class="h-10 mr-3 border-2 border-gray-300 shadow rounded"></a>
<a href="/" class="text-white text-xl font-bold">LocalAI</a>
</div>
<!-- Menu button for small screens -->
<div class="lg:hidden">
<button id="menu-toggle" class="text-gray-400 hover:text-white focus:outline-none">
<i class="fas fa-bars fa-lg"></i>
</button>
</div>
<!-- Navigation links -->
<div class="hidden lg:flex lg:items-center lg:justify-end lg:flex-1 lg:w-0">
<a href="https://localai.io" class="text-gray-400 hover:text-white px-3 py-2 rounded" target="_blank" ><i class="fas fa-book-reader pr-2"></i> Documentation</a>
</div>
</div>
<!-- Collapsible menu for small screens -->
<div class="hidden lg:hidden" id="mobile-menu">
<div class="pt-4 pb-3 border-t border-gray-700">
<a href="https://localai.io" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1" target="_blank" ><i class="fas fa-book-reader pr-2"></i> Documentation</a>
</div>
</div>
</div>
</nav>
<style>
.is-hidden {
display: none;
}
</style>
<div class="container mx-auto px-4 flex-grow">
<div class="models mt-12">
<h2 class="text-center text-3xl font-semibold text-gray-100">
LocalAI model gallery list </h2><br>
<h2 class="text-center text-3xl font-semibold text-gray-100">
🖼️ Available {{.AvailableModels}} models</i> <a href="https://localai.io/models/" target="_blank" >
<i class="fas fa-circle-info pr-2"></i>
</a></h2>
<h3>
Refer to the Model gallery <a href="https://localai.io/models/" target="_blank" ><i class="fas fa-circle-info pr-2"></i></a> for more information on how to use the models with LocalAI.<br>
You can install models with the CLI command <code>local-ai models install <model-name></code>. or by using the WebUI.
</h3>
<input class="form-control appearance-none block w-full mt-5 px-3 py-2 text-base font-normal text-gray-300 pb-2 mb-5 bg-gray-800 bg-clip-padding border border-solid border-gray-600 rounded transition ease-in-out m-0 focus:text-gray-300 focus:bg-gray-900 focus:border-blue-500 focus:outline-none" type="search"
id="searchbox" placeholder="Live search keyword..">
<div class="dark grid grid-cols-1 grid-rows-1 md:grid-cols-3 block rounded-lg shadow-secondary-1 dark:bg-surface-dark">
{{ range $_, $model := .Models }}
<div class="box me-4 mb-2 block rounded-lg bg-white shadow-secondary-1 dark:bg-gray-800 dark:bg-surface-dark dark:text-white text-surface pb-2">
<div>
{{ $icon := "https://upload.wikimedia.org/wikipedia/commons/6/65/No-Image-Placeholder.svg" }}
{{ if $model.Icon }}
{{ $icon = $model.Icon }}
{{ end }}
<div class="flex justify-center items-center">
<img data-src="{{ $icon }}" alt="{{$model.Name}}" class="rounded-t-lg max-h-48 max-w-96 object-cover mt-3 lazy">
</div>
<div class="p-6 text-surface dark:text-white">
<h5 class="mb-2 text-xl font-medium leading-tight">{{$model.Name}}</h5>
<p class="mb-4 text-base truncate">{{ $model.Description }}</p>
</div>
<div class="px-6 pt-4 pb-2">
<!-- Modal toggle -->
<button data-modal-target="{{ $model.Name}}-modal" data-modal-toggle="{{ $model.Name }}-modal" class="block text-white bg-blue-700 hover:bg-blue-800 focus:ring-4 focus:outline-none focus:ring-blue-300 font-medium rounded-lg text-sm px-5 py-2.5 text-center dark:bg-blue-600 dark:hover:bg-blue-700 dark:focus:ring-blue-800" type="button">
More info
</button>
<!-- Main modal -->
<div id="{{ $model.Name}}-modal" tabindex="-1" aria-hidden="true" class="hidden overflow-y-auto overflow-x-hidden fixed top-0 right-0 left-0 z-50 justify-center items-center w-full md:inset-0 h-[calc(100%-1rem)] max-h-full">
<div class="relative p-4 w-full max-w-2xl max-h-full">
<!-- Modal content -->
<div class="relative bg-white rounded-lg shadow dark:bg-gray-700">
<!-- Modal header -->
<div class="flex items-center justify-between p-4 md:p-5 border-b rounded-t dark:border-gray-600">
<h3 class="text-xl font-semibold text-gray-900 dark:text-white">
{{ $model.Name}}
</h3>
<button type="button" class="text-gray-400 bg-transparent hover:bg-gray-200 hover:text-gray-900 rounded-lg text-sm w-8 h-8 ms-auto inline-flex justify-center items-center dark:hover:bg-gray-600 dark:hover:text-white" data-modal-hide="{{$model.Name}}-modal">
<svg class="w-3 h-3" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 14 14">
<path stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="m1 1 6 6m0 0 6 6M7 7l6-6M7 7l-6 6"/>
</svg>
<span class="sr-only">Close modal</span>
</button>
</div>
<!-- Modal body -->
<div class="p-4 md:p-5 space-y-4">
<div class="flex justify-center items-center">
<img data-src="{{ $icon }}" alt="{{$model.Name}}" class="lazy rounded-t-lg max-h-48 max-w-96 object-cover mt-3">
</div>
<p class="text-base leading-relaxed text-gray-500 dark:text-gray-400">
{{ $model.Description }}
</p>
<p class="text-base leading-relaxed text-gray-500 dark:text-gray-400">
To install the model with the CLI, run: <br>
<code> local-ai models install {{$model.Name}} </code> <br>
<hr>
See also <a href="https://localai.io/models/" target="_blank" >
Installation <i class="fas fa-circle-info pr-2"></i>
</a> to see how to install models with the REST API.
</p>
<p class="text-base leading-relaxed text-gray-500 dark:text-gray-400">
<ul>
{{ range $_, $u := $model.URLs }}
<li><a href="{{ $u }}" target=_blank><i class="fa-solid fa-link"></i> {{ $u }}</a></li>
{{ end }}
</ul>
</p>
</div>
<!-- Modal footer -->
<div class="flex items-center p-4 md:p-5 border-t border-gray-200 rounded-b dark:border-gray-600">
<button data-modal-hide="{{ $model.Name}}-modal" type="button" class="py-2.5 px-5 ms-3 text-sm font-medium text-gray-900 focus:outline-none bg-white rounded-lg border border-gray-200 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:ring-4 focus:ring-gray-100 dark:focus:ring-gray-700 dark:bg-gray-800 dark:text-gray-400 dark:border-gray-600 dark:hover:text-white dark:hover:bg-gray-700">Close</button>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
{{ end }}
</div>
</div>
</div>
<script>
var lazyLoadInstance = new LazyLoad({
// Your custom settings go here
});
let cards = document.querySelectorAll('.box')
function liveSearch() {
let search_query = document.getElementById("searchbox").value;
//Use innerText if all contents are visible
//Use textContent for including hidden elements
for (var i = 0; i < cards.length; i++) {
if(cards[i].textContent.toLowerCase()
.includes(search_query.toLowerCase())) {
cards[i].classList.remove("is-hidden");
} else {
cards[i].classList.add("is-hidden");
}
}
}
//A little delay
let typingTimer;
let typeInterval = 500;
let searchInput = document.getElementById('searchbox');
searchInput.addEventListener('keyup', () => {
clearTimeout(typingTimer);
typingTimer = setTimeout(liveSearch, typeInterval);
});
</script>
</div>
<script src="https://cdnjs.cloudflare.com/ajax/libs/flowbite/2.3.0/flowbite.min.js"></script>
</body>
</html>
`
// GalleryModel is one entry of the input YAML file: the minimal metadata
// needed to render a single model card on the gallery page.
type GalleryModel struct {
	Name        string   `json:"name" yaml:"name"`               // display name (sanitized before rendering)
	URLs        []string `json:"urls" yaml:"urls"`               // reference links shown in the model's modal
	Icon        string   `json:"icon" yaml:"icon"`               // optional icon image URL
	Description string   `json:"description" yaml:"description"` // free-text description (sanitized before rendering)
}
// main reads a YAML file of gallery models (path given as the first CLI
// argument), sanitizes the user-controlled fields, and renders the HTML
// model page template to stdout.
//
// All diagnostics go to stderr so stdout carries only the rendered HTML
// (it is typically redirected to a file).
func main() {
	// Guard the positional argument: indexing os.Args[1] directly would
	// panic when the program is invoked without a path.
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage:", os.Args[0], "<models.yaml>")
		return
	}
	// Read the YAML file which contains the models.
	f, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error reading file:", err)
		return
	}
	models := []*GalleryModel{}
	if err := yaml.Unmarshal(f, &models); err != nil {
		fmt.Fprintln(os.Stderr, "Error unmarshaling YAML:", err)
		return
	}
	// Ensure that all arbitrary text content is sanitized before display;
	// names and descriptions originate from third-party model metadata.
	for i, m := range models {
		models[i].Name = bluemonday.StrictPolicy().Sanitize(m.Name)
		models[i].Description = bluemonday.StrictPolicy().Sanitize(m.Description)
	}
	// Render the template with the sanitized model list and its count.
	data := struct {
		Models          []*GalleryModel
		AvailableModels int
	}{
		Models:          models,
		AvailableModels: len(models),
	}
	tmpl := template.Must(template.New("modelPage").Parse(modelPageTemplate))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		fmt.Fprintln(os.Stderr, "Error executing template:", err)
		return
	}
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/.github/gallery-agent/agent.go | .github/gallery-agent/agent.go | package main
import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"regexp"
	"slices"
	"strings"
	"time"

	"github.com/ghodss/yaml"
	hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
	cogito "github.com/mudler/cogito"
	"github.com/mudler/cogito/structures"
	"github.com/sashabaranov/go-openai/jsonschema"
)
// Agent configuration is taken from the environment so it can run in CI:
//   OPENAI_MODEL / OPENAI_KEY / OPENAI_BASE_URL — LLM endpoint settings
//   GALLERY_INDEX_PATH — optional override for the gallery index location
var (
	openAIModel      = os.Getenv("OPENAI_MODEL")
	openAIKey        = os.Getenv("OPENAI_KEY")
	openAIBaseURL    = os.Getenv("OPENAI_BASE_URL")
	galleryIndexPath = os.Getenv("GALLERY_INDEX_PATH")
	// Default LLM client shared by all agent calls.
	llm = cogito.NewOpenAILLM(openAIModel, openAIKey, openAIBaseURL)
)
// cleanTextContent normalizes LLM-produced text for embedding in YAML: it
// strips trailing whitespace from every line, collapses runs of blank lines
// into a single one, drops trailing newlines, and finally removes any
// <think>/<thinking> reasoning blocks via stripThinkingTags.
func cleanTextContent(text string) string {
	var cleaned []string
	lastBlank := false
	for _, raw := range strings.Split(text, "\n") {
		// Drop all trailing whitespace (spaces, tabs, carriage returns).
		line := strings.TrimRight(raw, " \t\r")
		if line == "" {
			// Keep at most one consecutive blank line.
			if !lastBlank {
				cleaned = append(cleaned, "")
			}
			lastBlank = true
			continue
		}
		cleaned = append(cleaned, line)
		lastBlank = false
	}
	// Remove trailing blank lines from the joined result.
	joined := strings.TrimRight(strings.Join(cleaned, "\n"), "\n")
	return stripThinkingTags(joined)
}
// galleryModel mirrors the subset of a gallery index entry needed for
// duplicate detection: the entry name and its source URLs.
type galleryModel struct {
	Name string   `yaml:"name"`
	Urls []string `yaml:"urls"`
}
// isModelExisting reports whether modelID already appears among the URLs of
// any entry in the gallery index file.
func isModelExisting(modelID string) (bool, error) {
	indexPath := getGalleryIndexPath()
	raw, err := os.ReadFile(indexPath)
	if err != nil {
		return false, fmt.Errorf("failed to read %s: %w", indexPath, err)
	}
	var entries []galleryModel
	if err := yaml.Unmarshal(raw, &entries); err != nil {
		return false, fmt.Errorf("failed to unmarshal %s: %w", indexPath, err)
	}
	for _, entry := range entries {
		if slices.Contains(entry.Urls, modelID) {
			return true, nil
		}
	}
	return false, nil
}
// filterExistingModels drops models whose ID is already present in the
// gallery index, returning only the ones that still need to be added.
// Index-lookup errors are logged and the affected model is skipped.
func filterExistingModels(models []ProcessedModel) ([]ProcessedModel, error) {
	var fresh []ProcessedModel
	for _, m := range models {
		exists, err := isModelExisting(m.ModelID)
		switch {
		case err != nil:
			fmt.Printf("Error checking if model %s exists: %v, skipping\n", m.ModelID, err)
		case exists:
			fmt.Printf("Skipping existing model: %s\n", m.ModelID)
		default:
			fresh = append(fresh, m)
		}
	}
	fmt.Printf("Filtered out %d existing models, %d new models remaining\n",
		len(models)-len(fresh), len(fresh))
	return fresh, nil
}
// getGalleryIndexPath returns the configured gallery index file path
// (GALLERY_INDEX_PATH) or falls back to the default "gallery/index.yaml".
func getGalleryIndexPath() string {
	if galleryIndexPath == "" {
		return "gallery/index.yaml"
	}
	return galleryIndexPath
}
// Reasoning-block patterns, compiled once at package scope instead of on
// every call: regexp.MustCompile is comparatively expensive and this
// function sits on the README-processing path for every model.
var (
	thinkingBlockRe = regexp.MustCompile(`(?s)<thinking>.*?</thinking>`)
	thinkBlockRe    = regexp.MustCompile(`(?s)<think>.*?</think>`)
)

// stripThinkingTags removes <thinking>...</thinking> and <think>...</think>
// blocks (including multi-line content) that reasoning models emit, then
// trims surrounding whitespace from what remains.
func stripThinkingTags(content string) string {
	// Remove content between <thinking> and </thinking> (including multi-line)
	content = thinkingBlockRe.ReplaceAllString(content, "")
	// Remove content between <think> and </think> (including multi-line)
	content = thinkBlockRe.ReplaceAllString(content, "")
	// Clean up any surrounding whitespace
	return strings.TrimSpace(content)
}
// getRealReadme resolves the "real" (base-model) README for a possibly
// quantized repository and returns a concise, cleaned description suitable
// for the gallery. The LLM is given the HF readme tool and may invoke it
// several times to follow the quantized README back to the original model.
func getRealReadme(ctx context.Context, repository string) (string, error) {
	// Create a conversation fragment
	fragment := cogito.NewEmptyFragment().
		AddMessage("user",
			`Your task is to get a clear description of a large language model from huggingface by using the provided tool. I will share with you a repository that might be quantized, and as such probably not by the original model author. We need to get the real description of the model, and not the one that might be quantized. You will have to call the tool to get the readme more than once by figuring out from the quantized readme which is the base model readme. This is the repository: `+repository)
	// Execute with tools: up to 3 iterations with 3 attempts each.
	result, err := cogito.ExecuteTools(llm, fragment,
		cogito.WithIterations(3),
		cogito.WithMaxAttempts(3),
		cogito.WithTools(&HFReadmeTool{client: hfapi.NewClient()}))
	if err != nil {
		return "", err
	}
	result = result.AddMessage("user", "Describe the model in a clear and concise way that can be shared in a model gallery.")
	// Get a response
	newFragment, err := llm.Ask(ctx, result)
	if err != nil {
		return "", err
	}
	content := newFragment.LastMessage().Content
	// Normalize whitespace and strip <think>/<thinking> blocks before use.
	return cleanTextContent(content), nil
}
// selectMostInterestingModels asks the LLM to rank the search hits and pick
// the top candidates for the gallery, then filters searchResult.Models down
// to the repository IDs the model selected (original order preserved).
// With a single hit the LLM round-trip is skipped entirely.
func selectMostInterestingModels(ctx context.Context, searchResult *SearchResult) ([]ProcessedModel, error) {
	// Nothing to rank when there is only one candidate.
	if len(searchResult.Models) == 1 {
		return searchResult.Models, nil
	}
	// Create a conversation fragment
	fragment := cogito.NewEmptyFragment().
		AddMessage("user",
			`Your task is to analyze a list of AI models and select the most interesting ones for a model gallery. You will be given detailed information about multiple models including their metadata, file information, and README content.
Consider the following criteria when selecting models:
1. Model popularity (download count)
2. Model recency (last modified date)
3. Model completeness (has preferred model file, README, etc.)
4. Model uniqueness (not duplicates or very similar models)
5. Model quality (based on README content and description)
6. Model utility (practical applications)
You should select models that would be most valuable for users browsing a model gallery. Prioritize models that are:
- Well-documented with clear READMEs
- Recently updated
- Popular (high download count)
- Have the preferred quantization format available
- Offer unique capabilities or are from reputable authors
Return your analysis and selection reasoning.`)
	// Add the search results as context: a plain-text dossier per model.
	modelsInfo := fmt.Sprintf("Found %d models matching '%s' with quantization preference '%s':\n\n",
		searchResult.TotalModelsFound, searchResult.SearchTerm, searchResult.Quantization)
	for i, model := range searchResult.Models {
		modelsInfo += fmt.Sprintf("Model %d:\n", i+1)
		modelsInfo += fmt.Sprintf("  ID: %s\n", model.ModelID)
		modelsInfo += fmt.Sprintf("  Author: %s\n", model.Author)
		modelsInfo += fmt.Sprintf("  Downloads: %d\n", model.Downloads)
		modelsInfo += fmt.Sprintf("  Last Modified: %s\n", model.LastModified)
		modelsInfo += fmt.Sprintf("  Files: %d files\n", len(model.Files))
		if model.PreferredModelFile != nil {
			modelsInfo += fmt.Sprintf("  Preferred Model File: %s (%d bytes)\n",
				model.PreferredModelFile.Path, model.PreferredModelFile.Size)
		} else {
			modelsInfo += "  No preferred model file found\n"
		}
		if model.ReadmeContent != "" {
			modelsInfo += fmt.Sprintf("  README: %s\n", model.ReadmeContent)
		}
		if model.ProcessingError != "" {
			modelsInfo += fmt.Sprintf("  Processing Error: %s\n", model.ProcessingError)
		}
		modelsInfo += "\n"
	}
	fragment = fragment.AddMessage("user", modelsInfo)
	fragment = fragment.AddMessage("user", "Based on your analysis, select the top 5 most interesting models and provide a brief explanation for each selection. Also, create a filtered SearchResult with only the selected models. Return just a list of repositories IDs, you will later be asked to output it as a JSON array with the json tool.")
	// Get a response (free-form reasoning; logged for CI visibility).
	newFragment, err := llm.Ask(ctx, fragment)
	if err != nil {
		return nil, err
	}
	fmt.Println(newFragment.LastMessage().Content)
	// Second pass: force the selection into a structured JSON array.
	repositories := struct {
		Repositories []string `json:"repositories"`
	}{}
	s := structures.Structure{
		Schema: jsonschema.Definition{
			Type:                 jsonschema.Object,
			AdditionalProperties: false,
			Properties: map[string]jsonschema.Definition{
				"repositories": {
					Type:        jsonschema.Array,
					Items:       &jsonschema.Definition{Type: jsonschema.String},
					Description: "The trending repositories IDs",
				},
			},
			Required: []string{"repositories"},
		},
		Object: &repositories,
	}
	err = newFragment.ExtractStructure(ctx, llm, s)
	if err != nil {
		return nil, err
	}
	// Keep only the models whose ID the LLM selected.
	filteredModels := []ProcessedModel{}
	for _, m := range searchResult.Models {
		if slices.Contains(repositories.Repositories, m.ModelID) {
			filteredModels = append(filteredModels, m)
		}
	}
	return filteredModels, nil
}
// ModelMetadata is the structured output extracted by the LLM from a model's
// README: descriptive tags plus a license identifier ("" when unknown).
type ModelMetadata struct {
	Tags    []string `json:"tags"`
	License string   `json:"license"`
}
// extractModelMetadata asks the LLM to derive gallery tags and a license
// identifier from the model's README/metadata, then extracts the answer as
// a structured object. Returns (tags, license, error); license is "" when
// none could be determined.
func extractModelMetadata(ctx context.Context, model ProcessedModel) ([]string, string, error) {
	// Create a conversation fragment
	fragment := cogito.NewEmptyFragment().
		AddMessage("user",
			`Your task is to extract metadata from an AI model's README and documentation. You will be provided with:
1. Model information (ID, author, description)
2. README content
You need to extract:
1. **Tags**: An array of relevant tags that describe the model. Use common tags from the gallery such as:
- llm, gguf, gpu, cpu, multimodal, image-to-text, text-to-text, text-to-speech, tts
- thinking, reasoning, chat, instruction-tuned, code, vision
- Model family names (e.g., llama, qwen, mistral, gemma) if applicable
- Any other relevant descriptive tags
Select 3-8 most relevant tags.
2. **License**: The license identifier (e.g., "apache-2.0", "mit", "llama2", "gpl-3.0", "bsd", "cc-by-4.0").
If no license is found, return an empty string.
Return the extracted metadata in a structured format.`)
	// Add model information: prefer the full README, fall back to preview.
	modelInfo := "Model Information:\n"
	modelInfo += fmt.Sprintf("  ID: %s\n", model.ModelID)
	modelInfo += fmt.Sprintf("  Author: %s\n", model.Author)
	modelInfo += fmt.Sprintf("  Downloads: %d\n", model.Downloads)
	if model.ReadmeContent != "" {
		modelInfo += fmt.Sprintf("  README Content:\n%s\n", model.ReadmeContent)
	} else if model.ReadmeContentPreview != "" {
		modelInfo += fmt.Sprintf("  README Preview: %s\n", model.ReadmeContentPreview)
	}
	fragment = fragment.AddMessage("user", modelInfo)
	fragment = fragment.AddMessage("user", "Extract the tags and license from the model information. Return the metadata as a JSON object with 'tags' (array of strings) and 'license' (string).")
	// Get a response
	newFragment, err := llm.Ask(ctx, fragment)
	if err != nil {
		return nil, "", err
	}
	// Extract structured metadata from the free-form answer.
	metadata := ModelMetadata{}
	s := structures.Structure{
		Schema: jsonschema.Definition{
			Type:                 jsonschema.Object,
			AdditionalProperties: false,
			Properties: map[string]jsonschema.Definition{
				"tags": {
					Type:        jsonschema.Array,
					Items:       &jsonschema.Definition{Type: jsonschema.String},
					Description: "Array of relevant tags describing the model",
				},
				"license": {
					Type:        jsonschema.String,
					Description: "License identifier (e.g., apache-2.0, mit, llama2). Empty string if not found.",
				},
			},
			Required: []string{"tags", "license"},
		},
		Object: &metadata,
	}
	err = newFragment.ExtractStructure(ctx, llm, s)
	if err != nil {
		return nil, "", err
	}
	return metadata.Tags, metadata.License, nil
}
// extractIconFromReadme scans the README content for image URLs and returns the first suitable icon URL found
func extractIconFromReadme(readmeContent string) string {
if readmeContent == "" {
return ""
}
// Regular expressions to match image URLs in various formats (case-insensitive)
// Match markdown image syntax:  - case insensitive extensions
markdownImageRegex := regexp.MustCompile(`(?i)!\[[^\]]*\]\(([^)]+\.(png|jpg|jpeg|svg|webp|gif))\)`)
// Match HTML img tags: <img src="url">
htmlImageRegex := regexp.MustCompile(`(?i)<img[^>]+src=["']([^"']+\.(png|jpg|jpeg|svg|webp|gif))["']`)
// Match plain URLs ending with image extensions
plainImageRegex := regexp.MustCompile(`(?i)https?://[^\s<>"']+\.(png|jpg|jpeg|svg|webp|gif)`)
// Try markdown format first
matches := markdownImageRegex.FindStringSubmatch(readmeContent)
if len(matches) > 1 && matches[1] != "" {
url := strings.TrimSpace(matches[1])
// Prefer HuggingFace CDN URLs or absolute URLs
if strings.HasPrefix(strings.ToLower(url), "http") {
return url
}
}
// Try HTML img tags
matches = htmlImageRegex.FindStringSubmatch(readmeContent)
if len(matches) > 1 && matches[1] != "" {
url := strings.TrimSpace(matches[1])
if strings.HasPrefix(strings.ToLower(url), "http") {
return url
}
}
// Try plain URLs
matches = plainImageRegex.FindStringSubmatch(readmeContent)
if len(matches) > 0 {
url := strings.TrimSpace(matches[0])
if strings.HasPrefix(strings.ToLower(url), "http") {
return url
}
}
return ""
}
// getHuggingFaceAvatarURL attempts to resolve the avatar image URL for a
// HuggingFace user via the public API (https://huggingface.co/api/users/{user}).
// It returns "" on any failure — network error, non-200 status, unparsable
// body, or missing avatar field — so callers can treat the icon as optional.
func getHuggingFaceAvatarURL(author string) string {
	if author == "" {
		return ""
	}
	// HuggingFace API endpoint: https://huggingface.co/api/users/{username}
	baseURL := "https://huggingface.co"
	userURL := fmt.Sprintf("%s/api/users/%s", baseURL, author)
	req, err := http.NewRequest("GET", userURL, nil)
	if err != nil {
		return ""
	}
	// Bound the request: the zero-value http.Client has no timeout, so a
	// slow or unreachable API would stall gallery generation indefinitely.
	client := &http.Client{Timeout: 15 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return ""
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return ""
	}
	// Parse the response to get the avatar URL.
	var userInfo map[string]interface{}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ""
	}
	if err := json.Unmarshal(body, &userInfo); err != nil {
		return ""
	}
	// The avatar has been exposed under both keys; try each in turn.
	if avatar, ok := userInfo["avatarUrl"].(string); ok && avatar != "" {
		return avatar
	}
	if avatar, ok := userInfo["avatar"].(string); ok && avatar != "" {
		return avatar
	}
	return ""
}
// extractModelIcon picks an icon URL for the model: first any image found in
// the README, otherwise the author's HuggingFace avatar, otherwise "".
func extractModelIcon(model ProcessedModel) string {
	// README images take priority over the author avatar.
	if icon := extractIconFromReadme(model.ReadmeContent); icon != "" {
		return icon
	}
	// Fallback: the HuggingFace user avatar, when an author is known.
	if model.Author == "" {
		return ""
	}
	return getHuggingFaceAvatarURL(model.Author)
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/.github/gallery-agent/tools.go | .github/gallery-agent/tools.go | package main
import (
"fmt"
hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
openai "github.com/sashabaranov/go-openai"
jsonschema "github.com/sashabaranov/go-openai/jsonschema"
)
// HFReadmeTool is an LLM tool that fetches the README of a HuggingFace
// repository through the HF API client.
type HFReadmeTool struct {
	client *hfapi.Client // API client used for README retrieval
}
// Execute implements the tool invocation: it fetches the README.md of the
// HuggingFace repository named in args["repository"].
func (s *HFReadmeTool) Execute(args map[string]any) (string, error) {
	repo, ok := args["repository"].(string)
	if !ok {
		// The schema in Tool() declares "repository" as a required string,
		// so a missing or non-string value is a malformed tool call.
		// (Previous message "no query" named the wrong parameter.)
		return "", fmt.Errorf("missing or invalid 'repository' argument")
	}
	readme, err := s.client.GetReadmeContent(repo, "README.md")
	if err != nil {
		return "", err
	}
	return readme, nil
}
// Tool describes this tool to the OpenAI function-calling API: a function
// named "hf_readme" with a single required string parameter, "repository".
func (s *HFReadmeTool) Tool() openai.Tool {
	return openai.Tool{
		Type: openai.ToolTypeFunction,
		Function: &openai.FunctionDefinition{
			Name:        "hf_readme",
			Description: "A tool to get the README content of a huggingface repository",
			Parameters: jsonschema.Definition{
				Type: jsonschema.Object,
				Properties: map[string]jsonschema.Definition{
					"repository": {
						Type:        jsonschema.String,
						Description: "The huggingface repository to get the README content of",
					},
				},
				Required: []string{"repository"},
			},
		},
	}
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/.github/gallery-agent/gallery.go | .github/gallery-agent/gallery.go | package main
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"github.com/ghodss/yaml"
"github.com/mudler/LocalAI/core/gallery/importers"
)
// formatTextContent indents text for the gallery YAML layout used by
// generateYAMLEntry: 4 spaces for regular lines, 6 for list items.
func formatTextContent(text string) string {
	return formatTextContentWithIndent(text, 4, 6)
}
// formatTextContentWithIndent re-indents every line of text for embedding in
// a YAML document. Non-empty lines whose content begins with "-" are treated
// as list items and indented by exactly listItemIndent spaces; every other
// non-empty line gets baseIndent plus whatever leading whitespace it already
// carried (preserving the relative nesting that yaml.Marshal produced).
// Empty lines stay empty, and trailing spaces/tabs are stripped from the
// final result.
func formatTextContentWithIndent(text string, baseIndent int, listItemIndent int) string {
	var out []string
	for _, raw := range strings.Split(text, "\n") {
		line := strings.TrimRight(raw, " \t\r")
		if line == "" {
			// Keep empty lines as empty (no indentation).
			out = append(out, "")
			continue
		}
		body := strings.TrimLeft(line, " \t")
		indent := listItemIndent
		if !strings.HasPrefix(body, "-") {
			// Regular lines: base indent plus the original leading
			// whitespace, so nested keys keep their relative depth.
			indent = baseIndent + (len(line) - len(body))
		}
		out = append(out, strings.Repeat(" ", indent)+body)
	}
	// Drop any trailing spaces from the joined block.
	return strings.TrimRight(strings.Join(out, "\n"), " \t")
}
// generateYAMLEntry builds one YAML gallery entry for the model at the given
// quantization. It discovers the model's config and file list through the
// importer, derives a short lowercase name from the repository ID, and
// assembles the indented description/metadata/overrides/files sections.
//
// NOTE(review): a DiscoverModelConfig failure panics instead of returning an
// error — acceptable for a one-shot CI tool, but confirm callers expect it.
func generateYAMLEntry(model ProcessedModel, quantization string) string {
	modelConfig, err := importers.DiscoverModelConfig("https://huggingface.co/"+model.ModelID, json.RawMessage(`{ "quantization": "`+quantization+`"}`))
	if err != nil {
		panic(err)
	}
	// Extract model name from ModelID (the part after the last "/").
	parts := strings.Split(model.ModelID, "/")
	modelName := model.ModelID
	if len(parts) > 0 {
		modelName = strings.ToLower(parts[len(parts)-1])
	}
	// Remove common suffixes left over from quantized repository names.
	modelName = strings.ReplaceAll(modelName, "-gguf", "")
	modelName = strings.ReplaceAll(modelName, "-q4_k_m", "")
	modelName = strings.ReplaceAll(modelName, "-q4_k_s", "")
	modelName = strings.ReplaceAll(modelName, "-q3_k_m", "")
	modelName = strings.ReplaceAll(modelName, "-q2_k", "")
	description := model.ReadmeContent
	if description == "" {
		description = fmt.Sprintf("AI model: %s", modelName)
	}
	// Clean up description to prevent YAML linting issues.
	description = cleanTextContent(description)
	formattedDescription := formatTextContent(description)
	configFile := formatTextContent(modelConfig.ConfigFile)
	filesYAML, _ := yaml.Marshal(modelConfig.Files)
	// Files section: list items need 4 spaces (not 6), since files: is at 2 spaces
	files := formatTextContentWithIndent(string(filesYAML), 4, 4)
	// Build optional metadata sections (license, tags, icon).
	var metadataSections []string
	// Add license if present
	if model.License != "" {
		metadataSections = append(metadataSections, fmt.Sprintf(`  license: "%s"`, model.License))
	}
	// Add tags if present
	if len(model.Tags) > 0 {
		tagsYAML, _ := yaml.Marshal(model.Tags)
		tagsFormatted := formatTextContentWithIndent(string(tagsYAML), 4, 4)
		tagsFormatted = strings.TrimRight(tagsFormatted, "\n")
		metadataSections = append(metadataSections, fmt.Sprintf("  tags:\n%s", tagsFormatted))
	}
	// Add icon if present
	if model.Icon != "" {
		metadataSections = append(metadataSections, fmt.Sprintf(`  icon: %s`, model.Icon))
	}
	// Build the metadata block
	metadataBlock := ""
	if len(metadataSections) > 0 {
		metadataBlock = strings.Join(metadataSections, "\n") + "\n"
	}
	// Entry skeleton; the %s placeholders receive pre-indented sections.
	yamlTemplate := ""
	yamlTemplate = `- name: "%s"
  url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
  urls:
  - https://huggingface.co/%s
  description: |
%s%s
  overrides:
%s
  files:
%s`
	// Trim trailing newlines from formatted sections to prevent extra blank lines.
	formattedDescription = strings.TrimRight(formattedDescription, "\n")
	configFile = strings.TrimRight(configFile, "\n")
	files = strings.TrimRight(files, "\n")
	// Add newline before metadata block if present.
	if metadataBlock != "" {
		metadataBlock = "\n" + strings.TrimRight(metadataBlock, "\n")
	}
	return fmt.Sprintf(yamlTemplate,
		modelName,
		model.ModelID,
		formattedDescription,
		metadataBlock,
		configFile,
		files,
	)
}
// generateYAMLForModels renders a YAML gallery entry for every model and
// prepends the entries to the gallery index file, keeping any leading "---"
// document marker intact. With an empty model list the file is untouched.
//
// NOTE(review): ctx is currently unused; kept for interface stability.
func generateYAMLForModels(ctx context.Context, models []ProcessedModel, quantization string) error {
	// Generate YAML entries for each model.
	var yamlEntries []string
	for _, model := range models {
		fmt.Printf("Generating YAML entry for model: %s\n", model.ModelID)
		// Generate YAML entry
		yamlEntry := generateYAMLEntry(model, quantization)
		yamlEntries = append(yamlEntries, yamlEntry)
	}
	// Prepend to index.yaml (write at the top so new models appear first).
	if len(yamlEntries) > 0 {
		indexPath := getGalleryIndexPath()
		fmt.Printf("Prepending YAML entries to %s...\n", indexPath)
		// Read current content
		content, err := os.ReadFile(indexPath)
		if err != nil {
			return fmt.Errorf("failed to read %s: %w", indexPath, err)
		}
		existingContent := string(content)
		yamlBlock := strings.Join(yamlEntries, "\n")
		// Three cases: "---\n" header, bare "---" header, or no header.
		var newContent string
		if strings.HasPrefix(existingContent, "---\n") {
			// File starts with "---", prepend new entries after it
			restOfContent := strings.TrimPrefix(existingContent, "---\n")
			// Ensure proper spacing: "---\n" + new entries + "\n" + rest of content
			newContent = "---\n" + yamlBlock + "\n" + restOfContent
		} else if strings.HasPrefix(existingContent, "---") {
			// File starts with "---" but no newline after
			restOfContent := strings.TrimPrefix(existingContent, "---")
			newContent = "---\n" + yamlBlock + "\n" + strings.TrimPrefix(restOfContent, "\n")
		} else {
			// No "---" at start, prepend new entries at the very beginning
			// Trim leading whitespace from existing content
			existingContent = strings.TrimLeft(existingContent, " \t\n\r")
			newContent = yamlBlock + "\n" + existingContent
		}
		// Write back to file
		err = os.WriteFile(indexPath, []byte(newContent), 0644)
		if err != nil {
			return fmt.Errorf("failed to write %s: %w", indexPath, err)
		}
		fmt.Printf("Successfully prepended %d models to %s\n", len(yamlEntries), indexPath)
	}
	return nil
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/.github/gallery-agent/testing.go | .github/gallery-agent/testing.go | package main
import (
"context"
"fmt"
"math/rand"
"strings"
"time"
)
// runSyntheticMode generates 1-3 random fake models and appends them to the
// gallery index, exercising the YAML-generation pipeline without any network
// or LLM access.
func runSyntheticMode() error {
	gen := NewSyntheticDataGenerator()
	// Random batch size between 1 and 3.
	count := gen.rand.Intn(3) + 1
	fmt.Printf("Generating %d synthetic models for testing...\n", count)
	models := make([]ProcessedModel, 0, count)
	for i := 0; i < count; i++ {
		m := gen.GenerateProcessedModel()
		models = append(models, m)
		fmt.Printf("Generated synthetic model: %s\n", m.ModelID)
	}
	// Generate YAML entries and prepend them to the gallery index.
	fmt.Println("Generating YAML entries for synthetic models...")
	if err := generateYAMLForModels(context.Background(), models, "Q4_K_M"); err != nil {
		return fmt.Errorf("error generating YAML entries: %w", err)
	}
	fmt.Printf("Successfully added %d synthetic models to the gallery for testing!\n", len(models))
	return nil
}
// SyntheticDataGenerator produces fake-but-plausible model records for
// exercising the gallery pipeline in tests.
type SyntheticDataGenerator struct {
	rand *rand.Rand // private PRNG so runs do not disturb the global source
}
// NewSyntheticDataGenerator returns a generator whose PRNG is seeded from
// the current wall-clock time.
func NewSyntheticDataGenerator() *SyntheticDataGenerator {
	src := rand.NewSource(time.Now().UnixNano())
	return &SyntheticDataGenerator{rand: rand.New(src)}
}
// GenerateProcessedModelFile creates a synthetic ProcessedModelFile of a
// randomly chosen kind: a .gguf model file, a README.md, or a generic file.
func (g *SyntheticDataGenerator) GenerateProcessedModelFile() ProcessedModelFile {
	fileTypes := []string{"model", "readme", "other"}
	fileType := fileTypes[g.rand.Intn(len(fileTypes))]
	var path string
	var isReadme bool
	switch fileType {
	case "model":
		path = fmt.Sprintf("model-%s.gguf", g.randomString(8))
		isReadme = false
	case "readme":
		path = "README.md"
		isReadme = true
	default:
		path = fmt.Sprintf("file-%s.txt", g.randomString(6))
		isReadme = false
	}
	return ProcessedModelFile{
		Path:     path,
		Size:     int64(g.rand.Intn(1000000000) + 1000000), // 1MB to 1GB
		SHA256:   g.randomSHA256(),
		IsReadme: isReadme,
		FileType: fileType,
	}
}
// GenerateProcessedModel creates a complete synthetic ProcessedModel with a
// random author/name, 2+ files (guaranteeing at least one model file and one
// README), 3-6 deduplicated tags, a random license (possibly empty), and an
// optional icon URL.
func (g *SyntheticDataGenerator) GenerateProcessedModel() ProcessedModel {
	authors := []string{"microsoft", "meta", "google", "openai", "anthropic", "mistralai", "huggingface"}
	modelNames := []string{"llama", "gpt", "claude", "mistral", "gemma", "phi", "qwen", "codellama"}
	author := authors[g.rand.Intn(len(authors))]
	modelName := modelNames[g.rand.Intn(len(modelNames))]
	modelID := fmt.Sprintf("%s/%s-%s", author, modelName, g.randomString(6))
	// Generate files: 2-6 random entries.
	numFiles := g.rand.Intn(5) + 2 // 2-6 files
	files := make([]ProcessedModelFile, numFiles)
	// Track whether the random draw produced the required file kinds.
	hasModelFile := false
	hasReadme := false
	for i := 0; i < numFiles; i++ {
		files[i] = g.GenerateProcessedModelFile()
		if files[i].FileType == "model" {
			hasModelFile = true
		}
		if files[i].FileType == "readme" {
			hasReadme = true
		}
	}
	// Add required files if missing so every model is "complete".
	if !hasModelFile {
		modelFile := g.GenerateProcessedModelFile()
		modelFile.FileType = "model"
		modelFile.Path = fmt.Sprintf("%s-Q4_K_M.gguf", modelName)
		files = append(files, modelFile)
	}
	if !hasReadme {
		readmeFile := g.GenerateProcessedModelFile()
		readmeFile.FileType = "readme"
		readmeFile.Path = "README.md"
		readmeFile.IsReadme = true
		files = append(files, readmeFile)
	}
	// Find preferred model file (first "model" entry).
	var preferredModelFile *ProcessedModelFile
	for i := range files {
		if files[i].FileType == "model" {
			preferredModelFile = &files[i]
			break
		}
	}
	// Find readme file (first "readme" entry).
	var readmeFile *ProcessedModelFile
	for i := range files {
		if files[i].FileType == "readme" {
			readmeFile = &files[i]
			break
		}
	}
	readmeContent := g.generateReadmeContent(modelName, author)
	// Generate sample metadata; an empty license is a valid outcome.
	licenses := []string{"apache-2.0", "mit", "llama2", "gpl-3.0", "bsd", ""}
	license := licenses[g.rand.Intn(len(licenses))]
	sampleTags := []string{"llm", "gguf", "gpu", "cpu", "text-to-text", "chat", "instruction-tuned"}
	numTags := g.rand.Intn(4) + 3 // 3-6 tags
	tags := make([]string, numTags)
	for i := 0; i < numTags; i++ {
		tags[i] = sampleTags[g.rand.Intn(len(sampleTags))]
	}
	// Remove duplicates (sampling with replacement above may repeat tags).
	tags = g.removeDuplicates(tags)
	// Optionally include icon (50% chance).
	icon := ""
	if g.rand.Intn(2) == 0 {
		icon = fmt.Sprintf("https://cdn-avatars.huggingface.co/v1/production/uploads/%s.png", g.randomString(24))
	}
	return ProcessedModel{
		ModelID:                 modelID,
		Author:                  author,
		Downloads:               g.rand.Intn(1000000) + 1000,
		LastModified:            g.randomDate(),
		Files:                   files,
		PreferredModelFile:      preferredModelFile,
		ReadmeFile:              readmeFile,
		ReadmeContent:           readmeContent,
		ReadmeContentPreview:    truncateString(readmeContent, 200),
		QuantizationPreferences: []string{"Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"},
		ProcessingError:         "",
		Tags:                    tags,
		License:                 license,
		Icon:                    icon,
	}
}
// Helper methods for synthetic data generation
// randomString produces a pseudo-random lowercase alphanumeric string of the
// given length, drawing one character per call to the generator's rand source.
func (g *SyntheticDataGenerator) randomString(length int) string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
	var sb strings.Builder
	sb.Grow(length)
	for i := 0; i < length; i++ {
		sb.WriteByte(alphabet[g.rand.Intn(len(alphabet))])
	}
	return sb.String()
}
// randomSHA256 returns a 64-character lowercase hex string shaped like a
// SHA-256 digest (random characters, not a real hash of anything).
func (g *SyntheticDataGenerator) randomSHA256() string {
	const hexDigits = "0123456789abcdef"
	var sb strings.Builder
	sb.Grow(64)
	for i := 0; i < 64; i++ {
		sb.WriteByte(hexDigits[g.rand.Intn(len(hexDigits))])
	}
	return sb.String()
}
// randomDate returns a timestamp within the last year, formatted like the
// HuggingFace API's lastModified field.
func (g *SyntheticDataGenerator) randomDate() string {
	offsetDays := g.rand.Intn(365) // somewhere in the past year
	return time.Now().AddDate(0, 0, -offsetDays).Format("2006-01-02T15:04:05.000Z")
}
// removeDuplicates returns a copy of slice with later repetitions of an
// element dropped, preserving first-seen order. Always returns a non-nil
// slice, matching the original behavior.
func (g *SyntheticDataGenerator) removeDuplicates(slice []string) []string {
	seen := make(map[string]struct{}, len(slice))
	out := make([]string, 0, len(slice))
	for _, s := range slice {
		if _, dup := seen[s]; dup {
			continue
		}
		seen[s] = struct{}{}
		out = append(out, s)
	}
	return out
}
// generateReadmeContent returns one of a few canned README bodies for the
// given model name and author, chosen at random via g.rand.
//
// This previously used strings.Title, which is deprecated (staticcheck
// SA1019: it does not handle Unicode word boundaries). The model names used
// here are single lowercase ASCII words, so capitalizing the first letter is
// equivalent for all current inputs.
func (g *SyntheticDataGenerator) generateReadmeContent(modelName, author string) string {
	title := func(s string) string {
		if s == "" {
			return s
		}
		return strings.ToUpper(s[:1]) + s[1:]
	}
	titled := title(modelName)
	templates := []string{
		fmt.Sprintf("# %s Model\n\nThis is a %s model developed by %s. It's designed for various natural language processing tasks including text generation, question answering, and conversation.\n\n## Features\n\n- High-quality text generation\n- Efficient inference\n- Multiple quantization options\n- Easy to use with LocalAI\n\n## Usage\n\nUse this model with LocalAI for various AI tasks.", titled, modelName, author),
		fmt.Sprintf("# %s\n\nA powerful language model from %s. This model excels at understanding and generating human-like text across multiple domains.\n\n## Capabilities\n\n- Text completion\n- Code generation\n- Creative writing\n- Technical documentation\n\n## Model Details\n\n- Architecture: Transformer-based\n- Training: Large-scale supervised learning\n- Quantization: Available in multiple formats", titled, author),
		fmt.Sprintf("# %s Language Model\n\nDeveloped by %s, this model represents state-of-the-art performance in natural language understanding and generation.\n\n## Key Features\n\n- Multilingual support\n- Context-aware responses\n- Efficient memory usage\n- Fast inference speed\n\n## Applications\n\n- Chatbots and virtual assistants\n- Content generation\n- Code completion\n- Educational tools", titled, author),
	}
	return templates[g.rand.Intn(len(templates))]
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/.github/gallery-agent/main.go | .github/gallery-agent/main.go | package main
import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"

	hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
)
// ProcessedModelFile represents a processed model file with additional metadata
type ProcessedModelFile struct {
	Path     string `json:"path"`   // path of the file inside the HF repository
	Size     int64  `json:"size"`   // size in bytes
	SHA256   string `json:"sha256"` // checksum, when reported by the API
	IsReadme bool   `json:"is_readme"`
	FileType string `json:"file_type"` // "model", "readme", "other"
}

// ProcessedModel represents a processed model with all gathered metadata
type ProcessedModel struct {
	ModelID                 string               `json:"model_id"`
	Author                  string               `json:"author"`
	Downloads               int                  `json:"downloads"`
	LastModified            string               `json:"last_modified"`
	Files                   []ProcessedModelFile `json:"files"`
	PreferredModelFile      *ProcessedModelFile  `json:"preferred_model_file,omitempty"` // best quantization-preference match
	ReadmeFile              *ProcessedModelFile  `json:"readme_file,omitempty"`
	ReadmeContent           string               `json:"readme_content,omitempty"`
	ReadmeContentPreview    string               `json:"readme_content_preview,omitempty"` // first ~200 chars of the README
	QuantizationPreferences []string             `json:"quantization_preferences"`
	ProcessingError         string               `json:"processing_error,omitempty"` // non-empty when detail fetching failed
	Tags                    []string             `json:"tags,omitempty"`
	License                 string               `json:"license,omitempty"`
	Icon                    string               `json:"icon,omitempty"`
}

// SearchResult represents the complete result of searching and processing models
type SearchResult struct {
	SearchTerm       string           `json:"search_term"`
	Limit            int              `json:"limit"`
	Quantization     string           `json:"quantization"`
	TotalModelsFound int              `json:"total_models_found"`
	Models           []ProcessedModel `json:"models"`
	FormattedOutput  string           `json:"formatted_output"` // human-readable report of the run
}

// AddedModelSummary represents a summary of models added to the gallery
type AddedModelSummary struct {
	SearchTerm     string   `json:"search_term"`
	TotalFound     int      `json:"total_found"`
	ModelsAdded    int      `json:"models_added"`
	AddedModelIDs  []string `json:"added_model_ids"`
	AddedModelURLs []string `json:"added_model_urls"` // HuggingFace URLs of the added models
	Quantization   string   `json:"quantization"`
	ProcessingTime string   `json:"processing_time"` // wall-clock duration of the run
}
// main is the gallery-agent entrypoint. It reads configuration from
// environment variables (SYNTHETIC_MODE, SEARCH_TERM, LIMIT, QUANTIZATION,
// MAX_MODELS, GALLERY_INDEX_PATH), searches HuggingFace for candidate models,
// optionally asks an AI agent to select the most interesting ones, filters
// out models already in the gallery, generates YAML entries for the
// survivors, and writes a JSON run summary to gallery-agent-summary.json.
func main() {
	startTime := time.Now()
	// Check for synthetic mode
	syntheticMode := os.Getenv("SYNTHETIC_MODE")
	if syntheticMode == "true" || syntheticMode == "1" {
		fmt.Println("Running in SYNTHETIC MODE - generating random test data")
		err := runSyntheticMode()
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error in synthetic mode: %v\n", err)
			os.Exit(1)
		}
		return
	}
	// Get configuration from environment variables
	searchTerm := os.Getenv("SEARCH_TERM")
	if searchTerm == "" {
		searchTerm = "GGUF"
	}
	limitStr := os.Getenv("LIMIT")
	if limitStr == "" {
		limitStr = "5"
	}
	limit, err := strconv.Atoi(limitStr)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error parsing LIMIT: %v\n", err)
		os.Exit(1)
	}
	quantization := os.Getenv("QUANTIZATION")
	maxModels := os.Getenv("MAX_MODELS")
	if maxModels == "" {
		maxModels = "1"
	}
	maxModelsInt, err := strconv.Atoi(maxModels)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error parsing MAX_MODELS: %v\n", err)
		os.Exit(1)
	}
	// Print configuration
	fmt.Printf("Gallery Agent Configuration:\n")
	fmt.Printf(" Search Term: %s\n", searchTerm)
	fmt.Printf(" Limit: %d\n", limit)
	fmt.Printf(" Quantization: %s\n", quantization)
	fmt.Printf(" Max Models to Add: %d\n", maxModelsInt)
	fmt.Printf(" Gallery Index Path: %s\n", os.Getenv("GALLERY_INDEX_PATH"))
	fmt.Println()
	result, err := searchAndProcessModels(searchTerm, limit, quantization)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
	fmt.Println(result.FormattedOutput)
	// models stays nil when the search returned nothing; later stages treat
	// an empty slice as "nothing to add".
	var models []ProcessedModel
	if len(result.Models) > 1 {
		fmt.Println("More than one model found (", len(result.Models), "), using AI agent to select the most interesting models")
		for _, model := range result.Models {
			fmt.Println("Model: ", model.ModelID)
		}
		// Use AI agent to select the most interesting models
		fmt.Println("Using AI agent to select the most interesting models...")
		models, err = selectMostInterestingModels(context.Background(), result)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error in model selection: %v\n", err)
			// Continue with original result if selection fails
			models = result.Models
		}
	} else if len(result.Models) == 1 {
		models = result.Models
		fmt.Println("Only one model found, using it directly")
	}
	// NOTE(review): debug-style dump of the raw slice, printed without a
	// trailing newline — consider removing or replacing with a count.
	fmt.Print(models)
	// Filter out models that already exist in the gallery
	fmt.Println("Filtering out existing models...")
	models, err = filterExistingModels(models)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error filtering existing models: %v\n", err)
		os.Exit(1)
	}
	// Limit to maxModelsInt after filtering
	if len(models) > maxModelsInt {
		models = models[:maxModelsInt]
	}
	// Track added models for summary
	var addedModelIDs []string
	var addedModelURLs []string
	// Generate YAML entries and append to gallery/index.yaml
	if len(models) > 0 {
		for _, model := range models {
			addedModelIDs = append(addedModelIDs, model.ModelID)
			// Generate Hugging Face URL for the model
			modelURL := fmt.Sprintf("https://huggingface.co/%s", model.ModelID)
			addedModelURLs = append(addedModelURLs, modelURL)
		}
		fmt.Println("Generating YAML entries for selected models...")
		err = generateYAMLForModels(context.Background(), models, quantization)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error generating YAML entries: %v\n", err)
			os.Exit(1)
		}
	} else {
		fmt.Println("No new models to add to the gallery.")
	}
	// Create and write summary
	processingTime := time.Since(startTime).String()
	summary := AddedModelSummary{
		SearchTerm:     searchTerm,
		TotalFound:     result.TotalModelsFound,
		ModelsAdded:    len(addedModelIDs),
		AddedModelIDs:  addedModelIDs,
		AddedModelURLs: addedModelURLs,
		Quantization:   quantization,
		ProcessingTime: processingTime,
	}
	// Write summary to file; failures here are logged but non-fatal since the
	// gallery work above already succeeded.
	summaryData, err := json.MarshalIndent(summary, "", " ")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error marshaling summary: %v\n", err)
	} else {
		err = os.WriteFile("gallery-agent-summary.json", summaryData, 0644)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error writing summary file: %v\n", err)
		} else {
			fmt.Printf("Summary written to gallery-agent-summary.json\n")
		}
	}
}
// searchAndProcessModels queries HuggingFace for models matching searchTerm
// (up to limit results), gathers per-model details — file list, preferred
// quantized model file, README content, tags/license/icon — and returns both
// the structured results and a human-readable report in FormattedOutput.
// Per-model detail failures are recorded in ProcessingError rather than
// aborting the whole run.
func searchAndProcessModels(searchTerm string, limit int, quantization string) (*SearchResult, error) {
	client := hfapi.NewClient()
	var outputBuilder strings.Builder
	fmt.Println("Searching for models...")
	// Initialize the result struct
	result := &SearchResult{
		SearchTerm:   searchTerm,
		Limit:        limit,
		Quantization: quantization,
		Models:       []ProcessedModel{},
	}
	models, err := client.GetLatest(searchTerm, limit)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch models: %w", err)
	}
	fmt.Println("Models found:", len(models))
	result.TotalModelsFound = len(models)
	if len(models) == 0 {
		outputBuilder.WriteString("No models found.\n")
		result.FormattedOutput = outputBuilder.String()
		return result, nil
	}
	outputBuilder.WriteString(fmt.Sprintf("Found %d models matching '%s':\n\n", len(models), searchTerm))
	// Process each model
	for i, model := range models {
		outputBuilder.WriteString(fmt.Sprintf("%d. Processing Model: %s\n", i+1, model.ModelID))
		outputBuilder.WriteString(fmt.Sprintf(" Author: %s\n", model.Author))
		outputBuilder.WriteString(fmt.Sprintf(" Downloads: %d\n", model.Downloads))
		outputBuilder.WriteString(fmt.Sprintf(" Last Modified: %s\n", model.LastModified))
		// Initialize processed model struct
		processedModel := ProcessedModel{
			ModelID:                 model.ModelID,
			Author:                  model.Author,
			Downloads:               model.Downloads,
			LastModified:            model.LastModified,
			QuantizationPreferences: []string{quantization, "Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"},
		}
		// Get detailed model information
		details, err := client.GetModelDetails(model.ModelID)
		if err != nil {
			errorMsg := fmt.Sprintf(" Error getting model details: %v\n", err)
			outputBuilder.WriteString(errorMsg)
			processedModel.ProcessingError = err.Error()
			result.Models = append(result.Models, processedModel)
			continue
		}
		// Define quantization preferences (in order of preference)
		quantizationPreferences := []string{quantization, "Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"}
		// Find preferred model file
		preferredModelFile := hfapi.FindPreferredModelFile(details.Files, quantizationPreferences)
		// Process files
		processedFiles := make([]ProcessedModelFile, len(details.Files))
		for j, file := range details.Files {
			fileType := "other"
			if file.IsReadme {
				fileType = "readme"
			} else if preferredModelFile != nil && file.Path == preferredModelFile.Path {
				fileType = "model"
			}
			processedFiles[j] = ProcessedModelFile{
				Path:     file.Path,
				Size:     file.Size,
				SHA256:   file.SHA256,
				IsReadme: file.IsReadme,
				FileType: fileType,
			}
		}
		processedModel.Files = processedFiles
		// Set preferred model file. Note: &file points at the loop's copy of
		// the element (not into processedFiles), which is fine here because
		// the copy holds the matched value when we break.
		if preferredModelFile != nil {
			for _, file := range processedFiles {
				if file.Path == preferredModelFile.Path {
					processedModel.PreferredModelFile = &file
					break
				}
			}
		}
		// Print file information
		outputBuilder.WriteString(fmt.Sprintf(" Files found: %d\n", len(details.Files)))
		if preferredModelFile != nil {
			outputBuilder.WriteString(fmt.Sprintf(" Preferred Model File: %s (SHA256: %s)\n",
				preferredModelFile.Path,
				preferredModelFile.SHA256))
		} else {
			outputBuilder.WriteString(fmt.Sprintf(" No model file found with quantization preferences: %v\n", quantizationPreferences))
		}
		if details.ReadmeFile != nil {
			outputBuilder.WriteString(fmt.Sprintf(" README File: %s\n", details.ReadmeFile.Path))
			// Find and set readme file
			for _, file := range processedFiles {
				if file.IsReadme {
					processedModel.ReadmeFile = &file
					break
				}
			}
			fmt.Println("Getting real readme for", model.ModelID, "waiting...")
			// Use agent to get the real readme and prepare the model description
			readmeContent, err := getRealReadme(context.Background(), model.ModelID)
			if err == nil {
				processedModel.ReadmeContent = readmeContent
				processedModel.ReadmeContentPreview = truncateString(readmeContent, 200)
				outputBuilder.WriteString(fmt.Sprintf(" README Content Preview: %s\n",
					processedModel.ReadmeContentPreview))
			} else {
				fmt.Printf(" Warning: Failed to get real readme: %v\n", err)
			}
			// NOTE(review): prints the zero value when getRealReadme failed.
			fmt.Println("Real readme got", readmeContent)
			// Extract metadata (tags, license) from README using LLM
			fmt.Println("Extracting metadata for", model.ModelID, "waiting...")
			tags, license, err := extractModelMetadata(context.Background(), processedModel)
			if err == nil {
				processedModel.Tags = tags
				processedModel.License = license
				outputBuilder.WriteString(fmt.Sprintf(" Tags: %v\n", tags))
				outputBuilder.WriteString(fmt.Sprintf(" License: %s\n", license))
			} else {
				fmt.Printf(" Warning: Failed to extract metadata: %v\n", err)
			}
			// Extract icon from README or use HuggingFace avatar
			icon := extractModelIcon(processedModel)
			if icon != "" {
				processedModel.Icon = icon
				outputBuilder.WriteString(fmt.Sprintf(" Icon: %s\n", icon))
			}
			// Get README content
			// readmeContent, err := client.GetReadmeContent(model.ModelID, details.ReadmeFile.Path)
			// if err == nil {
			// processedModel.ReadmeContent = readmeContent
			// processedModel.ReadmeContentPreview = truncateString(readmeContent, 200)
			// outputBuilder.WriteString(fmt.Sprintf(" README Content Preview: %s\n",
			// processedModel.ReadmeContentPreview))
			// }
		}
		// Print all files with their checksums
		outputBuilder.WriteString(" All Files:\n")
		for _, file := range processedFiles {
			outputBuilder.WriteString(fmt.Sprintf(" - %s (%s, %d bytes", file.Path, file.FileType, file.Size))
			if file.SHA256 != "" {
				outputBuilder.WriteString(fmt.Sprintf(", SHA256: %s", file.SHA256))
			}
			outputBuilder.WriteString(")\n")
		}
		outputBuilder.WriteString("\n")
		result.Models = append(result.Models, processedModel)
	}
	result.FormattedOutput = outputBuilder.String()
	return result, nil
}
// truncateString shortens s to at most maxLen bytes, appending "..." when
// truncation occurs. Unlike a plain byte slice, the cut point is moved back
// to a rune boundary so a multi-byte UTF-8 character is never split in half
// (the previous implementation could emit invalid UTF-8 for non-ASCII
// README content). A negative maxLen is treated as 0.
func truncateString(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	if maxLen < 0 {
		maxLen = 0
	}
	cut := maxLen
	// Back up past UTF-8 continuation bytes to the start of a rune.
	for cut > 0 && !utf8.RuneStart(s[cut]) {
		cut--
	}
	return s[:cut] + "..."
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/system/capabilities.go | pkg/system/capabilities.go | // Package system provides system detection utilities, including GPU/vendor detection
// and capability classification used to select optimal backends at runtime.
package system
import (
"os"
"path/filepath"
"runtime"
"strings"
"github.com/jaypipes/ghw/pkg/gpu"
"github.com/mudler/xlog"
)
const (
	// Capability identifiers returned by getSystemCapabilities and used as
	// keys into meta-backend capability maps.
	defaultCapability = "default"
	nvidiaL4T         = "nvidia-l4t"
	darwinX86         = "darwin-x86"
	metal             = "metal"
	nvidia            = "nvidia"
	amd               = "amd"
	intel             = "intel"
	nvidiaCuda13      = "nvidia-cuda-13"
	nvidiaCuda12      = "nvidia-cuda-12"
	nvidiaL4TCuda12   = "nvidia-l4t-cuda-12"
	nvidiaL4TCuda13   = "nvidia-l4t-cuda-13"
	// Environment variables (and default run-file path) used to force a
	// specific capability instead of auto-detecting it.
	capabilityEnv        = "LOCALAI_FORCE_META_BACKEND_CAPABILITY"
	capabilityRunFileEnv = "LOCALAI_FORCE_META_BACKEND_CAPABILITY_RUN_FILE"
	defaultRunFile       = "/run/localai/capability"
)

// Whether the CUDA 13 / CUDA 12 toolkit directories were found on disk;
// populated once at startup by init().
var (
	cuda13DirExists bool
	cuda12DirExists bool
)
// init detects which CUDA toolkit directories are installed and caches the
// result in package globals consulted when classifying NVIDIA systems.
func init() {
	// The paths must be absolute: the previous filepath.Join("usr", ...)
	// produced the *relative* path "usr/local/cuda-13", so detection only
	// worked when the process happened to run with cwd "/".
	_, err := os.Stat(filepath.Join("/usr", "local", "cuda-13"))
	cuda13DirExists = err == nil
	_, err = os.Stat(filepath.Join("/usr", "local", "cuda-12"))
	cuda12DirExists = err == nil
}
// Capability resolves the system's detected capability against capMap: if the
// detected capability is one of capMap's keys it is returned unchanged,
// otherwise the catch-all default capability is returned.
func (s *SystemState) Capability(capMap map[string]string) string {
	detected := s.getSystemCapabilities()
	_, known := capMap[detected]
	if !known {
		xlog.Debug("The requested capability was not found, using default capability", "reportedCapability", detected, "capMap", capMap)
		return defaultCapability
	}
	xlog.Debug("Using reported capability", "reportedCapability", detected, "capMap", capMap)
	return detected
}
// getSystemCapabilities determines the capability string for this system, in
// priority order:
//  1. the LOCALAI_FORCE_META_BACKEND_CAPABILITY environment variable,
//  2. a capability run file (default /run/localai/capability; path
//     overridable via LOCALAI_FORCE_META_BACKEND_CAPABILITY_RUN_FILE),
//  3. platform heuristics: darwin/arm64 -> metal, darwin/amd64 -> darwin-x86,
//     linux/arm64 + NVIDIA -> l4t variants, installed CUDA toolkit dirs,
//     then the detected GPU vendor.
//
// Systems reporting 4GB of VRAM or less fall back to the default (CPU)
// capability, which can be overridden via the env variable above.
func (s *SystemState) getSystemCapabilities() string {
	capability := os.Getenv(capabilityEnv)
	if capability != "" {
		xlog.Info("Using forced capability from environment variable", "capability", capability, "env", capabilityEnv)
		return capability
	}
	// The run-file location itself may be overridden via env. The local
	// variable previously shadowed the capabilityRunFileEnv constant, which
	// made the log below report the file path under the "env" key instead of
	// the environment variable name.
	capabilityRunFile := defaultRunFile
	if override := os.Getenv(capabilityRunFileEnv); override != "" {
		capabilityRunFile = override
	}
	// Check if /run/localai/capability exists and use it.
	// This might be used by e.g. container images to specify which
	// backends to pull in automatically when installing meta backends.
	if _, err := os.Stat(capabilityRunFile); err == nil {
		capability, err := os.ReadFile(capabilityRunFile)
		if err == nil {
			xlog.Info("Using forced capability run file", "capabilityRunFile", capabilityRunFile, "capability", string(capability), "env", capabilityRunFileEnv)
			return strings.Trim(strings.TrimSpace(string(capability)), "\n")
		}
	}
	// If we are on mac and arm64, we will return metal
	if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
		xlog.Info("Using metal capability (arm64 on mac)", "env", capabilityEnv)
		return metal
	}
	// If we are on mac and x86, we will return darwin-x86
	if runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" {
		xlog.Info("Using darwin-x86 capability (amd64 on mac)", "env", capabilityEnv)
		return darwinX86
	}
	// If arm64 on linux and a nvidia gpu is detected, we will return nvidia-l4t
	if runtime.GOOS == "linux" && runtime.GOARCH == "arm64" {
		if s.GPUVendor == nvidia {
			xlog.Info("Using nvidia-l4t capability (arm64 on linux)", "env", capabilityEnv)
			if cuda13DirExists {
				return nvidiaL4TCuda13
			}
			if cuda12DirExists {
				return nvidiaL4TCuda12
			}
			return nvidiaL4T
		}
	}
	if cuda13DirExists {
		return nvidiaCuda13
	}
	if cuda12DirExists {
		return nvidiaCuda12
	}
	if s.GPUVendor == "" {
		xlog.Info("Default capability (no GPU detected)", "env", capabilityEnv)
		return defaultCapability
	}
	xlog.Info("Capability automatically detected", "capability", s.GPUVendor, "env", capabilityEnv)
	// If vram is less than 4GB, let's default to CPU but warn the user that they can override that via env
	if s.VRAM <= 4*1024*1024*1024 {
		xlog.Warn("VRAM is less than 4GB, defaulting to CPU", "env", capabilityEnv)
		return defaultCapability
	}
	return s.GPUVendor
}
// detectGPUVendor inspects the detected graphics cards and reports the first
// recognized vendor (nvidia, amd or intel), matching case-insensitively on
// the vendor name. It returns "" when nothing matches; the error is
// currently always nil and kept for interface stability.
//
// The loop variable is named card to avoid shadowing the imported gpu
// package, which the previous version did.
func detectGPUVendor(gpus []*gpu.GraphicsCard) (string, error) {
	for _, card := range gpus {
		// Skip cards without vendor information.
		if card.DeviceInfo == nil || card.DeviceInfo.Vendor == nil {
			continue
		}
		vendorName := strings.ToUpper(card.DeviceInfo.Vendor.Name)
		switch {
		case strings.Contains(vendorName, strings.ToUpper(nvidia)):
			return nvidia, nil
		case strings.Contains(vendorName, strings.ToUpper(amd)):
			return amd, nil
		case strings.Contains(vendorName, strings.ToUpper(intel)):
			return intel, nil
		}
	}
	return "", nil
}
// BackendPreferenceTokens returns a list of substrings that represent the
// preferred backend implementation order for the current system capability.
// Callers can use these tokens to select the most appropriate concrete
// backend among multiple candidates sharing the same alias (e.g. "llama-cpp").
func (s *SystemState) BackendPreferenceTokens() []string {
	capability := strings.ToLower(s.getSystemCapabilities())
	// Ordered prefix table; the first matching entry wins.
	preferences := []struct {
		prefix string
		tokens []string
	}{
		{nvidia, []string{"cuda", "vulkan", "cpu"}},
		{amd, []string{"rocm", "hip", "vulkan", "cpu"}},
		{intel, []string{"sycl", intel, "cpu"}},
		{metal, []string{"metal", "cpu"}},
		{darwinX86, []string{"darwin-x86", "cpu"}},
	}
	for _, pref := range preferences {
		if strings.HasPrefix(capability, pref.prefix) {
			return pref.tokens
		}
	}
	return []string{"cpu"}
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/system/state.go | pkg/system/state.go | package system
import (
"github.com/jaypipes/ghw/pkg/gpu"
"github.com/mudler/LocalAI/pkg/xsysinfo"
"github.com/mudler/xlog"
)
// Backend holds the filesystem locations where backends are installed.
type Backend struct {
	BackendsPath       string
	BackendsSystemPath string
}

// Model holds the filesystem location where models are stored.
type Model struct {
	ModelsPath string
}

// SystemState aggregates the configured paths and the best-effort hardware
// detection results used to select backends at runtime.
type SystemState struct {
	GPUVendor string // detected GPU vendor ("nvidia", "amd", "intel" or "")
	Backend   Backend
	Model     Model
	gpus      []*gpu.GraphicsCard // raw detected cards (may be nil on failure)
	VRAM      uint64              // total usable VRAM in bytes (0 when unknown)
}

// SystemStateOptions is a functional option for configuring GetSystemState.
type SystemStateOptions func(*SystemState)
// WithBackendPath sets the primary backends directory on the SystemState.
func WithBackendPath(path string) SystemStateOptions {
	return func(s *SystemState) {
		s.Backend.BackendsPath = path
	}
}
// WithBackendSystemPath sets the system-wide backends directory on the SystemState.
func WithBackendSystemPath(path string) SystemStateOptions {
	return func(s *SystemState) {
		s.Backend.BackendsSystemPath = path
	}
}
// WithModelPath sets the models directory on the SystemState.
func WithModelPath(path string) SystemStateOptions {
	return func(s *SystemState) {
		s.Model.ModelsPath = path
	}
}
// GetSystemState builds a SystemState from the given options and runs
// best-effort hardware detection (GPU list, vendor, total VRAM). Detection
// failures are deliberately swallowed, so the returned error is always nil.
func GetSystemState(opts ...SystemStateOptions) (*SystemState, error) {
	s := &SystemState{}
	for _, apply := range opts {
		apply(s)
	}
	// Detection is best-effort here, we don't want to fail if it fails
	s.gpus, _ = xsysinfo.GPUs()
	xlog.Debug("GPUs", "gpus", s.gpus)
	s.GPUVendor, _ = detectGPUVendor(s.gpus)
	xlog.Debug("GPU vendor", "gpuVendor", s.GPUVendor)
	s.VRAM, _ = xsysinfo.TotalAvailableVRAM()
	xlog.Debug("Total available VRAM", "vram", s.VRAM)
	return s, nil
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/xsysinfo/gguf.go | pkg/xsysinfo/gguf.go | package xsysinfo
import (
gguf "github.com/gpustack/gguf-parser-go"
)
// VRAMEstimate describes how a GGUF model is expected to map onto the
// available GPU memory.
type VRAMEstimate struct {
	TotalVRAM       uint64 // the VRAM budget the estimate was computed against
	AvailableVRAM   uint64 // same caller-supplied value as TotalVRAM
	ModelSize       uint64 // model size in bytes, from GGUF metadata
	EstimatedLayers int    // number of layers expected to fit in AvailableVRAM
	EstimatedVRAM   uint64 // estimated VRAM (bytes) for a full offload
	IsFullOffload   bool   // true when AvailableVRAM exceeds EstimatedVRAM
}
// EstimateGGUFVRAMUsage estimates how many layers of the given GGUF model can
// be offloaded to a GPU with availableVRAM bytes of memory, using the
// parser's llama.cpp run estimate as the cost model.
func EstimateGGUFVRAMUsage(f *gguf.GGUFFile, availableVRAM uint64) (*VRAMEstimate, error) {
	// Get model metadata and the llama.cpp offload estimate.
	m := f.Metadata()
	estimate := f.EstimateLLaMACppRun()
	lmes := estimate.SummarizeItem(true, 0, 0)
	estimatedVRAM := uint64(0)
	availableLayers := lmes.OffloadLayers // TODO: check if we can just use OffloadLayers here
	for _, vram := range lmes.VRAMs {
		estimatedVRAM += uint64(vram.NonUMA)
	}
	// Calculate base model size
	modelSize := uint64(m.Size)
	// Guard the divisors used below.
	if availableLayers == 0 {
		availableLayers = 1
	}
	if estimatedVRAM == 0 {
		estimatedVRAM = 1
	}
	// Approximate the per-layer VRAM cost, then how many such layers fit in
	// the available VRAM. Integer division can round layerSize down to zero
	// when there are more layers than estimated bytes, which previously
	// caused a division-by-zero panic; clamp it to at least 1 byte.
	layerSize := estimatedVRAM / availableLayers
	if layerSize == 0 {
		layerSize = 1
	}
	estimatedLayers := int(availableVRAM / layerSize)
	if availableVRAM > estimatedVRAM {
		// The whole model fits: offload every layer.
		estimatedLayers = int(availableLayers)
	}
	// Calculate estimated VRAM usage
	return &VRAMEstimate{
		TotalVRAM:       availableVRAM,
		AvailableVRAM:   availableVRAM,
		ModelSize:       modelSize,
		EstimatedLayers: estimatedLayers,
		EstimatedVRAM:   estimatedVRAM,
		IsFullOffload:   availableVRAM > estimatedVRAM,
	}, nil
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/xsysinfo/memory.go | pkg/xsysinfo/memory.go | package xsysinfo
import (
"github.com/mudler/memory"
"github.com/mudler/xlog"
)
// SystemRAMInfo contains system RAM usage information. All sizes are in
// bytes; UsagePercent is in the range 0-100.
type SystemRAMInfo struct {
	Total        uint64  `json:"total"`
	Used         uint64  `json:"used"`
	Free         uint64  `json:"free"`
	Available    uint64  `json:"available"` // as computed, always equals Free
	UsagePercent float64 `json:"usage_percent"`
}
// GetSystemRAMInfo returns a snapshot of current system RAM usage in bytes.
// "Used" is derived as total minus available, so Available and Free carry the
// same value. The returned error is currently always nil.
func GetSystemRAMInfo() (*SystemRAMInfo, error) {
	total := memory.TotalMemory()
	free := memory.AvailableMemory()
	used := total - free

	var pct float64
	if total > 0 {
		pct = float64(used) / float64(total) * 100
	}
	xlog.Debug("System RAM Info", "total", total, "used", used, "free", free, "usage_percent", pct)

	return &SystemRAMInfo{
		Total:        total,
		Used:         used,
		Free:         free,
		Available:    free, // total - used == free by construction
		UsagePercent: pct,
	}, nil
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/xsysinfo/gpu.go | pkg/xsysinfo/gpu.go | package xsysinfo
import (
"bytes"
"encoding/json"
"os/exec"
"strconv"
"strings"
"sync"
"github.com/jaypipes/ghw"
"github.com/jaypipes/ghw/pkg/gpu"
"github.com/mudler/xlog"
)
// GPU vendor constants
const (
	VendorNVIDIA  = "nvidia"
	VendorAMD     = "amd"
	VendorIntel   = "intel"
	VendorVulkan  = "vulkan"
	VendorUnknown = "unknown"
)

// UnifiedMemoryDevices is a list of GPU device name patterns that use unified memory
// (shared with system RAM). When these devices are detected and report N/A for VRAM,
// we fall back to system RAM information.
var UnifiedMemoryDevices = []string{
	"NVIDIA GB10",
	"GB10",
	// Add more unified memory devices here as needed
}

// GPUMemoryInfo contains real-time GPU memory usage information for a single
// device, as reported by its vendor tool.
type GPUMemoryInfo struct {
	Index        int     `json:"index"`
	Name         string  `json:"name"`
	Vendor       string  `json:"vendor"`
	TotalVRAM    uint64  `json:"total_vram"`    // Total VRAM in bytes
	UsedVRAM     uint64  `json:"used_vram"`     // Used VRAM in bytes
	FreeVRAM     uint64  `json:"free_vram"`     // Free VRAM in bytes
	UsagePercent float64 `json:"usage_percent"` // Usage as percentage (0-100)
}

// GPUAggregateInfo contains aggregate GPU information across all GPUs
type GPUAggregateInfo struct {
	TotalVRAM    uint64  `json:"total_vram"`
	UsedVRAM     uint64  `json:"used_vram"`
	FreeVRAM     uint64  `json:"free_vram"`
	UsagePercent float64 `json:"usage_percent"`
	GPUCount     int     `json:"gpu_count"`
}

// AggregateMemoryInfo contains aggregate memory information (unified for GPU/RAM)
type AggregateMemoryInfo struct {
	TotalMemory  uint64  `json:"total_memory"`
	UsedMemory   uint64  `json:"used_memory"`
	FreeMemory   uint64  `json:"free_memory"`
	UsagePercent float64 `json:"usage_percent"`
	GPUCount     int     `json:"gpu_count"`
}

// ResourceInfo represents unified memory resource information: either a set
// of GPUs or, as a fallback, system RAM.
type ResourceInfo struct {
	Type      string              `json:"type"` // "gpu" or "ram"
	Available bool                `json:"available"`
	GPUs      []GPUMemoryInfo     `json:"gpus,omitempty"`
	RAM       *SystemRAMInfo      `json:"ram,omitempty"`
	Aggregate AggregateMemoryInfo `json:"aggregate"`
}
// Cached GPU enumeration: the ghw lookup runs once per process; both the
// card list and any error from the first call are memoized (see GPUs).
var (
	gpuCache     []*gpu.GraphicsCard
	gpuCacheOnce sync.Once
	gpuCacheErr  error
)
// GPUs returns the graphics cards detected by ghw. Detection runs only once
// per process; subsequent calls return the cached result (or cached error).
func GPUs() ([]*gpu.GraphicsCard, error) {
	gpuCacheOnce.Do(func() {
		// "info" avoids shadowing the imported gpu package, which the
		// previous local name did.
		info, err := ghw.GPU()
		if err != nil {
			gpuCacheErr = err
			return
		}
		gpuCache = info.GraphicsCards
	})
	return gpuCache, gpuCacheErr
}
// TotalAvailableVRAM sums the usable memory (bytes) reported for every
// detected GPU node. Cards without memory topology information contribute 0.
// The loop variable is named card so it no longer shadows the imported gpu
// package.
func TotalAvailableVRAM() (uint64, error) {
	cards, err := GPUs()
	if err != nil {
		return 0, err
	}
	var totalVRAM uint64
	for _, card := range cards {
		if card != nil && card.Node != nil && card.Node.Memory != nil {
			if card.Node.Memory.TotalUsableBytes > 0 {
				totalVRAM += uint64(card.Node.Memory.TotalUsableBytes)
			}
		}
	}
	return totalVRAM, nil
}
// HasGPU reports whether any GPU is present (when vendor is "") or whether
// any detected card's description contains the given vendor substring. The
// match is case-sensitive, mirroring the previous behavior. The loop variable
// is renamed to avoid shadowing the imported gpu package.
func HasGPU(vendor string) bool {
	cards, err := GPUs()
	if err != nil {
		return false
	}
	if vendor == "" {
		return len(cards) > 0
	}
	for _, card := range cards {
		if strings.Contains(card.String(), vendor) {
			return true
		}
	}
	return false
}
// isUnifiedMemoryDevice reports whether gpuName matches one of the known
// unified-memory device patterns (case-insensitive substring match).
func isUnifiedMemoryDevice(gpuName string) bool {
	upperName := strings.ToUpper(gpuName)
	for i := range UnifiedMemoryDevices {
		if strings.Contains(upperName, strings.ToUpper(UnifiedMemoryDevices[i])) {
			return true
		}
	}
	return false
}
// GetGPUMemoryUsage returns real-time GPU memory usage for all detected GPUs.
// Vendor-specific tools are tried in order: NVIDIA, then AMD (ROCm), then
// Intel; Vulkan is consulted only as a fallback for device detection when
// nothing else reported a GPU (it has limited real-time data). Returns an
// empty slice if no GPU monitoring tools are available.
//
// XXX: Note - the AMD and Intel paths could not be tested on real hardware
// and were added with the help of AI.
func GetGPUMemoryUsage() []GPUMemoryInfo {
	gpus := getNVIDIAGPUMemory()

	// Renumber each vendor's results so indices continue after the GPUs
	// collected so far.
	for _, vendorGPUs := range [][]GPUMemoryInfo{getAMDGPUMemory(), getIntelGPUMemory()} {
		base := len(gpus)
		for i := range vendorGPUs {
			vendorGPUs[i].Index = base + i
		}
		gpus = append(gpus, vendorGPUs...)
	}

	if len(gpus) == 0 {
		gpus = append(gpus, getVulkanGPUMemory()...)
	}
	return gpus
}
// GetGPUAggregateInfo sums memory usage across all detected GPUs and derives
// an overall usage percentage (0 when no VRAM was reported at all).
func GetGPUAggregateInfo() GPUAggregateInfo {
	infos := GetGPUMemoryUsage()
	agg := GPUAggregateInfo{GPUCount: len(infos)}
	for _, info := range infos {
		agg.TotalVRAM += info.TotalVRAM
		agg.UsedVRAM += info.UsedVRAM
		agg.FreeVRAM += info.FreeVRAM
	}
	if agg.TotalVRAM > 0 {
		agg.UsagePercent = float64(agg.UsedVRAM) / float64(agg.TotalVRAM) * 100
	}
	return agg
}
// getNVIDIAGPUMemory queries NVIDIA GPUs using nvidia-smi.
// Returns nil when nvidia-smi is not installed or fails. Unified-memory
// devices (e.g. GB10) that report "[N/A]" for VRAM get system RAM figures
// substituted; unknown devices reporting "[N/A]" are included with zeroed
// memory fields.
func getNVIDIAGPUMemory() []GPUMemoryInfo {
	// Check if nvidia-smi is available
	if _, err := exec.LookPath("nvidia-smi"); err != nil {
		return nil
	}
	cmd := exec.Command("nvidia-smi",
		"--query-gpu=index,name,memory.total,memory.used,memory.free",
		"--format=csv,noheader,nounits")
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		xlog.Debug("nvidia-smi failed", "error", err, "stderr", stderr.String())
		return nil
	}
	var gpus []GPUMemoryInfo
	// One CSV line per GPU: index, name, total, used, free (MB, no units).
	lines := strings.Split(strings.TrimSpace(stdout.String()), "\n")
	for _, line := range lines {
		if line == "" {
			continue
		}
		parts := strings.Split(line, ", ")
		if len(parts) < 5 {
			continue
		}
		idx, _ := strconv.Atoi(strings.TrimSpace(parts[0]))
		name := strings.TrimSpace(parts[1])
		totalStr := strings.TrimSpace(parts[2])
		usedStr := strings.TrimSpace(parts[3])
		freeStr := strings.TrimSpace(parts[4])
		var totalBytes, usedBytes, freeBytes uint64
		var usagePercent float64
		// Check if memory values are N/A (unified memory devices like GB10)
		isNA := totalStr == "[N/A]" || usedStr == "[N/A]" || freeStr == "[N/A]"
		if isNA && isUnifiedMemoryDevice(name) {
			// Unified memory device - fall back to system RAM
			sysInfo, err := GetSystemRAMInfo()
			if err != nil {
				xlog.Debug("failed to get system RAM for unified memory device", "error", err, "device", name)
				// Still add the GPU but with zero memory info
				gpus = append(gpus, GPUMemoryInfo{
					Index:        idx,
					Name:         name,
					Vendor:       VendorNVIDIA,
					TotalVRAM:    0,
					UsedVRAM:     0,
					FreeVRAM:     0,
					UsagePercent: 0,
				})
				continue
			}
			totalBytes = sysInfo.Total
			usedBytes = sysInfo.Used
			freeBytes = sysInfo.Free
			if totalBytes > 0 {
				usagePercent = float64(usedBytes) / float64(totalBytes) * 100
			}
			xlog.Debug("using system RAM for unified memory GPU", "device", name, "system_ram_bytes", totalBytes)
		} else if isNA {
			// Unknown device with N/A values - skip memory info
			xlog.Debug("nvidia-smi returned N/A for unknown device", "device", name)
			gpus = append(gpus, GPUMemoryInfo{
				Index:        idx,
				Name:         name,
				Vendor:       VendorNVIDIA,
				TotalVRAM:    0,
				UsedVRAM:     0,
				FreeVRAM:     0,
				UsagePercent: 0,
			})
			continue
		} else {
			// Normal GPU with dedicated VRAM; parse errors leave values at 0.
			totalMB, _ := strconv.ParseFloat(totalStr, 64)
			usedMB, _ := strconv.ParseFloat(usedStr, 64)
			freeMB, _ := strconv.ParseFloat(freeStr, 64)
			// Convert MB to bytes
			totalBytes = uint64(totalMB * 1024 * 1024)
			usedBytes = uint64(usedMB * 1024 * 1024)
			freeBytes = uint64(freeMB * 1024 * 1024)
			if totalBytes > 0 {
				usagePercent = float64(usedBytes) / float64(totalBytes) * 100
			}
		}
		gpus = append(gpus, GPUMemoryInfo{
			Index:        idx,
			Name:         name,
			Vendor:       VendorNVIDIA,
			TotalVRAM:    totalBytes,
			UsedVRAM:     usedBytes,
			FreeVRAM:     freeBytes,
			UsagePercent: usagePercent,
		})
	}
	return gpus
}
// getAMDGPUMemory queries AMD GPUs using rocm-smi.
// Returns nil when rocm-smi is not installed or fails to run; otherwise
// returns one GPUMemoryInfo entry per parsed CSV row.
func getAMDGPUMemory() []GPUMemoryInfo {
	// Check if rocm-smi is available
	if _, err := exec.LookPath("rocm-smi"); err != nil {
		return nil
	}
	// Try CSV format first
	cmd := exec.Command("rocm-smi", "--showmeminfo", "vram", "--csv")
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		xlog.Debug("rocm-smi failed", "error", err, "stderr", stderr.String())
		return nil
	}
	var gpus []GPUMemoryInfo
	lines := strings.Split(strings.TrimSpace(stdout.String()), "\n")
	// Skip header line (i == 0) and blank lines
	for i, line := range lines {
		if i == 0 || line == "" {
			continue
		}
		parts := strings.Split(line, ",")
		if len(parts) < 3 {
			continue
		}
		// Parse GPU index from first column (usually "GPU[0]" format);
		// parse failures leave idx at 0
		idxStr := strings.TrimSpace(parts[0])
		idx := 0
		if strings.HasPrefix(idxStr, "GPU[") {
			idxStr = strings.TrimPrefix(idxStr, "GPU[")
			idxStr = strings.TrimSuffix(idxStr, "]")
			idx, _ = strconv.Atoi(idxStr)
		}
		// Parse memory values (in bytes or MB depending on rocm-smi version)
		usedBytes, _ := strconv.ParseUint(strings.TrimSpace(parts[2]), 10, 64)
		totalBytes, _ := strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 64)
		// If values seem like MB, convert to bytes.
		// NOTE(review): heuristic — totals below 1e6 are assumed to be MB;
		// confirm against the rocm-smi versions we support.
		if totalBytes < 1000000 {
			usedBytes *= 1024 * 1024
			totalBytes *= 1024 * 1024
		}
		freeBytes := uint64(0)
		if totalBytes > usedBytes {
			freeBytes = totalBytes - usedBytes
		}
		usagePercent := 0.0
		if totalBytes > 0 {
			usagePercent = float64(usedBytes) / float64(totalBytes) * 100
		}
		gpus = append(gpus, GPUMemoryInfo{
			Index:        idx,
			Name:         "AMD GPU", // this rocm-smi output does not include a device name
			Vendor:       VendorAMD,
			TotalVRAM:    totalBytes,
			UsedVRAM:     usedBytes,
			FreeVRAM:     freeBytes,
			UsagePercent: usagePercent,
		})
	}
	return gpus
}
// getIntelGPUMemory queries Intel GPUs using xpu-smi or intel_gpu_top.
// xpu-smi (Intel's official GPU management tool) is preferred; the
// intel_gpu_top path is only consulted when xpu-smi yields nothing.
func getIntelGPUMemory() []GPUMemoryInfo {
	if gpus := getIntelXPUSMI(); len(gpus) > 0 {
		return gpus
	}
	// Fallback path
	return getIntelGPUTop()
}
// getIntelXPUSMI queries Intel GPUs using xpu-smi.
// It discovers devices via `xpu-smi discovery --json`, then queries
// per-device memory usage via `xpu-smi stats`. Returns nil if the tool
// is missing, fails to run, or its output cannot be parsed.
func getIntelXPUSMI() []GPUMemoryInfo {
	if _, err := exec.LookPath("xpu-smi"); err != nil {
		return nil
	}
	// Get device list
	cmd := exec.Command("xpu-smi", "discovery", "--json")
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		xlog.Debug("xpu-smi discovery failed", "error", err, "stderr", stderr.String())
		return nil
	}
	// Parse JSON output
	var result struct {
		DeviceList []struct {
			DeviceID                int    `json:"device_id"`
			DeviceName              string `json:"device_name"`
			VendorName              string `json:"vendor_name"`
			MemoryPhysicalSizeBytes uint64 `json:"memory_physical_size_byte"`
		} `json:"device_list"`
	}
	if err := json.Unmarshal(stdout.Bytes(), &result); err != nil {
		xlog.Debug("failed to parse xpu-smi discovery output", "error", err)
		return nil
	}
	var gpus []GPUMemoryInfo
	for _, device := range result.DeviceList {
		// Get memory usage for this device; on any failure usedBytes
		// stays 0 and the device is still reported with its total memory.
		// NOTE(review): assumes the stats JSON has a flat {"memory_used": N}
		// shape — confirm against the xpu-smi versions in use.
		statsCmd := exec.Command("xpu-smi", "stats", "-d", strconv.Itoa(device.DeviceID), "--json")
		var statsStdout bytes.Buffer
		statsCmd.Stdout = &statsStdout
		usedBytes := uint64(0)
		if err := statsCmd.Run(); err == nil {
			var stats struct {
				DeviceID   int    `json:"device_id"`
				MemoryUsed uint64 `json:"memory_used"`
			}
			if err := json.Unmarshal(statsStdout.Bytes(), &stats); err == nil {
				usedBytes = stats.MemoryUsed
			}
		}
		totalBytes := device.MemoryPhysicalSizeBytes
		freeBytes := uint64(0)
		if totalBytes > usedBytes {
			freeBytes = totalBytes - usedBytes
		}
		usagePercent := 0.0
		if totalBytes > 0 {
			usagePercent = float64(usedBytes) / float64(totalBytes) * 100
		}
		gpus = append(gpus, GPUMemoryInfo{
			Index:        device.DeviceID,
			Name:         device.DeviceName,
			Vendor:       VendorIntel,
			TotalVRAM:    totalBytes,
			UsedVRAM:     usedBytes,
			FreeVRAM:     freeBytes,
			UsagePercent: usagePercent,
		})
	}
	return gpus
}
// getIntelGPUTop attempts to query Intel GPUs using intel_gpu_top.
//
// intel_gpu_top reports engine utilization but does not expose memory
// usage, so this fallback currently never produces GPUMemoryInfo
// entries: it probes that the tool exists and runs cleanly, then
// returns nil. It is kept as the secondary path of getIntelGPUMemory
// so it can be extended if a future intel_gpu_top exposes memory data.
func getIntelGPUTop() []GPUMemoryInfo {
	if _, err := exec.LookPath("intel_gpu_top"); err != nil {
		return nil
	}
	// intel_gpu_top with -J outputs JSON, -s 1 for single sample
	cmd := exec.Command("intel_gpu_top", "-J", "-s", "1")
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		xlog.Debug("intel_gpu_top failed", "error", err, "stderr", stderr.String())
		return nil
	}
	// The previous implementation parsed the tool's NDJSON output here and
	// then unconditionally discarded the result; that dead parsing code has
	// been removed. intel_gpu_top does not provide usable memory info, so
	// there is nothing to report.
	return nil
}
// GetResourceInfo returns GPU info if available, otherwise system RAM info.
func GetResourceInfo() ResourceInfo {
	if gpus := GetGPUMemoryUsage(); len(gpus) > 0 {
		// At least one GPU detected: report per-GPU data plus the
		// aggregate VRAM figures.
		agg := GetGPUAggregateInfo()
		return ResourceInfo{
			Type:      "gpu",
			Available: true,
			GPUs:      gpus,
			RAM:       nil,
			Aggregate: AggregateMemoryInfo{
				TotalMemory:  agg.TotalVRAM,
				UsedMemory:   agg.UsedVRAM,
				FreeMemory:   agg.FreeVRAM,
				UsagePercent: agg.UsagePercent,
				GPUCount:     agg.GPUCount,
			},
		}
	}
	// No GPU detected: fall back to system RAM.
	ram, err := GetSystemRAMInfo()
	if err != nil {
		xlog.Debug("failed to get system RAM info", "error", err)
		return ResourceInfo{
			Type:      "ram",
			Available: false,
			Aggregate: AggregateMemoryInfo{},
		}
	}
	return ResourceInfo{
		Type:      "ram",
		Available: true,
		GPUs:      nil,
		RAM:       ram,
		Aggregate: AggregateMemoryInfo{
			TotalMemory:  ram.Total,
			UsedMemory:   ram.Used,
			FreeMemory:   ram.Free,
			UsagePercent: ram.UsagePercent,
			GPUCount:     0,
		},
	}
}
// GetResourceAggregateInfo returns aggregate memory info (GPU if
// available, otherwise RAM). It is used by the memory reclaimer to
// check overall memory usage.
func GetResourceAggregateInfo() AggregateMemoryInfo {
	return GetResourceInfo().Aggregate
}
// getVulkanGPUMemory queries GPUs using vulkaninfo as a fallback.
// Note: Vulkan provides memory heap sizes but not real-time usage, so
// UsedVRAM is always reported as 0 and FreeVRAM equals TotalVRAM.
func getVulkanGPUMemory() []GPUMemoryInfo {
	if _, err := exec.LookPath("vulkaninfo"); err != nil {
		return nil
	}
	cmd := exec.Command("vulkaninfo", "--json")
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		xlog.Debug("vulkaninfo failed", "error", err, "stderr", stderr.String())
		return nil
	}
	// Parse Vulkan JSON output
	var result struct {
		VkPhysicalDevices []struct {
			DeviceName                       string `json:"deviceName"`
			DeviceType                       string `json:"deviceType"`
			VkPhysicalDeviceMemoryProperties struct {
				MemoryHeaps []struct {
					Flags int    `json:"flags"`
					Size  uint64 `json:"size"`
				} `json:"memoryHeaps"`
			} `json:"VkPhysicalDeviceMemoryProperties"`
		} `json:"VkPhysicalDevices"`
	}
	if err := json.Unmarshal(stdout.Bytes(), &result); err != nil {
		xlog.Debug("failed to parse vulkaninfo output", "error", err)
		return nil
	}
	var gpus []GPUMemoryInfo
	for i, device := range result.VkPhysicalDevices {
		// Skip CPU-type devices (software renderers)
		if device.DeviceType == "VK_PHYSICAL_DEVICE_TYPE_CPU" {
			continue
		}
		// Sum up device-local memory heaps
		var totalVRAM uint64
		for _, heap := range device.VkPhysicalDeviceMemoryProperties.MemoryHeaps {
			// Flag 1 = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
			if heap.Flags&1 != 0 {
				totalVRAM += heap.Size
			}
		}
		// Devices with no device-local memory are not useful here
		if totalVRAM == 0 {
			continue
		}
		gpus = append(gpus, GPUMemoryInfo{
			Index:        i,
			Name:         device.DeviceName,
			Vendor:       VendorVulkan,
			TotalVRAM:    totalVRAM,
			UsedVRAM:     0, // Vulkan doesn't provide real-time usage
			FreeVRAM:     totalVRAM,
			UsagePercent: 0,
		})
	}
	return gpus
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/xsysinfo/cpu.go | pkg/xsysinfo/cpu.go | package xsysinfo
import (
"sort"
"github.com/jaypipes/ghw"
"github.com/klauspost/cpuid/v2"
)
// CPUCapabilities returns the sorted, de-duplicated set of capability
// flags reported across all CPU processors (as exposed by ghw).
func CPUCapabilities() ([]string, error) {
	cpu, err := ghw.CPU()
	if err != nil {
		return nil, err
	}
	// De-duplicate capabilities across processors via a set.
	caps := map[string]struct{}{}
	for _, proc := range cpu.Processors {
		for _, c := range proc.Capabilities {
			caps[c] = struct{}{}
		}
	}
	// Pre-size the result to avoid repeated slice growth, then sort
	// for deterministic output.
	ret := make([]string, 0, len(caps))
	for c := range caps {
		ret = append(ret, c)
	}
	sort.Strings(ret)
	return ret, nil
}
// HasCPUCaps reports whether the host CPU supports all of the given
// cpuid feature IDs.
func HasCPUCaps(ids ...cpuid.FeatureID) bool {
	return cpuid.CPU.Supports(ids...)
}

// CPUPhysicalCores returns the number of physical CPU cores, defaulting
// to 1 when detection reports zero (can happen in VMs/containers).
func CPUPhysicalCores() int {
	if cpuid.CPU.PhysicalCores == 0 {
		return 1
	}
	return cpuid.CPU.PhysicalCores
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/xio/copy.go | pkg/xio/copy.go | package xio
import (
"context"
"io"
)
type readerFunc func(p []byte) (n int, err error)
func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) }
func Copy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
return io.Copy(dst, readerFunc(func(p []byte) (int, error) {
select {
case <-ctx.Done():
return 0, ctx.Err()
default:
return src.Read(p)
}
}))
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/model/loader.go | pkg/model/loader.go | package model
import (
"context"
"fmt"
"maps"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/mudler/LocalAI/pkg/system"
"github.com/mudler/LocalAI/pkg/utils"
"github.com/mudler/xlog"
)
// TODO: Split ModelLoader and TemplateLoader? Just to keep things more organized. Left together to share a mutex until I look into that. Would split if we separate directories for .bin/.yaml and .tmpl

// ModelLoader tracks loaded model processes and coordinates concurrent
// model loads. Mutable state is guarded by mu.
type ModelLoader struct {
	ModelPath                string                   // base directory containing model files
	mu                       sync.Mutex               // guards the mutable fields below
	models                   map[string]*Model        // modelID -> loaded model
	loading                  map[string]chan struct{} // tracks models currently being loaded; channel is closed when the load finishes
	wd                       *WatchDog                // optional watchdog used for busy/idle/LRU tracking
	externalBackends         map[string]string        // backend name -> path or address of an external backend
	lruEvictionMaxRetries    int                      // Maximum number of retries when waiting for busy models
	lruEvictionRetryInterval time.Duration            // Interval between retries when waiting for busy models
}
// NewModelLoader creates a new ModelLoader instance rooted at the
// system's models path. LRU eviction is managed through the WatchDog
// component (see SetWatchDog).
func NewModelLoader(system *system.SystemState) *ModelLoader {
	nml := &ModelLoader{
		ModelPath:                system.Model.ModelsPath,
		models:                   make(map[string]*Model),
		loading:                  make(map[string]chan struct{}),
		externalBackends:         make(map[string]string),
		lruEvictionMaxRetries:    30,              // Default: 30 retries
		lruEvictionRetryInterval: 1 * time.Second, // Default: 1 second
	}
	return nml
}

// GetLoadingCount returns the number of models currently being loaded.
func (ml *ModelLoader) GetLoadingCount() int {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	return len(ml.loading)
}
// SetWatchDog attaches the watchdog used for busy/idle tracking and LRU
// eviction. Guarded by mu for consistency with the other field accessors
// (SetExternalBackend etc.), which all lock before touching shared state.
func (ml *ModelLoader) SetWatchDog(wd *WatchDog) {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	ml.wd = wd
}

// GetWatchDog returns the watchdog set via SetWatchDog, or nil.
func (ml *ModelLoader) GetWatchDog() *WatchDog {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	return ml.wd
}
// SetLRUEvictionRetrySettings updates the LRU eviction retry settings
// (how many times, and how often, enforceLRULimit waits for busy models).
func (ml *ModelLoader) SetLRUEvictionRetrySettings(maxRetries int, retryInterval time.Duration) {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	ml.lruEvictionMaxRetries = maxRetries
	ml.lruEvictionRetryInterval = retryInterval
}

// ExistsInModelPath reports whether file s exists under the model path.
func (ml *ModelLoader) ExistsInModelPath(s string) bool {
	return utils.ExistsInPath(ml.ModelPath, s)
}

// SetExternalBackend registers (or replaces) an external backend by name,
// pointing at either a local executable path or a network address.
func (ml *ModelLoader) SetExternalBackend(name, uri string) {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	ml.externalBackends[name] = uri
}

// DeleteExternalBackend removes a previously registered external backend.
func (ml *ModelLoader) DeleteExternalBackend(name string) {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	delete(ml.externalBackends, name)
}

// GetExternalBackend returns the uri/path of the named external backend,
// or the empty string when it is not registered.
func (ml *ModelLoader) GetExternalBackend(name string) string {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	return ml.externalBackends[name]
}
// GetAllExternalBackends returns the union of the loader's registered
// external backends and any supplied via the per-call Options; entries
// from o take precedence on name collisions.
func (ml *ModelLoader) GetAllExternalBackends(o *Options) map[string]string {
	// Lock while copying: externalBackends is mutated by
	// SetExternalBackend/DeleteExternalBackend under ml.mu, and reading
	// it here without the lock was a data race.
	ml.mu.Lock()
	backends := make(map[string]string, len(ml.externalBackends))
	maps.Copy(backends, ml.externalBackends)
	ml.mu.Unlock()
	if o != nil {
		maps.Copy(backends, o.externalBackends)
	}
	return backends
}
var knownFilesToSkip []string = []string{
"MODEL_CARD",
"README",
"README.md",
}
var knownModelsNameSuffixToSkip []string = []string{
".tmpl",
".keep",
".yaml",
".yml",
".json",
".txt",
".pt",
".onnx",
".md",
".MD",
".DS_Store",
".",
".safetensors",
".bin",
".partial",
".tar.gz",
}
const retryTimeout = time.Duration(2 * time.Minute)
// ListFilesInModelPath lists candidate model files found directly in
// the model path, filtering out known non-model file names, known
// non-model suffixes, and directories.
func (ml *ModelLoader) ListFilesInModelPath() ([]string, error) {
	entries, err := os.ReadDir(ml.ModelPath)
	if err != nil {
		return []string{}, err
	}
	// shouldSkip reports whether a file name is a known non-model file.
	shouldSkip := func(name string) bool {
		for _, skip := range knownFilesToSkip {
			if strings.EqualFold(name, skip) {
				return true
			}
		}
		// Skip templates, YAML, .keep, .json, and .DS_Store files
		for _, suffix := range knownModelsNameSuffixToSkip {
			if strings.HasSuffix(name, suffix) {
				return true
			}
		}
		return false
	}
	models := []string{}
	for _, entry := range entries {
		if shouldSkip(entry.Name()) {
			continue
		}
		// Skip directories
		if entry.IsDir() {
			continue
		}
		models = append(models, entry.Name())
	}
	return models, nil
}
// ListLoadedModels returns a snapshot of all currently loaded models.
func (ml *ModelLoader) ListLoadedModels() []*Model {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	loaded := make([]*Model, 0, len(ml.models))
	for _, m := range ml.models {
		loaded = append(loaded, m)
	}
	return loaded
}
// LoadModel returns the model registered under modelID, invoking loader
// to create it if necessary. Concurrent requests for the same modelID
// are coalesced: only one goroutine runs the loader while the others
// wait on a signalling channel and then re-check (retrying the load if
// the first loader failed). The loader itself runs without holding the
// mutex since it can take a long time.
func (ml *ModelLoader) LoadModel(modelID, modelName string, loader func(string, string, string) (*Model, error)) (*Model, error) {
	ml.mu.Lock()
	// Check if we already have a loaded model
	if model := ml.checkIsLoaded(modelID); model != nil {
		ml.mu.Unlock()
		return model, nil
	}
	// Check if another goroutine is already loading this model
	if loadingChan, isLoading := ml.loading[modelID]; isLoading {
		ml.mu.Unlock()
		// Wait for the other goroutine to finish loading
		xlog.Debug("Waiting for model to be loaded by another request", "modelID", modelID)
		<-loadingChan
		// Now check if the model is loaded
		ml.mu.Lock()
		model := ml.checkIsLoaded(modelID)
		ml.mu.Unlock()
		if model != nil {
			return model, nil
		}
		// If still not loaded, the other goroutine failed - we'll try again.
		// The recursive call re-enters the loading protocol from scratch.
		return ml.LoadModel(modelID, modelName, loader)
	}
	// Mark this model as loading (create a channel that will be closed when done)
	loadingChan := make(chan struct{})
	ml.loading[modelID] = loadingChan
	ml.mu.Unlock()
	// Ensure we clean up the loading state when done, waking all waiters
	defer func() {
		ml.mu.Lock()
		delete(ml.loading, modelID)
		close(loadingChan)
		ml.mu.Unlock()
	}()
	// Load the model (this can take a long time, no lock held)
	modelFile := filepath.Join(ml.ModelPath, modelName)
	xlog.Debug("Loading model in memory from file", "file", modelFile)
	model, err := loader(modelID, modelName, modelFile)
	if err != nil {
		return nil, fmt.Errorf("failed to load model with internal loader: %s", err)
	}
	if model == nil {
		return nil, fmt.Errorf("loader didn't return a model")
	}
	// Add to models map
	ml.mu.Lock()
	ml.models[modelID] = model
	ml.mu.Unlock()
	return model, nil
}
// ShutdownModel stops and removes the process backing the named model.
func (ml *ModelLoader) ShutdownModel(modelName string) error {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	return ml.deleteProcess(modelName)
}

// CheckIsLoaded is the exported, lock-taking wrapper of checkIsLoaded.
func (ml *ModelLoader) CheckIsLoaded(s string) *Model {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	return ml.checkIsLoaded(s)
}
// checkIsLoaded returns the model registered under s if it is loaded,
// nil otherwise. Caller must hold ml.mu. When the model's gRPC health
// check fails and its process is dead, the process entry is deleted so
// the model is re-created on the next load.
func (ml *ModelLoader) checkIsLoaded(s string) *Model {
	m, ok := ml.models[s]
	if !ok {
		return nil
	}
	xlog.Debug("Model already loaded in memory", "model", s)
	client := m.GRPC(false, ml.wd)
	xlog.Debug("Checking model availability", "model", s)
	cTimeout, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	alive, err := client.HealthCheck(cTimeout)
	if !alive {
		xlog.Warn("GRPC Model not responding", "error", err)
		xlog.Warn("Deleting the process in order to recreate it")
		process := m.Process()
		if process == nil {
			// No process handle to restart: return the (unresponsive) model as-is
			xlog.Error("Process not found and the model is not responding anymore", "model", s)
			return m
		}
		if !process.IsAlive() {
			xlog.Debug("GRPC Process is not responding", "model", s)
			// stop and delete the process, this forces to re-load the model and re-create again the service
			err := ml.deleteProcess(s)
			if err != nil {
				xlog.Error("error stopping process", "error", err, "process", s)
			}
			return nil
		}
		// NOTE(review): when the process is alive but the health check
		// failed, we fall through and return the unresponsive model —
		// confirm this is intentional (it avoids killing a model that
		// may just be busy).
	}
	return m
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/model/initializers.go | pkg/model/initializers.go | package model
import (
"context"
"errors"
"fmt"
"os"
"strings"
"time"
grpc "github.com/mudler/LocalAI/pkg/grpc"
"github.com/mudler/xlog"
"github.com/phayes/freeport"
)
const (
	// LLamaCPP is the canonical name of the llama.cpp backend.
	LLamaCPP = "llama-cpp"
)

// Aliases maps legacy/alternate backend names to their canonical backend name.
var Aliases map[string]string = map[string]string{
	"go-llama":               LLamaCPP,
	"llama":                  LLamaCPP,
	"embedded-store":         LocalStoreBackend,
	"huggingface-embeddings": TransformersBackend,
	"langchain-huggingface":  LCHuggingFaceBackend,
	"transformers-musicgen":  TransformersBackend,
	"sentencetransformers":   TransformersBackend,
	"mamba":                  TransformersBackend,
	"stablediffusion":        StableDiffusionGGMLBackend,
}

// TypeAlias maps alias names to the model type the aliased backend
// should be loaded with (forwarded as the gRPC options Type field).
var TypeAlias map[string]string = map[string]string{
	"sentencetransformers":   "SentenceTransformer",
	"huggingface-embeddings": "SentenceTransformer",
	"mamba":                  "Mamba",
	"transformers-musicgen":  "MusicgenForConditionalGeneration",
}

// Canonical names of the built-in backends.
const (
	WhisperBackend             = "whisper"
	StableDiffusionGGMLBackend = "stablediffusion-ggml"
	LCHuggingFaceBackend       = "huggingface"
	TransformersBackend        = "transformers"
	LocalStoreBackend          = "local-store"
)
// grpcModel returns a loader function (for ModelLoader.LoadModel) that
// starts the gRPC process for the given backend (when the backend maps
// to a local executable), waits for its health check to pass, and then
// issues LoadModel over gRPC. External backends that are addresses are
// connected to directly without spawning a process.
func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string, string) (*Model, error) {
	return func(modelID, modelName, modelFile string) (*Model, error) {
		xlog.Debug("Loading Model with gRPC", "modelID", modelID, "file", modelFile, "backend", backend, "options", *o)
		var client *Model
		// getFreeAddress picks a free localhost port for the backend to listen on.
		getFreeAddress := func() (string, error) {
			port, err := freeport.GetFreePort()
			if err != nil {
				return "", fmt.Errorf("failed allocating free ports: %s", err.Error())
			}
			return fmt.Sprintf("127.0.0.1:%d", port), nil
		}
		// If no specific model path is set for transformers/HF, set it to the model path
		for _, env := range []string{"HF_HOME", "TRANSFORMERS_CACHE", "HUGGINGFACE_HUB_CACHE"} {
			if os.Getenv(env) == "" {
				err := os.Setenv(env, ml.ModelPath)
				if err != nil {
					xlog.Error("unable to set environment variable to modelPath", "error", err, "name", env, "modelPath", ml.ModelPath)
				}
			}
		}
		// Check if the backend is provided as external
		if uri, ok := ml.GetAllExternalBackends(o)[backend]; ok {
			xlog.Debug("Loading external backend", "uri", uri)
			// check if uri is a file or a address
			if fi, err := os.Stat(uri); err == nil {
				xlog.Debug("external backend is file", "file", fi)
				serverAddress, err := getFreeAddress()
				if err != nil {
					return nil, fmt.Errorf("failed allocating free ports: %s", err.Error())
				}
				// Make sure the process is executable
				process, err := ml.startProcess(uri, modelID, serverAddress)
				if err != nil {
					xlog.Error("failed to launch", "error", err, "path", uri)
					return nil, err
				}
				xlog.Debug("GRPC Service Started")
				client = NewModel(modelID, serverAddress, process)
			} else {
				xlog.Debug("external backend is a uri")
				// address
				client = NewModel(modelID, uri, nil)
			}
		} else {
			xlog.Error("Backend not found", "backend", backend)
			return nil, fmt.Errorf("backend not found: %s", backend)
		}
		xlog.Debug("Wait for the service to start up")
		xlog.Debug("Options", "options", o.gRPCOptions)
		// Wait for the service to start up: poll the health check up to
		// grpcAttempts times with grpcAttemptsDelay seconds between tries.
		ready := false
		for i := 0; i < o.grpcAttempts; i++ {
			alive, err := client.GRPC(o.parallelRequests, ml.wd).HealthCheck(context.Background())
			if alive {
				xlog.Debug("GRPC Service Ready")
				ready = true
				break
			}
			if err != nil && i == o.grpcAttempts-1 {
				xlog.Error("failed starting/connecting to the gRPC service", "error", err)
			}
			time.Sleep(time.Duration(o.grpcAttemptsDelay) * time.Second)
		}
		if !ready {
			xlog.Debug("GRPC Service NOT ready")
			// Stop any spawned process so we don't leak it
			if process := client.Process(); process != nil {
				process.Stop()
			}
			return nil, fmt.Errorf("grpc service not ready")
		}
		// Fill in the per-model fields of the gRPC load request
		options := *o.gRPCOptions
		options.Model = modelName
		options.ModelFile = modelFile
		options.ModelPath = ml.ModelPath
		xlog.Debug("GRPC: Loading model with options", "options", options)
		res, err := client.GRPC(o.parallelRequests, ml.wd).LoadModel(o.context, &options)
		if err != nil {
			if process := client.Process(); process != nil {
				process.Stop()
			}
			return nil, fmt.Errorf("could not load model: %w", err)
		}
		if !res.Success {
			if process := client.Process(); process != nil {
				process.Stop()
			}
			return nil, fmt.Errorf("could not load model (no success): %s", res.Message)
		}
		return client, nil
	}
}
// backendLoader resolves the backend name (applying Aliases/TypeAlias),
// loads the model through LoadModel/grpcModel, and returns its gRPC
// client. On failure any half-started process is stopped best-effort.
func (ml *ModelLoader) backendLoader(opts ...Option) (client grpc.Backend, err error) {
	o := NewOptions(opts...)
	xlog.Info("BackendLoader starting", "modelID", o.modelID, "backend", o.backendString, "model", o.model)
	backend := strings.ToLower(o.backendString)
	if realBackend, exists := Aliases[backend]; exists {
		typeAlias, exists := TypeAlias[backend]
		if exists {
			xlog.Debug("alias is a type alias", "alias", backend, "realBackend", realBackend, "type", typeAlias)
			o.gRPCOptions.Type = typeAlias
		} else {
			xlog.Debug("alias", "alias", backend, "realBackend", realBackend)
		}
		backend = realBackend
	}
	model, err := ml.LoadModel(o.modelID, o.model, ml.grpcModel(backend, o))
	if err != nil {
		if stopErr := ml.StopGRPC(only(o.modelID)); stopErr != nil {
			xlog.Error("error stopping model", "error", stopErr, "model", o.modelID)
		}
		xlog.Error("Failed to load model", "modelID", o.modelID, "error", err, "backend", o.backendString)
		return nil, err
	}
	return model.GRPC(o.parallelRequests, ml.wd), nil
}
// enforceLRULimit enforces the LRU limit before loading a new model.
// This is called before loading a model to ensure we don't exceed the limit.
// It accounts for models that are currently being loaded by other goroutines.
// If models are busy and can't be evicted, it will wait and retry (up to the
// configured max retries) until space is available; after the last attempt it
// proceeds anyway and only logs a warning.
func (ml *ModelLoader) enforceLRULimit() {
	// No watchdog means no LRU policy to enforce
	if ml.wd == nil {
		return
	}
	// Get the count of models currently being loaded to account for concurrent requests
	pendingLoads := ml.GetLoadingCount()
	// Get retry settings from ModelLoader (snapshot under the lock)
	ml.mu.Lock()
	maxRetries := ml.lruEvictionMaxRetries
	retryInterval := ml.lruEvictionRetryInterval
	ml.mu.Unlock()
	for attempt := 0; attempt < maxRetries; attempt++ {
		result := ml.wd.EnforceLRULimit(pendingLoads)
		if !result.NeedMore {
			// Successfully evicted enough models (or no eviction needed)
			if result.EvictedCount > 0 {
				xlog.Info("[ModelLoader] LRU enforcement complete", "evicted", result.EvictedCount)
			}
			return
		}
		// Need more evictions but models are busy - wait and retry
		if attempt < maxRetries-1 {
			xlog.Info("[ModelLoader] Waiting for busy models to become idle before eviction",
				"evicted", result.EvictedCount,
				"attempt", attempt+1,
				"maxRetries", maxRetries,
				"retryIn", retryInterval)
			time.Sleep(retryInterval)
		} else {
			// Last attempt - log warning but proceed (might fail to load, but at least we tried)
			xlog.Warn("[ModelLoader] LRU enforcement incomplete after max retries",
				"evicted", result.EvictedCount,
				"reason", "models are still busy with active API calls")
		}
	}
}
// updateModelLastUsed refreshes a model's "last used" timestamp for LRU
// tracking. It is a no-op when there is no watchdog or no model.
func (ml *ModelLoader) updateModelLastUsed(m *Model) {
	if ml.wd != nil && m != nil {
		ml.wd.UpdateLastUsed(m.address)
	}
}
// Load returns a gRPC backend for the requested model. Fast path: if the
// model is already loaded, its LRU timestamp is refreshed and the client
// is returned. Otherwise the LRU limit is enforced, and the model is
// loaded either with the explicitly requested backend or by trying each
// known external backend in turn until one succeeds.
func (ml *ModelLoader) Load(opts ...Option) (grpc.Backend, error) {
	o := NewOptions(opts...)
	// Return earlier if we have a model already loaded
	// (avoid looping through all the backends)
	if m := ml.CheckIsLoaded(o.modelID); m != nil {
		xlog.Debug("Model already loaded", "model", o.modelID)
		// Update last used time for LRU tracking
		ml.updateModelLastUsed(m)
		return m.GRPC(o.parallelRequests, ml.wd), nil
	}
	// Enforce LRU limit before loading a new model
	ml.enforceLRULimit()
	// if a backend is defined, return the loader directly
	if o.backendString != "" {
		client, err := ml.backendLoader(opts...)
		if err != nil {
			return nil, err
		}
		return client, nil
	}
	// Otherwise scan for backends in the asset directory
	var err error
	// get backends embedded in the binary
	autoLoadBackends := []string{}
	// append externalBackends supplied by the user via the CLI
	for b := range ml.GetAllExternalBackends(o) {
		autoLoadBackends = append(autoLoadBackends, b)
	}
	if len(autoLoadBackends) == 0 {
		xlog.Error("No backends found")
		return nil, fmt.Errorf("no backends found")
	}
	xlog.Debug("Loading from the following backends (in order)", "backends", autoLoadBackends)
	xlog.Info("Trying to load the model", "modelID", o.modelID, "backends", autoLoadBackends)
	// Try each backend in turn, accumulating errors so the final message
	// explains every failed attempt
	for _, key := range autoLoadBackends {
		xlog.Info("Attempting to load", "backend", key)
		options := append(opts, []Option{
			WithBackendString(key),
		}...)
		model, modelerr := ml.backendLoader(options...)
		if modelerr == nil && model != nil {
			xlog.Info("Loads OK", "backend", key)
			return model, nil
		} else if modelerr != nil {
			err = errors.Join(err, fmt.Errorf("[%s]: %w", key, modelerr))
			xlog.Info("Fails", "backend", key, "error", modelerr.Error())
		} else if model == nil {
			err = errors.Join(err, fmt.Errorf("backend %s returned no usable model", key))
			xlog.Info("Fails", "backend", key, "error", "backend returned no usable model")
		}
	}
	return nil, fmt.Errorf("could not load model - all backends returned error: %s", err.Error())
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/model/watchdog_options_test.go | pkg/model/watchdog_options_test.go | package model_test
import (
"time"
"github.com/mudler/LocalAI/pkg/model"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Ginkgo suite covering the WatchDog functional options: defaults,
// ordering/overriding behavior, each individual option, and combinations.
var _ = Describe("WatchDogOptions", func() {
	Context("DefaultWatchDogOptions", func() {
		It("should return sensible defaults", func() {
			opts := model.DefaultWatchDogOptions()
			Expect(opts).ToNot(BeNil())
		})
	})
	Context("NewWatchDogOptions", func() {
		It("should apply options in order", func() {
			pm := newMockProcessManager()
			opts := model.NewWatchDogOptions(
				model.WithProcessManager(pm),
				model.WithBusyTimeout(10*time.Minute),
				model.WithIdleTimeout(20*time.Minute),
				model.WithBusyCheck(true),
				model.WithIdleCheck(true),
				model.WithLRULimit(5),
				model.WithMemoryReclaimer(true, 0.85),
			)
			Expect(opts).ToNot(BeNil())
		})
		It("should allow overriding options", func() {
			opts := model.NewWatchDogOptions(
				model.WithLRULimit(3),
				model.WithLRULimit(7), // override
			)
			// Create watchdog to verify the last option wins
			wd := model.NewWatchDog(
				model.WithProcessManager(newMockProcessManager()),
				model.WithLRULimit(3),
				model.WithLRULimit(7), // override
			)
			Expect(wd.GetLRULimit()).To(Equal(7))
			Expect(opts).ToNot(BeNil())
		})
	})
	// One test per option constructor
	Context("Individual Options", func() {
		var pm *mockProcessManager
		BeforeEach(func() {
			pm = newMockProcessManager()
		})
		It("WithProcessManager should set process manager", func() {
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
			)
			Expect(wd).ToNot(BeNil())
		})
		It("WithBusyTimeout should set busy timeout", func() {
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithBusyTimeout(7*time.Minute),
			)
			Expect(wd).ToNot(BeNil())
		})
		It("WithIdleTimeout should set idle timeout", func() {
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithIdleTimeout(25*time.Minute),
			)
			Expect(wd).ToNot(BeNil())
		})
		It("WithBusyCheck should enable busy checking", func() {
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithBusyCheck(true),
			)
			Expect(wd).ToNot(BeNil())
		})
		It("WithIdleCheck should enable idle checking", func() {
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithIdleCheck(true),
			)
			Expect(wd).ToNot(BeNil())
		})
		It("WithLRULimit should set LRU limit", func() {
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithLRULimit(10),
			)
			Expect(wd.GetLRULimit()).To(Equal(10))
		})
		It("WithMemoryReclaimer should set both enabled and threshold", func() {
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithMemoryReclaimer(true, 0.88),
			)
			enabled, threshold := wd.GetMemoryReclaimerSettings()
			Expect(enabled).To(BeTrue())
			Expect(threshold).To(Equal(0.88))
		})
		It("WithMemoryReclaimerEnabled should set enabled flag only", func() {
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithMemoryReclaimerEnabled(true),
			)
			enabled, _ := wd.GetMemoryReclaimerSettings()
			Expect(enabled).To(BeTrue())
		})
		It("WithMemoryReclaimerThreshold should set threshold only", func() {
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithMemoryReclaimerThreshold(0.75),
			)
			_, threshold := wd.GetMemoryReclaimerSettings()
			Expect(threshold).To(Equal(0.75))
		})
	})
	Context("Option Combinations", func() {
		It("should work with all options combined", func() {
			pm := newMockProcessManager()
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithBusyTimeout(3*time.Minute),
				model.WithIdleTimeout(10*time.Minute),
				model.WithBusyCheck(true),
				model.WithIdleCheck(true),
				model.WithLRULimit(2),
				model.WithMemoryReclaimerEnabled(true),
				model.WithMemoryReclaimerThreshold(0.92),
			)
			Expect(wd).ToNot(BeNil())
			Expect(wd.GetLRULimit()).To(Equal(2))
			enabled, threshold := wd.GetMemoryReclaimerSettings()
			Expect(enabled).To(BeTrue())
			Expect(threshold).To(Equal(0.92))
		})
		It("should work with no options (all defaults)", func() {
			wd := model.NewWatchDog()
			Expect(wd).ToNot(BeNil())
			Expect(wd.GetLRULimit()).To(Equal(0))
			enabled, threshold := wd.GetMemoryReclaimerSettings()
			Expect(enabled).To(BeFalse())
			Expect(threshold).To(Equal(model.DefaultMemoryReclaimerThreshold)) // default
		})
		It("should allow partial configuration", func() {
			pm := newMockProcessManager()
			wd := model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithLRULimit(3),
			)
			Expect(wd).ToNot(BeNil())
			Expect(wd.GetLRULimit()).To(Equal(3))
			// Memory reclaimer should use defaults
			enabled, threshold := wd.GetMemoryReclaimerSettings()
			Expect(enabled).To(BeFalse())
			Expect(threshold).To(Equal(model.DefaultMemoryReclaimerThreshold))
		})
	})
})
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/model/watchdog_options.go | pkg/model/watchdog_options.go | package model
import (
"time"
)
const (
DefaultWatchdogInterval = 500 * time.Millisecond
DefaultMemoryReclaimerThreshold = 0.80
)
// WatchDogOptions contains all configuration for the WatchDog
type WatchDogOptions struct {
processManager ProcessManager
// Timeout settings
busyTimeout time.Duration
idleTimeout time.Duration
watchdogInterval time.Duration
// Check toggles
busyCheck bool
idleCheck bool
// LRU settings
lruLimit int // Maximum number of active backends (0 = unlimited)
// Memory reclaimer settings (works with GPU if available, otherwise RAM)
memoryReclaimerEnabled bool // Enable memory threshold monitoring
memoryReclaimerThreshold float64 // Threshold 0.0-1.0 (e.g., 0.95 = 95%)
// Eviction settings
forceEvictionWhenBusy bool // Force eviction even when models have active API calls (default: false for safety)
}
// WatchDogOption is a function that configures WatchDogOptions
type WatchDogOption func(*WatchDogOptions)
// WithProcessManager sets the process manager for the watchdog
func WithProcessManager(pm ProcessManager) WatchDogOption {
return func(o *WatchDogOptions) {
o.processManager = pm
}
}
// WithBusyTimeout configures how long a backend may stay busy before the
// watchdog shuts it down.
func WithBusyTimeout(timeout time.Duration) WatchDogOption {
	return func(opts *WatchDogOptions) {
		opts.busyTimeout = timeout
	}
}
// WithIdleTimeout configures how long a backend may stay idle before the
// watchdog shuts it down.
func WithIdleTimeout(timeout time.Duration) WatchDogOption {
	return func(opts *WatchDogOptions) {
		opts.idleTimeout = timeout
	}
}
// WithWatchdogInterval sets how often the watchdog loop runs its checks.
// (The previous comment referred to a non-existent "WithWatchdogCheck".)
func WithWatchdogInterval(interval time.Duration) WatchDogOption {
	return func(o *WatchDogOptions) {
		o.watchdogInterval = interval
	}
}
// WithBusyCheck toggles the busy-timeout check.
func WithBusyCheck(enabled bool) WatchDogOption {
	return func(opts *WatchDogOptions) {
		opts.busyCheck = enabled
	}
}
// WithIdleCheck toggles the idle-timeout check.
func WithIdleCheck(enabled bool) WatchDogOption {
	return func(opts *WatchDogOptions) {
		opts.idleCheck = enabled
	}
}
// WithLRULimit caps the number of simultaneously active backends.
// A limit of 0 disables LRU eviction entirely.
func WithLRULimit(limit int) WatchDogOption {
	return func(opts *WatchDogOptions) {
		opts.lruLimit = limit
	}
}
// WithMemoryReclaimer sets both memory-reclaimer knobs at once: whether
// threshold monitoring runs and the usage fraction (0.0-1.0) that triggers
// eviction. Monitoring uses GPU VRAM when available, otherwise system RAM.
func WithMemoryReclaimer(enabled bool, threshold float64) WatchDogOption {
	return func(opts *WatchDogOptions) {
		opts.memoryReclaimerThreshold = threshold
		opts.memoryReclaimerEnabled = enabled
	}
}
// WithMemoryReclaimerEnabled toggles memory threshold monitoring without
// touching the threshold value.
func WithMemoryReclaimerEnabled(enabled bool) WatchDogOption {
	return func(opts *WatchDogOptions) {
		opts.memoryReclaimerEnabled = enabled
	}
}
// WithMemoryReclaimerThreshold sets the memory-usage fraction (0.0-1.0)
// above which the reclaimer evicts backends.
func WithMemoryReclaimerThreshold(threshold float64) WatchDogOption {
	return func(opts *WatchDogOptions) {
		opts.memoryReclaimerThreshold = threshold
	}
}
// WithForceEvictionWhenBusy controls whether LRU eviction may shut down a
// model that still has active API calls. The default is false: busy models
// are skipped for safety.
func WithForceEvictionWhenBusy(force bool) WatchDogOption {
	return func(opts *WatchDogOptions) {
		opts.forceEvictionWhenBusy = force
	}
}
// DefaultWatchDogOptions returns the baseline configuration: all checks and
// the memory reclaimer disabled, unlimited backends, 5m busy / 15m idle
// timeouts, the default check interval, and no forced eviction of busy models.
func DefaultWatchDogOptions() *WatchDogOptions {
	opts := &WatchDogOptions{}
	opts.busyTimeout = 5 * time.Minute
	opts.idleTimeout = 15 * time.Minute
	opts.watchdogInterval = DefaultWatchdogInterval
	opts.busyCheck = false
	opts.idleCheck = false
	opts.lruLimit = 0
	opts.memoryReclaimerEnabled = false
	opts.memoryReclaimerThreshold = DefaultMemoryReclaimerThreshold
	opts.forceEvictionWhenBusy = false // skip eviction of busy models by default, for safety
	return opts
}
// NewWatchDogOptions builds a WatchDogOptions by applying the given options,
// in order, on top of DefaultWatchDogOptions.
func NewWatchDogOptions(opts ...WatchDogOption) *WatchDogOptions {
	options := DefaultWatchDogOptions()
	for _, apply := range opts {
		apply(options)
	}
	return options
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
// Source: mudler/LocalAI — pkg/model/watchdog_test.go
package model_test
import (
"sync"
"time"
"github.com/mudler/LocalAI/pkg/model"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// mockProcessManager implements ProcessManager for testing.
// It records every ShutdownModel call and can be primed (via shutdownErrors)
// to return a specific error for a given model name. All access is guarded
// by mu so specs may call it from concurrent goroutines.
type mockProcessManager struct {
	mu            sync.Mutex
	shutdownCalls []string         // model names, in the order they were shut down
	shutdownErrors map[string]error // per-model error to return from ShutdownModel
}
// newMockProcessManager returns an empty mock ready for use in specs.
func newMockProcessManager() *mockProcessManager {
	m := &mockProcessManager{}
	m.shutdownCalls = []string{}
	m.shutdownErrors = map[string]error{}
	return m
}
// ShutdownModel records the request and returns the error primed for this
// model name, if any (a missing key yields nil, i.e. success).
func (m *mockProcessManager) ShutdownModel(modelName string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.shutdownCalls = append(m.shutdownCalls, modelName)
	return m.shutdownErrors[modelName]
}
// getShutdownCalls returns a snapshot copy of the recorded shutdown calls so
// callers can inspect it without racing against further mutations.
func (m *mockProcessManager) getShutdownCalls() []string {
	m.mu.Lock()
	defer m.mu.Unlock()
	snapshot := make([]string, len(m.shutdownCalls))
	copy(snapshot, m.shutdownCalls)
	return snapshot
}
// WatchDog behavior specs. They exercise, against a mockProcessManager:
// LRU limit configuration, memory-reclaimer options, model usage tracking,
// EnforceLRULimit eviction ordering, single-backend mode, the
// forceEvictionWhenBusy safety toggle, and the EnforceLRULimitResult contract.
var _ = Describe("WatchDog", func() {
	var (
		wd *model.WatchDog
		pm *mockProcessManager
	)
	BeforeEach(func() {
		pm = newMockProcessManager()
	})
	// Static configuration of the LRU limit (construction + dynamic update).
	Context("LRU Limit", func() {
		It("should create watchdog with LRU limit", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithBusyTimeout(5*time.Minute),
				model.WithIdleTimeout(15*time.Minute),
				model.WithLRULimit(2),
			)
			Expect(wd.GetLRULimit()).To(Equal(2))
		})
		It("should allow updating LRU limit dynamically", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithLRULimit(2),
			)
			wd.SetLRULimit(5)
			Expect(wd.GetLRULimit()).To(Equal(5))
		})
		It("should return 0 for disabled LRU", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithLRULimit(0),
			)
			Expect(wd.GetLRULimit()).To(Equal(0))
		})
	})
	// Memory-reclaimer settings: combined and separate options, defaults,
	// and dynamic updates.
	Context("Memory Reclaimer Options", func() {
		It("should create watchdog with memory reclaimer settings", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithMemoryReclaimer(true, 0.85),
			)
			enabled, threshold := wd.GetMemoryReclaimerSettings()
			Expect(enabled).To(BeTrue())
			Expect(threshold).To(Equal(0.85))
		})
		It("should allow setting memory reclaimer via separate options", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithMemoryReclaimerEnabled(true),
				model.WithMemoryReclaimerThreshold(0.90),
			)
			enabled, threshold := wd.GetMemoryReclaimerSettings()
			Expect(enabled).To(BeTrue())
			Expect(threshold).To(Equal(0.90))
		})
		It("should use default threshold when not specified", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
			)
			_, threshold := wd.GetMemoryReclaimerSettings()
			Expect(threshold).To(Equal(model.DefaultMemoryReclaimerThreshold))
		})
		It("should allow updating memory reclaimer settings dynamically", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
			)
			wd.SetMemoryReclaimer(true, 0.80)
			enabled, threshold := wd.GetMemoryReclaimerSettings()
			Expect(enabled).To(BeTrue())
			Expect(threshold).To(Equal(0.80))
		})
	})
	// Address/model bookkeeping and lastUsed timestamp updates.
	Context("Model Tracking", func() {
		BeforeEach(func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithBusyTimeout(5*time.Minute),
				model.WithIdleTimeout(15*time.Minute),
				model.WithLRULimit(3),
			)
		})
		It("should track loaded models count", func() {
			Expect(wd.GetLoadedModelCount()).To(Equal(0))
			wd.AddAddressModelMap("addr1", "model1")
			Expect(wd.GetLoadedModelCount()).To(Equal(1))
			wd.AddAddressModelMap("addr2", "model2")
			Expect(wd.GetLoadedModelCount()).To(Equal(2))
		})
		It("should update lastUsed time on Mark", func() {
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			// The model should now have a lastUsed time set
			// We can verify this indirectly through LRU eviction behavior
		})
		It("should update lastUsed time on UnMark", func() {
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.UnMark("addr1")
			// The model should now have an updated lastUsed time
		})
		It("should update lastUsed time via UpdateLastUsed", func() {
			wd.AddAddressModelMap("addr1", "model1")
			wd.UpdateLastUsed("addr1")
			// Verify the time was updated
		})
	})
	// Core LRU eviction: counts, ordering, pending loads, disabled limit.
	Context("EnforceLRULimit", func() {
		BeforeEach(func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithBusyTimeout(5*time.Minute),
				model.WithIdleTimeout(15*time.Minute),
				model.WithLRULimit(2),
				model.WithForceEvictionWhenBusy(true), // Enable force eviction for these tests to match old behavior
			)
		})
		It("should not evict when under limit", func() {
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			wd.UnMark("addr1") // Unmark to make it idle (not busy)
			result := wd.EnforceLRULimit(0)
			Expect(result.EvictedCount).To(Equal(0))
			Expect(result.NeedMore).To(BeFalse())
			Expect(pm.getShutdownCalls()).To(BeEmpty())
		})
		It("should evict oldest model when at limit", func() {
			// Add two models
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			wd.UnMark("addr1") // Unmark to make it idle
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.UnMark("addr2") // Unmark to make it idle
			// Enforce LRU with limit of 2 (need to make room for 1 new model)
			result := wd.EnforceLRULimit(0)
			Expect(result.EvictedCount).To(Equal(1))
			Expect(result.NeedMore).To(BeFalse())
			Expect(pm.getShutdownCalls()).To(ContainElement("model1")) // oldest should be evicted
		})
		It("should evict multiple models when needed", func() {
			// Add three models
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			wd.UnMark("addr1") // Unmark to make it idle
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.UnMark("addr2") // Unmark to make it idle
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr3", "model3")
			wd.Mark("addr3")
			wd.UnMark("addr3") // Unmark to make it idle
			// Set limit to 1, should evict 2 oldest + 1 for new = 3 evictions
			wd.SetLRULimit(1)
			result := wd.EnforceLRULimit(0)
			Expect(result.EvictedCount).To(Equal(3))
			Expect(result.NeedMore).To(BeFalse())
			shutdowns := pm.getShutdownCalls()
			Expect(shutdowns).To(ContainElement("model1"))
			Expect(shutdowns).To(ContainElement("model2"))
			Expect(shutdowns).To(ContainElement("model3"))
		})
		It("should account for pending loads", func() {
			// Add two models (at limit)
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			wd.UnMark("addr1") // Unmark to make it idle
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.UnMark("addr2") // Unmark to make it idle
			// With 1 pending load, we need to evict 2 (current=2, pending=1, new=1, limit=2)
			// total after = 2 + 1 + 1 = 4, need to evict 4 - 2 = 2
			result := wd.EnforceLRULimit(1)
			Expect(result.EvictedCount).To(Equal(2))
			Expect(result.NeedMore).To(BeFalse())
		})
		It("should not evict when LRU is disabled", func() {
			wd.SetLRULimit(0)
			wd.AddAddressModelMap("addr1", "model1")
			wd.AddAddressModelMap("addr2", "model2")
			wd.AddAddressModelMap("addr3", "model3")
			result := wd.EnforceLRULimit(0)
			Expect(result.EvictedCount).To(Equal(0))
			Expect(result.NeedMore).To(BeFalse())
			Expect(pm.getShutdownCalls()).To(BeEmpty())
		})
		It("should evict least recently used first", func() {
			wd.SetLRULimit(2)
			// Add models with different lastUsed times
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			wd.UnMark("addr1") // Unmark to make it idle
			time.Sleep(20 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.UnMark("addr2") // Unmark to make it idle
			time.Sleep(20 * time.Millisecond)
			// Touch model1 again to make it more recent
			wd.UpdateLastUsed("addr1")
			time.Sleep(20 * time.Millisecond)
			wd.AddAddressModelMap("addr3", "model3")
			wd.Mark("addr3")
			wd.UnMark("addr3") // Unmark to make it idle
			// Now model2 is the oldest, should be evicted first
			result := wd.EnforceLRULimit(0)
			Expect(result.EvictedCount).To(BeNumerically(">=", 1))
			Expect(result.NeedMore).To(BeFalse())
			shutdowns := pm.getShutdownCalls()
			// model2 should be evicted first (it's the oldest)
			if len(shutdowns) >= 1 {
				Expect(shutdowns[0]).To(Equal("model2"))
			}
		})
	})
	// lruLimit == 1: every new load evicts the previous backend.
	Context("Single Backend Mode (LRU=1)", func() {
		BeforeEach(func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithBusyTimeout(5*time.Minute),
				model.WithIdleTimeout(15*time.Minute),
				model.WithLRULimit(1),
				model.WithForceEvictionWhenBusy(true), // Enable force eviction for these tests
			)
		})
		It("should evict existing model when loading new one", func() {
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			wd.UnMark("addr1") // Unmark to make it idle
			// With limit=1, loading a new model should evict the existing one
			result := wd.EnforceLRULimit(0)
			Expect(result.EvictedCount).To(Equal(1))
			Expect(result.NeedMore).To(BeFalse())
			Expect(pm.getShutdownCalls()).To(ContainElement("model1"))
		})
		It("should handle rapid model switches", func() {
			for i := 0; i < 5; i++ {
				wd.AddAddressModelMap("addr", "model")
				wd.Mark("addr")
				wd.UnMark("addr") // Unmark to make it idle
				wd.EnforceLRULimit(0)
			}
			// All previous models should have been evicted
			Expect(len(pm.getShutdownCalls())).To(Equal(5))
		})
	})
	// Safety toggle: busy models are skipped unless forceEvictionWhenBusy.
	Context("Force Eviction When Busy", func() {
		BeforeEach(func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithLRULimit(2),
				model.WithForceEvictionWhenBusy(false), // Default: skip eviction when busy
			)
		})
		It("should skip eviction for busy models when forceEvictionWhenBusy is false", func() {
			// Add two models (at limit of 2, need to evict 1 for new model)
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.UnMark("addr2") // Make model2 idle
			// Keep model1 as busy (simulating active API call)
			// model1 is already marked as busy from the first Mark call
			// Try to enforce LRU - should skip busy model1, evict model2
			result := wd.EnforceLRULimit(0)
			// Should evict model2 (not busy) but skip model1 (busy)
			// Since we evicted 1 (which is what we needed), NeedMore should be false
			Expect(result.EvictedCount).To(Equal(1))
			Expect(result.NeedMore).To(BeFalse()) // We evicted enough, even though we skipped model1
			Expect(pm.getShutdownCalls()).To(ContainElement("model2"))
			Expect(pm.getShutdownCalls()).ToNot(ContainElement("model1"))
		})
		It("should evict busy models when forceEvictionWhenBusy is true", func() {
			wd.SetForceEvictionWhenBusy(true)
			// Add two models
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			// Keep model1 as busy (already marked from first Mark call)
			// Try to enforce LRU - should evict model1 even though busy
			result := wd.EnforceLRULimit(0)
			Expect(result.EvictedCount).To(Equal(1))
			Expect(result.NeedMore).To(BeFalse())
			Expect(pm.getShutdownCalls()).To(ContainElement("model1"))
		})
		It("should set NeedMore when all models are busy and forceEvictionWhenBusy is false", func() {
			// Add two models
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			// Mark both as busy
			wd.Mark("addr1")
			wd.Mark("addr2")
			// Try to enforce LRU - should skip both busy models
			result := wd.EnforceLRULimit(0)
			Expect(result.EvictedCount).To(Equal(0))
			Expect(result.NeedMore).To(BeTrue())
			Expect(pm.getShutdownCalls()).To(BeEmpty())
		})
		It("should allow updating forceEvictionWhenBusy dynamically", func() {
			// Start with false
			Expect(wd).ToNot(BeNil())
			// Add models
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.UnMark("addr2") // Make model2 idle
			// Keep model1 busy (already marked)
			// With forceEvictionWhenBusy=false, should skip busy model1, evict model2
			result := wd.EnforceLRULimit(0)
			Expect(result.NeedMore).To(BeFalse())   // We evicted enough (1 model)
			Expect(result.EvictedCount).To(Equal(1)) // Should evict model2 (not busy)
			// Now enable force eviction
			wd.SetForceEvictionWhenBusy(true)
			// Add models again
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			// Keep model1 busy (already marked)
			// With forceEvictionWhenBusy=true, should evict busy model1
			result = wd.EnforceLRULimit(0)
			Expect(result.NeedMore).To(BeFalse())
			Expect(result.EvictedCount).To(Equal(1))
		})
		It("should continue to next LRU model when busy model is skipped", func() {
			// Add three models
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.UnMark("addr2") // Make model2 idle
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr3", "model3")
			wd.Mark("addr3")
			wd.UnMark("addr3") // Make model3 idle
			// Keep model1 as busy (oldest, already marked)
			// Need to evict 2 models (limit=2, current=3, need room for 1 new)
			// Should skip model1 (busy), evict model2 and model3 (not busy)
			result := wd.EnforceLRULimit(0)
			// Should evict model2 and model3 (2 models, which is what we needed)
			Expect(result.EvictedCount).To(Equal(2))
			Expect(result.NeedMore).To(BeFalse()) // We evicted enough (2 models)
			Expect(pm.getShutdownCalls()).To(ContainElement("model2"))
			Expect(pm.getShutdownCalls()).To(ContainElement("model3"))
		})
	})
	// Contract of the EnforceLRULimitResult return value.
	Context("EnforceLRULimitResult", func() {
		BeforeEach(func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithLRULimit(2),
				model.WithForceEvictionWhenBusy(false),
			)
		})
		It("should return NeedMore=false when eviction is successful", func() {
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			wd.UnMark("addr1") // Make idle
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.UnMark("addr2") // Make idle
			result := wd.EnforceLRULimit(0)
			Expect(result.NeedMore).To(BeFalse())
			Expect(result.EvictedCount).To(Equal(1))
		})
		It("should return NeedMore=true when not enough models can be evicted", func() {
			// Add two models (at limit of 2, need to evict 1 for new model)
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			// Mark both as busy (keep them busy)
			// Both are already marked as busy from the Mark calls above
			// Need to evict 1, but both are busy
			result := wd.EnforceLRULimit(0)
			Expect(result.NeedMore).To(BeTrue())
			Expect(result.EvictedCount).To(Equal(0))
		})
		It("should return NeedMore=true when need to evict multiple but some are busy", func() {
			// Set limit to 1, add 3 models (need to evict 2 for new model)
			wd.SetLRULimit(1)
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.UnMark("addr2") // Make model2 idle
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr3", "model3")
			wd.Mark("addr3")
			// Keep model1 and model3 busy
			// Need to evict 2 models, but model1 and model3 are busy, only model2 is idle
			// Should evict model2 (1 model), but NeedMore=true because we needed 2
			result := wd.EnforceLRULimit(0)
			Expect(result.EvictedCount).To(Equal(1))
			Expect(result.NeedMore).To(BeTrue())
		})
		It("should return correct EvictedCount when some models are evicted", func() {
			// Add three models
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.UnMark("addr2") // Make model2 idle
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr3", "model3")
			wd.Mark("addr3")
			wd.UnMark("addr3") // Make model3 idle
			// Keep model1 as busy (already marked)
			// Need to evict 2 models, but model1 is busy
			// Should evict model2 and model3 (2 models, which is what we needed)
			result := wd.EnforceLRULimit(0)
			Expect(result.EvictedCount).To(Equal(2))
			Expect(result.NeedMore).To(BeFalse()) // We evicted enough (2 models)
		})
	})
	// Functional-options plumbing: defaults, combinations, and overrides.
	Context("Functional Options", func() {
		It("should use default options when none provided", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
			)
			Expect(wd.GetLRULimit()).To(Equal(0))
			enabled, threshold := wd.GetMemoryReclaimerSettings()
			Expect(enabled).To(BeFalse())
			Expect(threshold).To(Equal(model.DefaultMemoryReclaimerThreshold))
		})
		It("should allow combining multiple options", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithBusyTimeout(10*time.Minute),
				model.WithIdleTimeout(30*time.Minute),
				model.WithBusyCheck(true),
				model.WithIdleCheck(true),
				model.WithLRULimit(5),
				model.WithMemoryReclaimerEnabled(true),
				model.WithMemoryReclaimerThreshold(0.80),
				model.WithForceEvictionWhenBusy(true),
			)
			Expect(wd.GetLRULimit()).To(Equal(5))
			enabled, threshold := wd.GetMemoryReclaimerSettings()
			Expect(enabled).To(BeTrue())
			Expect(threshold).To(Equal(0.80))
		})
		It("should use default forceEvictionWhenBusy (false) when not specified", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
			)
			// Default should be false - we can test this by checking behavior
			// Add a busy model and verify it's skipped
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			wd.Mark("addr1") // Keep model1 busy
			wd.SetLRULimit(1)
			result := wd.EnforceLRULimit(0)
			// Should skip busy model1, evict model2, but NeedMore=true
			Expect(result.NeedMore).To(BeTrue())
		})
		It("should allow setting forceEvictionWhenBusy via option", func() {
			wd = model.NewWatchDog(
				model.WithProcessManager(pm),
				model.WithLRULimit(2),
				model.WithForceEvictionWhenBusy(true),
			)
			// Add models
			wd.AddAddressModelMap("addr1", "model1")
			wd.Mark("addr1")
			time.Sleep(10 * time.Millisecond)
			wd.AddAddressModelMap("addr2", "model2")
			wd.Mark("addr2")
			// Keep model1 busy (already marked from first Mark call)
			// Should evict busy model1
			result := wd.EnforceLRULimit(0)
			Expect(result.NeedMore).To(BeFalse())
			Expect(result.EvictedCount).To(Equal(1))
			Expect(pm.getShutdownCalls()).To(ContainElement("model1"))
		})
	})
})
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
// Source: mudler/LocalAI — pkg/model/model.go
package model
import (
"sync"
grpc "github.com/mudler/LocalAI/pkg/grpc"
process "github.com/mudler/go-processmanager"
)
// Model pairs a loaded model's ID and backend address with its OS process
// handle and a lazily created gRPC client. The embedded Mutex guards client
// creation, so a Model value must not be copied after first use.
type Model struct {
	ID      string `json:"id"`
	address string       // address the backend's gRPC server is reachable at
	client  grpc.Backend // nil until the first GRPC() call creates it
	process *process.Process
	sync.Mutex
}
// NewModel constructs a Model for the given ID, backend address, and process
// handle; the gRPC client is created lazily on the first GRPC() call.
func NewModel(ID, address string, process *process.Process) *Model {
	m := &Model{}
	m.ID = ID
	m.address = address
	m.process = process
	return m
}
// Process returns the OS process handle of the model's backend.
func (m *Model) Process() *process.Process {
	return m.process
}
// GRPC returns the gRPC client for this model, creating it on first use.
// When wd is non-nil the watchdog is wired into the client so it can track
// busy/idle state.
//
// Fix: the original read m.client *before* taking the lock while writing it
// *under* the lock — a data race under concurrent callers that could also
// construct two clients. The check-and-create now happens entirely inside
// the critical section.
func (m *Model) GRPC(parallel bool, wd *WatchDog) grpc.Backend {
	m.Lock()
	defer m.Unlock()
	// Fast path: client already created by an earlier call.
	if m.client != nil {
		return m.client
	}
	// Watchdog tracking is enabled iff a watchdog was supplied.
	enableWD := wd != nil
	m.client = grpc.NewClient(m.address, parallel, wd, enableWD)
	return m.client
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
// Source: mudler/LocalAI — pkg/model/watchdog.go
package model
import (
"sort"
"sync"
"time"
"github.com/mudler/LocalAI/pkg/xsysinfo"
process "github.com/mudler/go-processmanager"
"github.com/mudler/xlog"
)
// WatchDog tracks all the requests from GRPC clients.
// All GRPC Clients created by ModelLoader should have an associated injected
// watchdog that will keep track of the state of each backend (busy or not)
// and for how much time it has been busy.
// If a backend is busy for too long, the watchdog will kill the process and
// force a reload of the model.
// The watchdog also supports LRU (Least Recently Used) eviction when a maximum
// number of active backends is configured.
// The watchdog also supports memory threshold monitoring - when memory usage
// (GPU VRAM if available, otherwise system RAM) exceeds the threshold,
// it will evict backends using the LRU strategy.
// The watchdog runs as a separate go routine,
// and the GRPC client talks to it via a channel to send status updates.
//
// All maps and settings below are guarded by the embedded Mutex; a WatchDog
// must therefore never be copied.
type WatchDog struct {
	sync.Mutex
	busyTime             map[string]time.Time // address -> when the backend became busy
	idleTime             map[string]time.Time // address -> when the backend became idle
	lastUsed             map[string]time.Time // LRU tracking: when each model was last used
	timeout, idletimeout time.Duration        // busy / idle shutdown thresholds
	addressMap           map[string]*process.Process // address -> backend process handle
	addressModelMap      map[string]string           // address -> model name
	pm                   ProcessManager
	stop                 chan bool // buffered(1); signals Run() to exit
	busyCheck, idleCheck bool
	lruLimit             int // Maximum number of active backends (0 = unlimited)
	// Memory reclaimer settings (works with GPU if available, otherwise RAM)
	memoryReclaimerEnabled   bool    // Enable memory threshold monitoring
	memoryReclaimerThreshold float64 // Threshold 0.0-1.0 (e.g., 0.95 = 95%)
	watchdogInterval         time.Duration
	// Eviction settings
	forceEvictionWhenBusy bool // Force eviction even when models have active API calls (default: false for safety)
}
// ProcessManager abstracts shutting down a backend by model name; it is what
// the watchdog calls to evict or kill a model (and is mocked in tests).
type ProcessManager interface {
	ShutdownModel(modelName string) error
}
// NewWatchDog creates a new WatchDog configured via functional options.
// Example:
//
//	wd := NewWatchDog(
//		WithProcessManager(pm),
//		WithBusyTimeout(5*time.Minute),
//		WithIdleTimeout(15*time.Minute),
//		WithBusyCheck(true),
//		WithIdleCheck(true),
//		WithLRULimit(3),
//		WithMemoryReclaimer(true, 0.95),
//	)
func NewWatchDog(opts ...WatchDogOption) *WatchDog {
	options := NewWatchDogOptions(opts...)
	wd := &WatchDog{
		pm:                       options.processManager,
		timeout:                  options.busyTimeout,
		idletimeout:              options.idleTimeout,
		busyCheck:                options.busyCheck,
		idleCheck:                options.idleCheck,
		lruLimit:                 options.lruLimit,
		memoryReclaimerEnabled:   options.memoryReclaimerEnabled,
		memoryReclaimerThreshold: options.memoryReclaimerThreshold,
		watchdogInterval:         options.watchdogInterval,
		forceEvictionWhenBusy:    options.forceEvictionWhenBusy,
		busyTime:                 map[string]time.Time{},
		idleTime:                 map[string]time.Time{},
		lastUsed:                 map[string]time.Time{},
		addressMap:               map[string]*process.Process{},
		addressModelMap:          map[string]string{},
		stop:                     make(chan bool, 1),
	}
	return wd
}
// SetLRULimit changes the maximum number of active backends at runtime
// (0 disables LRU eviction).
func (wd *WatchDog) SetLRULimit(limit int) {
	wd.Lock()
	wd.lruLimit = limit
	wd.Unlock()
}
// GetLRULimit reports the currently configured LRU limit.
func (wd *WatchDog) GetLRULimit() int {
	wd.Lock()
	limit := wd.lruLimit
	wd.Unlock()
	return limit
}
// SetMemoryReclaimer atomically updates both memory-reclaimer knobs at runtime.
func (wd *WatchDog) SetMemoryReclaimer(enabled bool, threshold float64) {
	wd.Lock()
	wd.memoryReclaimerThreshold = threshold
	wd.memoryReclaimerEnabled = enabled
	wd.Unlock()
}
// GetMemoryReclaimerSettings reports whether the memory reclaimer is enabled
// and its current threshold.
func (wd *WatchDog) GetMemoryReclaimerSettings() (enabled bool, threshold float64) {
	wd.Lock()
	enabled = wd.memoryReclaimerEnabled
	threshold = wd.memoryReclaimerThreshold
	wd.Unlock()
	return enabled, threshold
}
// SetForceEvictionWhenBusy toggles, at runtime, whether LRU eviction may shut
// down models that still have active API calls.
func (wd *WatchDog) SetForceEvictionWhenBusy(force bool) {
	wd.Lock()
	wd.forceEvictionWhenBusy = force
	wd.Unlock()
}
// Shutdown signals the watchdog loop to stop.
//
// Fix: the original performed a blocking send into the 1-slot buffered stop
// channel while holding the mutex. If Run had already exited (e.g. it stopped
// itself because no checks were enabled) or Shutdown was called twice, the
// second send blocked forever with the lock held, wedging every other
// watchdog method. The send is now non-blocking: if a stop signal is already
// pending there is nothing more to do.
func (wd *WatchDog) Shutdown() {
	wd.Lock()
	defer wd.Unlock()
	xlog.Info("[WatchDog] Shutting down watchdog")
	select {
	case wd.stop <- true:
	default:
		// A stop signal is already pending (or nobody is listening anymore).
	}
}
// AddAddressModelMap registers the model name served at the given address.
func (wd *WatchDog) AddAddressModelMap(address string, model string) {
	wd.Lock()
	wd.addressModelMap[address] = model
	wd.Unlock()
}
// Add registers the backend process handle listening at the given address.
func (wd *WatchDog) Add(address string, p *process.Process) {
	wd.Lock()
	wd.addressMap[address] = p
	wd.Unlock()
}
// Mark flags the backend at address as busy, clears any idle record, and
// refreshes its LRU timestamp.
func (wd *WatchDog) Mark(address string) {
	wd.Lock()
	defer wd.Unlock()
	ts := time.Now()
	delete(wd.idleTime, address)
	wd.busyTime[address] = ts
	wd.lastUsed[address] = ts // LRU bookkeeping
}
// UnMark flags the backend at the given address as idle, clears its busy
// record, and refreshes its LRU timestamp.
func (wd *WatchDog) UnMark(ModelAddress string) {
	wd.Lock()
	defer wd.Unlock()
	ts := time.Now()
	delete(wd.busyTime, ModelAddress)
	wd.idleTime[ModelAddress] = ts
	wd.lastUsed[ModelAddress] = ts // LRU bookkeeping
}
// UpdateLastUsed refreshes the LRU timestamp for a model address without
// changing its busy/idle state. Call it whenever a loaded model is accessed.
func (wd *WatchDog) UpdateLastUsed(address string) {
	wd.Lock()
	wd.lastUsed[address] = time.Now()
	wd.Unlock()
}
// GetLoadedModelCount reports how many models the watchdog currently tracks.
func (wd *WatchDog) GetLoadedModelCount() int {
	wd.Lock()
	count := len(wd.addressModelMap)
	wd.Unlock()
	return count
}
// modelUsageInfo is a snapshot of one tracked model used to sort candidates
// by recency during LRU eviction.
type modelUsageInfo struct {
	address  string    // backend address (key into the watchdog maps)
	model    string    // model name passed to ProcessManager.ShutdownModel
	lastUsed time.Time // zero value sorts first, i.e. "never used"
}
// EnforceLRULimitResult contains the result of LRU enforcement.
type EnforceLRULimitResult struct {
	EvictedCount int  // Number of models successfully evicted
	NeedMore     bool // True if more evictions are needed but couldn't be done (e.g., all models are busy)
}
// EnforceLRULimit ensures we're under the LRU limit by evicting least
// recently used models. This should be called before loading a new model.
// pendingLoads is the number of models currently being loaded (to account for
// concurrent loads). Returns the result containing the evicted count and
// whether more evictions are needed but could not be performed.
//
// Fix: lruLimit was previously read before acquiring the lock, racing with
// SetLRULimit (which mutates it under the lock). The read now happens inside
// the critical section.
func (wd *WatchDog) EnforceLRULimit(pendingLoads int) EnforceLRULimitResult {
	wd.Lock()
	if wd.lruLimit <= 0 {
		wd.Unlock()
		return EnforceLRULimitResult{EvictedCount: 0, NeedMore: false} // LRU disabled
	}
	currentCount := len(wd.addressModelMap)
	// We need to evict enough to make room for the new model AND any pending loads.
	// Total after loading = currentCount + pendingLoads + 1 (the new one we're about to load).
	// We need: currentCount + pendingLoads + 1 <= lruLimit
	// So evict: currentCount - lruLimit + pendingLoads + 1
	modelsToEvict := currentCount - wd.lruLimit + pendingLoads + 1
	forceEvictionWhenBusy := wd.forceEvictionWhenBusy
	if modelsToEvict <= 0 {
		wd.Unlock()
		return EnforceLRULimitResult{EvictedCount: 0, NeedMore: false}
	}
	xlog.Debug("[WatchDog] LRU enforcement triggered", "current", currentCount, "pendingLoads", pendingLoads, "limit", wd.lruLimit, "toEvict", modelsToEvict)
	// Build a list of models sorted by last used time (oldest first).
	var models []modelUsageInfo
	for address, model := range wd.addressModelMap {
		lastUsed := wd.lastUsed[address]
		if lastUsed.IsZero() {
			// If no lastUsed recorded, use a very old time so it sorts first.
			lastUsed = time.Time{}
		}
		models = append(models, modelUsageInfo{
			address:  address,
			model:    model,
			lastUsed: lastUsed,
		})
	}
	sort.Slice(models, func(i, j int) bool {
		return models[i].lastUsed.Before(models[j].lastUsed)
	})
	// Collect the oldest models to evict, skipping busy ones unless forced.
	var modelsToShutdown []string
	evictedCount := 0
	skippedBusyCount := 0
	for i := 0; evictedCount < modelsToEvict && i < len(models); i++ {
		m := models[i]
		_, isBusy := wd.busyTime[m.address]
		if isBusy && !forceEvictionWhenBusy {
			// Skip eviction for busy models when forceEvictionWhenBusy is false.
			xlog.Warn("[WatchDog] Skipping LRU eviction for busy model", "model", m.model, "reason", "model has active API calls")
			skippedBusyCount++
			continue
		}
		xlog.Info("[WatchDog] LRU evicting model", "model", m.model, "lastUsed", m.lastUsed, "busy", isBusy)
		modelsToShutdown = append(modelsToShutdown, m.model)
		// Clean up the maps while we still hold the lock.
		wd.untrack(m.address)
		evictedCount++
	}
	needMore := evictedCount < modelsToEvict && skippedBusyCount > 0
	wd.Unlock()
	// Shut down models without holding the watchdog lock to prevent deadlock.
	for _, model := range modelsToShutdown {
		if err := wd.pm.ShutdownModel(model); err != nil {
			xlog.Error("[WatchDog] error shutting down model during LRU eviction", "error", err, "model", model)
		}
		xlog.Debug("[WatchDog] LRU eviction complete", "model", model)
	}
	if needMore {
		xlog.Warn("[WatchDog] LRU eviction incomplete", "evicted", evictedCount, "needed", modelsToEvict, "skippedBusy", skippedBusyCount, "reason", "some models are busy with active API calls")
	}
	return EnforceLRULimitResult{
		EvictedCount: len(modelsToShutdown),
		NeedMore:     needMore,
	}
}
// Run is the watchdog main loop. Every watchdogInterval it snapshots the
// check toggles and runs whichever checks are enabled. It exits either when
// a stop signal arrives or when every check is disabled.
func (wd *WatchDog) Run() {
	xlog.Info("[WatchDog] starting watchdog")
	for {
		select {
		case <-wd.stop:
			xlog.Info("[WatchDog] Stopping watchdog")
			return
		case <-time.After(wd.watchdogInterval):
			// Check if any monitoring is enabled.
			// Snapshot the toggles under the lock; they can change at runtime.
			wd.Lock()
			busyCheck := wd.busyCheck
			idleCheck := wd.idleCheck
			memoryCheck := wd.memoryReclaimerEnabled
			wd.Unlock()
			if !busyCheck && !idleCheck && !memoryCheck {
				xlog.Info("[WatchDog] No checks enabled, stopping watchdog")
				return
			}
			if busyCheck {
				wd.checkBusy()
			}
			if idleCheck {
				wd.checkIdle()
			}
			if memoryCheck {
				wd.checkMemory()
			}
		}
	}
}
// checkIdle shuts down backends that have been idle longer than idletimeout.
// Candidates are collected (and untracked) under the lock, but the actual
// ShutdownModel calls happen after releasing it to avoid deadlock.
func (wd *WatchDog) checkIdle() {
	wd.Lock()
	xlog.Debug("[WatchDog] Watchdog checks for idle connections")
	// Collect models to shutdown while holding the lock
	var modelsToShutdown []string
	for address, t := range wd.idleTime {
		xlog.Debug("[WatchDog] idle connection", "address", address)
		if time.Since(t) > wd.idletimeout {
			xlog.Warn("[WatchDog] Address is idle for too long, killing it", "address", address)
			model, ok := wd.addressModelMap[address]
			if ok {
				modelsToShutdown = append(modelsToShutdown, model)
			} else {
				// Idle entry with no model mapping; still untracked below.
				xlog.Warn("[WatchDog] Address unresolvable", "address", address)
			}
			wd.untrack(address)
		}
	}
	wd.Unlock()
	// Now shutdown models without holding the watchdog lock to prevent deadlock
	for _, model := range modelsToShutdown {
		if err := wd.pm.ShutdownModel(model); err != nil {
			xlog.Error("[watchdog] error shutting down model", "error", err, "model", model)
		}
		xlog.Debug("[WatchDog] model shut down", "model", model)
	}
}
// checkBusy scans tracked connections and shuts down models that have been
// busy (serving a request) longer than the configured timeout.
// Candidates are collected under the lock; ShutdownModel runs after the
// lock is released so it cannot deadlock with watchdog tracking callbacks.
func (wd *WatchDog) checkBusy() {
	wd.Lock()
	xlog.Debug("[WatchDog] Watchdog checks for busy connections")
	// Collect models to shutdown while holding the lock
	var modelsToShutdown []string
	for address, t := range wd.busyTime {
		xlog.Debug("[WatchDog] active connection", "address", address)
		if time.Since(t) > wd.timeout {
			model, ok := wd.addressModelMap[address]
			if ok {
				xlog.Warn("[WatchDog] Model is busy for too long, killing it", "model", model)
				modelsToShutdown = append(modelsToShutdown, model)
			} else {
				xlog.Warn("[WatchDog] Address unresolvable", "address", address)
			}
			wd.untrack(address)
		}
	}
	wd.Unlock()
	// Now shutdown models without holding the watchdog lock to prevent deadlock
	for _, model := range modelsToShutdown {
		if err := wd.pm.ShutdownModel(model); err != nil {
			// Tag normalized to "[WatchDog]" for consistency with the rest of this file.
			xlog.Error("[WatchDog] error shutting down model", "error", err, "model", model)
		} else {
			// Only report success when ShutdownModel returned no error.
			xlog.Debug("[WatchDog] model shut down", "model", model)
		}
	}
}
// checkMemory monitors memory usage (GPU VRAM if available, otherwise RAM) and evicts backends when usage exceeds threshold
func (wd *WatchDog) checkMemory() {
	// Snapshot configuration and model count under the lock; the actual
	// memory probe and eviction run without it.
	wd.Lock()
	reclaimThreshold := wd.memoryReclaimerThreshold
	reclaimEnabled := wd.memoryReclaimerEnabled
	loadedModels := len(wd.addressModelMap)
	wd.Unlock()
	// Nothing to do when the reclaimer is off, misconfigured, or no model is loaded.
	if !reclaimEnabled || reclaimThreshold <= 0 || loadedModels == 0 {
		return
	}
	// Probe current usage (GPU if present, RAM otherwise).
	aggregate := xsysinfo.GetResourceAggregateInfo()
	if aggregate.TotalMemory == 0 {
		xlog.Debug("[WatchDog] No memory information available for memory reclaimer")
		return
	}
	// The configured threshold is a 0.0-1.0 fraction; usage is reported in percent.
	thresholdPercent := reclaimThreshold * 100
	memoryType := "RAM"
	if aggregate.GPUCount > 0 {
		memoryType = "GPU"
	}
	xlog.Debug("[WatchDog] Memory check", "type", memoryType, "usage_percent", aggregate.UsagePercent, "threshold_percent", thresholdPercent, "loaded_models", loadedModels)
	if aggregate.UsagePercent <= thresholdPercent {
		return
	}
	xlog.Warn("[WatchDog] Memory usage exceeds threshold, evicting LRU backend", "type", memoryType, "usage_percent", aggregate.UsagePercent, "threshold_percent", thresholdPercent)
	// Free memory by dropping the least recently used backend.
	wd.evictLRUModel()
}
// evictLRUModel evicts the least recently used model.
// Busy models (with in-flight API calls) are skipped unless
// forceEvictionWhenBusy is set. The actual shutdown happens after the
// watchdog lock is released so ShutdownModel cannot deadlock with
// watchdog tracking callbacks.
func (wd *WatchDog) evictLRUModel() {
	wd.Lock()
	if len(wd.addressModelMap) == 0 {
		wd.Unlock()
		return
	}
	forceEvictionWhenBusy := wd.forceEvictionWhenBusy
	// Build a list of loaded models with their last-used timestamps.
	// A missing lastUsed entry yields the zero time.Time, which sorts first
	// (i.e. "never used" models are evicted before anything else).
	// The previous explicit IsZero()->time.Time{} reset was a no-op and has
	// been removed; the map's zero value already is the zero time.
	models := make([]modelUsageInfo, 0, len(wd.addressModelMap))
	for address, model := range wd.addressModelMap {
		models = append(models, modelUsageInfo{
			address:  address,
			model:    model,
			lastUsed: wd.lastUsed[address],
		})
	}
	// Sort by lastUsed time (oldest first). The map was non-empty above, so
	// models is guaranteed non-empty here.
	sort.Slice(models, func(i, j int) bool {
		return models[i].lastUsed.Before(models[j].lastUsed)
	})
	// Find the first non-busy model (or first model if forceEvictionWhenBusy is true)
	var lruModel *modelUsageInfo
	for i := 0; i < len(models); i++ {
		m := models[i]
		_, isBusy := wd.busyTime[m.address]
		if isBusy && !forceEvictionWhenBusy {
			// Skip busy models when forceEvictionWhenBusy is false
			xlog.Warn("[WatchDog] Skipping memory reclaimer eviction for busy model", "model", m.model, "reason", "model has active API calls")
			continue
		}
		lruModel = &m
		break
	}
	if lruModel == nil {
		// All models are busy and forceEvictionWhenBusy is false
		wd.Unlock()
		xlog.Warn("[WatchDog] Memory reclaimer cannot evict: all models are busy with active API calls")
		return
	}
	xlog.Info("[WatchDog] Memory reclaimer evicting LRU model", "model", lruModel.model, "lastUsed", lruModel.lastUsed)
	// Untrack the model
	wd.untrack(lruModel.address)
	wd.Unlock()
	// Shutdown the model
	if err := wd.pm.ShutdownModel(lruModel.model); err != nil {
		xlog.Error("[WatchDog] error shutting down model during memory reclamation", "error", err, "model", lruModel.model)
	} else {
		xlog.Info("[WatchDog] Memory reclaimer eviction complete", "model", lruModel.model)
	}
}
// untrack removes all bookkeeping for a backend address: busy/idle timers,
// the LRU timestamp, and the address->model / address->process maps.
//
// NOTE(review): every call site in this file (checkIdle, checkBusy,
// evictLRUModel) invokes untrack while holding the watchdog lock; it mutates
// shared maps and must not be called without it.
func (wd *WatchDog) untrack(address string) {
	delete(wd.busyTime, address)
	delete(wd.idleTime, address)
	delete(wd.lastUsed, address)
	delete(wd.addressModelMap, address)
	delete(wd.addressMap, address)
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/model/process.go | pkg/model/process.go | package model
import (
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/hpcloud/tail"
"github.com/mudler/LocalAI/pkg/signals"
process "github.com/mudler/go-processmanager"
"github.com/mudler/xlog"
)
var forceBackendShutdown bool = os.Getenv("LOCALAI_FORCE_BACKEND_SHUTDOWN") == "true"
// deleteProcess stops the process backing model s and removes the entry from
// ml.models. While the backend reports itself busy it waits with a backoff
// capped at retryTimeout; when forceBackendShutdown is set the wait is
// abandoned after 10 retries and the process is stopped anyway.
//
// NOTE(review): StopGRPC calls this while holding ml.mu; this function does
// no locking of its own.
func (ml *ModelLoader) deleteProcess(s string) error {
	model, ok := ml.models[s]
	if !ok {
		xlog.Debug("Model not found", "model", s)
		return fmt.Errorf("model %s not found", s)
	}
	// Drop the map entry even if stopping the process fails below.
	defer delete(ml.models, s)
	retries := 1
	for model.GRPC(false, ml.wd).IsBusy() {
		xlog.Debug("Model busy. Waiting.", "model", s)
		dur := time.Duration(retries*2) * time.Second
		if dur > retryTimeout {
			dur = retryTimeout
		}
		time.Sleep(dur)
		retries++
		if retries > 10 && forceBackendShutdown {
			xlog.Warn("Model is still busy after retries. Forcing shutdown.", "model", s, "retries", retries)
			break
		}
	}
	xlog.Debug("Deleting process", "model", s)
	// Named "proc" so the local does not shadow the imported "process" package.
	proc := model.Process()
	if proc == nil {
		xlog.Error("No process", "model", s)
		// Nothing to do as there is no process
		return nil
	}
	err := proc.Stop()
	if err != nil {
		xlog.Error("(deleteProcess) error while deleting process", "error", err, "model", s)
	}
	return err
}
// StopGRPC stops every tracked backend process for which filter returns
// true. Failures do not abort the sweep; all errors are joined and returned.
func (ml *ModelLoader) StopGRPC(filter GRPCProcessFilter) error {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	var errs error
	for id, m := range ml.models {
		if !filter(id, m.Process()) {
			continue
		}
		errs = errors.Join(errs, ml.deleteProcess(id))
	}
	return errs
}
// StopAllGRPC stops every tracked backend process (StopGRPC with the
// match-everything filter).
func (ml *ModelLoader) StopAllGRPC() error {
	return ml.StopGRPC(all)
}
// GetGRPCPID returns the OS process id of the backend serving model id,
// or -1 and an error when the model is unknown or has no process.
func (ml *ModelLoader) GetGRPCPID(id string) (int, error) {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	m, ok := ml.models[id]
	if !ok {
		return -1, fmt.Errorf("no grpc backend found for %s", id)
	}
	proc := m.Process()
	if proc == nil {
		return -1, fmt.Errorf("no grpc backend found for %s", id)
	}
	// The process manager stores the PID as a string.
	return strconv.Atoi(proc.PID)
}
// startProcess launches a backend gRPC server binary and wires up logging,
// watchdog tracking and graceful-termination handling.
//
// It ensures the binary is executable, starts it with "--addr serverAddress"
// appended to args, registers the process with the watchdog (when present),
// installs a termination handler that stops the process, and streams the
// process's stdout/stderr into the debug log.
func (ml *ModelLoader) startProcess(grpcProcess, id string, serverAddress string, args ...string) (*process.Process, error) {
	// Make sure the process is executable
	// Check first if it has executable permissions
	if fi, err := os.Stat(grpcProcess); err == nil {
		if fi.Mode()&0111 == 0 {
			xlog.Debug("Process is not executable. Making it executable.", "process", grpcProcess)
			if err := os.Chmod(grpcProcess, 0700); err != nil {
				return nil, err
			}
		}
	}
	xlog.Debug("Loading GRPC Process", "process", grpcProcess)
	xlog.Debug("GRPC Service will be running", "id", id, "address", serverAddress)
	workDir, err := filepath.Abs(filepath.Dir(grpcProcess))
	if err != nil {
		return nil, err
	}
	grpcControlProcess := process.New(
		process.WithTemporaryStateDir(),
		process.WithName(filepath.Base(grpcProcess)),
		process.WithArgs(append(args, []string{"--addr", serverAddress}...)...),
		process.WithEnvironment(os.Environ()...),
		process.WithWorkDir(workDir),
	)
	if ml.wd != nil {
		ml.wd.Add(serverAddress, grpcControlProcess)
		ml.wd.AddAddressModelMap(serverAddress, id)
	}
	if err := grpcControlProcess.Run(); err != nil {
		return grpcControlProcess, err
	}
	xlog.Debug("GRPC Service state dir", "dir", grpcControlProcess.StateDir())
	signals.RegisterGracefulTerminationHandler(func() {
		err := grpcControlProcess.Stop()
		if err != nil {
			xlog.Error("error while shutting down grpc process", "error", err)
		}
	})
	go func() {
		t, err := tail.TailFile(grpcControlProcess.StderrPath(), tail.Config{Follow: true})
		if err != nil {
			xlog.Debug("Could not tail stderr")
			// BUGFIX: bail out here — on error t is nil, and ranging over
			// t.Lines below would panic the goroutine.
			return
		}
		for line := range t.Lines {
			xlog.Debug("GRPC stderr", "id", strings.Join([]string{id, serverAddress}, "-"), "line", line.Text)
		}
	}()
	go func() {
		t, err := tail.TailFile(grpcControlProcess.StdoutPath(), tail.Config{Follow: true})
		if err != nil {
			xlog.Debug("Could not tail stdout")
			// BUGFIX: same nil-tail guard as the stderr goroutine.
			return
		}
		for line := range t.Lines {
			xlog.Debug("GRPC stdout", "id", strings.Join([]string{id, serverAddress}, "-"), "line", line.Text)
		}
	}()
	return grpcControlProcess, nil
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/model/loader_options.go | pkg/model/loader_options.go | package model
import (
"context"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)
// Options collects the configuration applied when loading a model backend.
// Values are populated via the With* Option functions; defaults come from
// NewOptions.
type Options struct {
	backendString string // requested backend name
	model         string // model file to load
	modelID       string // identifier assigned to the loaded model instance
	context       context.Context
	gRPCOptions   *pb.ModelOptions
	// externalBackends maps backend name -> URI for externally-managed backends.
	externalBackends map[string]string
	grpcAttempts      int // defaults to 20 in NewOptions
	grpcAttemptsDelay int // defaults to 2 in NewOptions
	parallelRequests  bool
}
// Option mutates an Options value; options are applied in order by NewOptions.
type Option func(*Options)

// EnableParallelRequests sets the parallelRequests flag on the options.
var EnableParallelRequests = func(o *Options) {
	o.parallelRequests = true
}
// WithExternalBackend registers an externally-managed backend under name,
// reachable at uri. The backing map is allocated lazily on first use.
func WithExternalBackend(name string, uri string) Option {
	return func(o *Options) {
		if o.externalBackends == nil {
			o.externalBackends = make(map[string]string)
		}
		o.externalBackends[name] = uri
	}
}
// WithGRPCAttempts overrides the grpcAttempts option (default 20).
func WithGRPCAttempts(attempts int) Option {
	return func(o *Options) {
		o.grpcAttempts = attempts
	}
}
// WithGRPCAttemptsDelay overrides the grpcAttemptsDelay option (default 2).
func WithGRPCAttemptsDelay(delay int) Option {
	return func(o *Options) {
		o.grpcAttemptsDelay = delay
	}
}
// WithBackendString selects the backend to use by name.
func WithBackendString(backend string) Option {
	return func(o *Options) {
		o.backendString = backend
	}
}
// WithDefaultBackendString sets the backend name only when none has been set
// yet, i.e. it acts as a fallback default rather than an override.
func WithDefaultBackendString(backend string) Option {
	return func(o *Options) {
		if o.backendString == "" {
			o.backendString = backend
		}
	}
}
// WithModel sets the model file to load.
func WithModel(modelFile string) Option {
	return func(o *Options) {
		o.model = modelFile
	}
}
// WithLoadGRPCLoadModelOpts sets the protobuf model-options payload carried
// to the backend.
func WithLoadGRPCLoadModelOpts(opts *pb.ModelOptions) Option {
	return func(o *Options) {
		o.gRPCOptions = opts
	}
}
// WithContext sets the context stored in the options
// (defaults to context.Background() in NewOptions).
func WithContext(ctx context.Context) Option {
	return func(o *Options) {
		o.context = ctx
	}
}
// WithModelID sets the identifier for this model instance.
func WithModelID(id string) Option {
	return func(o *Options) {
		o.modelID = id
	}
}
// NewOptions builds an Options with defaults (empty gRPC model options,
// background context, 20 gRPC attempts with a delay of 2) and then applies
// each provided Option in order, so later options win over earlier ones.
func NewOptions(opts ...Option) *Options {
	defaults := &Options{
		gRPCOptions:       &pb.ModelOptions{},
		context:           context.Background(),
		grpcAttempts:      20,
		grpcAttemptsDelay: 2,
	}
	for _, apply := range opts {
		apply(defaults)
	}
	return defaults
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/model/model_suite_test.go | pkg/model/model_suite_test.go | package model_test
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// TestModel wires the Ginkgo spec suite for this package into the standard
// `go test` runner.
func TestModel(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "LocalAI model test")
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/model/filters.go | pkg/model/filters.go | package model
import (
process "github.com/mudler/go-processmanager"
)
// GRPCProcessFilter decides, per model id and its process, whether a tracked
// backend should be acted on (used by StopGRPC to select processes to stop).
type GRPCProcessFilter = func(id string, p *process.Process) bool

// all matches every tracked process.
func all(_ string, _ *process.Process) bool {
	return true
}
// allExcept returns a filter matching every model id except s.
func allExcept(s string) GRPCProcessFilter {
	return func(id string, p *process.Process) bool {
		return id != s
	}
}
// only returns a filter matching exactly the model id s.
func only(s string) GRPCProcessFilter {
	return func(id string, p *process.Process) bool {
		return id == s
	}
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/model/loader_test.go | pkg/model/loader_test.go | package model_test
import (
"errors"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/mudler/LocalAI/pkg/model"
"github.com/mudler/LocalAI/pkg/system"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Specs for ModelLoader: construction, model-path file discovery,
// load/shutdown lifecycle, and concurrent-loading behaviour.
var _ = Describe("ModelLoader", func() {
	var (
		modelLoader *model.ModelLoader
		modelPath   string
		mockModel   *model.Model
	)
	BeforeEach(func() {
		// Setup the model loader with a test directory
		modelPath = "/tmp/test_model_path"
		os.Mkdir(modelPath, 0755)
		systemState, err := system.GetSystemState(
			system.WithModelPath(modelPath),
		)
		Expect(err).ToNot(HaveOccurred())
		modelLoader = model.NewModelLoader(systemState)
	})
	AfterEach(func() {
		// Cleanup test directory
		os.RemoveAll(modelPath)
	})
	Context("NewModelLoader", func() {
		It("should create a new ModelLoader with an empty model map", func() {
			Expect(modelLoader).ToNot(BeNil())
			Expect(modelLoader.ModelPath).To(Equal(modelPath))
			Expect(modelLoader.ListLoadedModels()).To(BeEmpty())
		})
	})
	Context("ExistsInModelPath", func() {
		It("should return true if a file exists in the model path", func() {
			testFile := filepath.Join(modelPath, "test.model")
			os.Create(testFile)
			Expect(modelLoader.ExistsInModelPath("test.model")).To(BeTrue())
		})
		It("should return false if a file does not exist in the model path", func() {
			Expect(modelLoader.ExistsInModelPath("nonexistent.model")).To(BeFalse())
		})
	})
	Context("ListFilesInModelPath", func() {
		It("should list all valid model files in the model path", func() {
			// README.md is expected to be filtered out of the listing.
			os.Create(filepath.Join(modelPath, "test.model"))
			os.Create(filepath.Join(modelPath, "README.md"))
			files, err := modelLoader.ListFilesInModelPath()
			Expect(err).To(BeNil())
			Expect(files).To(ContainElement("test.model"))
			Expect(files).ToNot(ContainElement("README.md"))
		})
	})
	Context("LoadModel", func() {
		It("should load a model and keep it in memory", func() {
			mockModel = model.NewModel("foo", "test.model", nil)
			mockLoader := func(modelID, modelName, modelFile string) (*model.Model, error) {
				return mockModel, nil
			}
			model, err := modelLoader.LoadModel("foo", "test.model", mockLoader)
			Expect(err).To(BeNil())
			Expect(model).To(Equal(mockModel))
			Expect(modelLoader.CheckIsLoaded("foo")).To(Equal(mockModel))
		})
		It("should return an error if loading the model fails", func() {
			mockLoader := func(modelID, modelName, modelFile string) (*model.Model, error) {
				return nil, errors.New("failed to load model")
			}
			model, err := modelLoader.LoadModel("foo", "test.model", mockLoader)
			Expect(err).To(HaveOccurred())
			Expect(model).To(BeNil())
		})
	})
	Context("ShutdownModel", func() {
		It("should shutdown a loaded model", func() {
			mockLoader := func(modelID, modelName, modelFile string) (*model.Model, error) {
				return model.NewModel("foo", "test.model", nil), nil
			}
			_, err := modelLoader.LoadModel("foo", "test.model", mockLoader)
			Expect(err).To(BeNil())
			err = modelLoader.ShutdownModel("foo")
			Expect(err).To(BeNil())
			Expect(modelLoader.CheckIsLoaded("foo")).To(BeNil())
		})
	})
	Context("Concurrent Loading", func() {
		It("should handle concurrent requests for the same model", func() {
			var loadCount int32
			mockLoader := func(modelID, modelName, modelFile string) (*model.Model, error) {
				atomic.AddInt32(&loadCount, 1)
				time.Sleep(100 * time.Millisecond) // Simulate loading time
				return model.NewModel(modelID, modelName, nil), nil
			}
			var wg sync.WaitGroup
			results := make([]*model.Model, 5)
			errs := make([]error, 5)
			// Start 5 concurrent requests for the same model
			for i := 0; i < 5; i++ {
				wg.Add(1)
				go func(idx int) {
					defer wg.Done()
					results[idx], errs[idx] = modelLoader.LoadModel("concurrent-model", "test.model", mockLoader)
				}(i)
			}
			wg.Wait()
			// All requests should succeed
			for i := 0; i < 5; i++ {
				Expect(errs[i]).To(BeNil())
				Expect(results[i]).ToNot(BeNil())
			}
			// The loader should only have been called once
			Expect(atomic.LoadInt32(&loadCount)).To(Equal(int32(1)))
			// All results should be the same model instance
			for i := 1; i < 5; i++ {
				Expect(results[i]).To(Equal(results[0]))
			}
		})
		It("should handle concurrent requests for different models", func() {
			var loadCount int32
			mockLoader := func(modelID, modelName, modelFile string) (*model.Model, error) {
				atomic.AddInt32(&loadCount, 1)
				time.Sleep(50 * time.Millisecond) // Simulate loading time
				return model.NewModel(modelID, modelName, nil), nil
			}
			var wg sync.WaitGroup
			modelCount := 3
			// Start concurrent requests for different models
			for i := 0; i < modelCount; i++ {
				wg.Add(1)
				go func(idx int) {
					defer wg.Done()
					modelID := "model-" + string(rune('A'+idx))
					_, err := modelLoader.LoadModel(modelID, "test.model", mockLoader)
					Expect(err).To(BeNil())
				}(i)
			}
			wg.Wait()
			// Each model should be loaded exactly once
			Expect(atomic.LoadInt32(&loadCount)).To(Equal(int32(modelCount)))
			// All models should be loaded
			Expect(modelLoader.CheckIsLoaded("model-A")).ToNot(BeNil())
			Expect(modelLoader.CheckIsLoaded("model-B")).ToNot(BeNil())
			Expect(modelLoader.CheckIsLoaded("model-C")).ToNot(BeNil())
		})
		It("should track loading count correctly", func() {
			// Channels gate the mock loader so the test can observe the
			// in-flight loading count deterministically.
			loadStarted := make(chan struct{})
			loadComplete := make(chan struct{})
			mockLoader := func(modelID, modelName, modelFile string) (*model.Model, error) {
				close(loadStarted)
				<-loadComplete // Wait until we're told to complete
				return model.NewModel(modelID, modelName, nil), nil
			}
			// Start loading in background
			go func() {
				modelLoader.LoadModel("slow-model", "test.model", mockLoader)
			}()
			// Wait for loading to start
			<-loadStarted
			// Loading count should be 1
			Expect(modelLoader.GetLoadingCount()).To(Equal(1))
			// Complete the loading
			close(loadComplete)
			// Wait a bit for cleanup
			time.Sleep(50 * time.Millisecond)
			// Loading count should be back to 0
			Expect(modelLoader.GetLoadingCount()).To(Equal(0))
		})
		It("should retry loading if first attempt fails", func() {
			var attemptCount int32
			mockLoader := func(modelID, modelName, modelFile string) (*model.Model, error) {
				count := atomic.AddInt32(&attemptCount, 1)
				if count == 1 {
					return nil, errors.New("first attempt fails")
				}
				return model.NewModel(modelID, modelName, nil), nil
			}
			// First goroutine will fail
			var wg sync.WaitGroup
			wg.Add(2)
			var err1, err2 error
			var m1, m2 *model.Model
			go func() {
				defer wg.Done()
				m1, err1 = modelLoader.LoadModel("retry-model", "test.model", mockLoader)
			}()
			// Give first goroutine a head start
			time.Sleep(10 * time.Millisecond)
			go func() {
				defer wg.Done()
				m2, err2 = modelLoader.LoadModel("retry-model", "test.model", mockLoader)
			}()
			wg.Wait()
			// At least one should succeed (the second attempt after retry)
			successCount := 0
			if err1 == nil && m1 != nil {
				successCount++
			}
			if err2 == nil && m2 != nil {
				successCount++
			}
			Expect(successCount).To(BeNumerically(">=", 1))
		})
	})
	Context("GetLoadingCount", func() {
		It("should return 0 when nothing is loading", func() {
			Expect(modelLoader.GetLoadingCount()).To(Equal(0))
		})
	})
	Context("LRU Eviction Retry Settings", func() {
		It("should allow updating retry settings", func() {
			modelLoader.SetLRUEvictionRetrySettings(50, 2*time.Second)
			// Settings are updated - we can verify through behavior if needed
			// For now, just verify the call doesn't panic
			Expect(modelLoader).ToNot(BeNil())
		})
	})
})
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/store/client.go | pkg/store/client.go | package store
import (
"context"
"fmt"
grpc "github.com/mudler/LocalAI/pkg/grpc"
"github.com/mudler/LocalAI/pkg/grpc/proto"
)
// Wrapper for the GRPC client so that simple use cases are handled without verbosity
// SetCols sets multiple key-value pairs in the store
// It's in columnar format so that keys[i] is associated with values[i]
func SetCols(ctx context.Context, c grpc.Backend, keys [][]float32, values [][]byte) error {
	// Wrap the raw columns into their protobuf envelopes.
	protoKeys := make([]*proto.StoresKey, len(keys))
	for i := range keys {
		protoKeys[i] = &proto.StoresKey{Floats: keys[i]}
	}
	protoValues := make([]*proto.StoresValue, len(values))
	for i := range values {
		protoValues[i] = &proto.StoresValue{Bytes: values[i]}
	}
	res, err := c.StoresSet(ctx, &proto.StoresSetOptions{
		Keys:   protoKeys,
		Values: protoValues,
	})
	if err != nil {
		return err
	}
	if !res.Success {
		return fmt.Errorf("failed to set keys: %v", res.Message)
	}
	return nil
}
// SetSingle sets a single key-value pair in the store
// Don't call this in a tight loop, instead use SetCols
func SetSingle(ctx context.Context, c grpc.Backend, key []float32, value []byte) error {
	// Thin wrapper over the columnar batch API with a one-element batch.
	return SetCols(ctx, c, [][]float32{key}, [][]byte{value})
}
// DeleteCols deletes multiple key-value pairs from the store
// It's in columnar format so that keys[i] is associated with values[i]
func DeleteCols(ctx context.Context, c grpc.Backend, keys [][]float32) error {
	// Wrap the raw keys into their protobuf envelopes.
	protoKeys := make([]*proto.StoresKey, len(keys))
	for i := range keys {
		protoKeys[i] = &proto.StoresKey{Floats: keys[i]}
	}
	res, err := c.StoresDelete(ctx, &proto.StoresDeleteOptions{Keys: protoKeys})
	if err != nil {
		return err
	}
	if !res.Success {
		return fmt.Errorf("failed to delete keys: %v", res.Message)
	}
	return nil
}
// DeleteSingle deletes a single key-value pair from the store
// Don't call this in a tight loop, instead use DeleteCols
func DeleteSingle(ctx context.Context, c grpc.Backend, key []float32) error {
	// Thin wrapper over the columnar batch API with a one-element batch.
	return DeleteCols(ctx, c, [][]float32{key})
}
// GetCols gets multiple key-value pairs from the store
// It's in columnar format so that keys[i] is associated with values[i]
// Be warned the keys are sorted and will be returned in a different order than they were input
// There is no guarantee as to how the keys are sorted
func GetCols(ctx context.Context, c grpc.Backend, keys [][]float32) ([][]float32, [][]byte, error) {
	// Wrap the raw keys into their protobuf envelopes.
	protoKeys := make([]*proto.StoresKey, len(keys))
	for i := range keys {
		protoKeys[i] = &proto.StoresKey{Floats: keys[i]}
	}
	res, err := c.StoresGet(ctx, &proto.StoresGetOptions{Keys: protoKeys})
	if err != nil {
		return nil, nil, err
	}
	// Unwrap the response columns back into plain slices.
	outKeys := make([][]float32, len(res.Keys))
	for i, k := range res.Keys {
		outKeys[i] = k.Floats
	}
	outValues := make([][]byte, len(res.Values))
	for i, v := range res.Values {
		outValues[i] = v.Bytes
	}
	return outKeys, outValues, nil
}
// GetSingle gets a single key-value pair from the store
// Don't call this in a tight loop, instead use GetCols
func GetSingle(ctx context.Context, c grpc.Backend, key []float32) ([]byte, error) {
	_, vals, err := GetCols(ctx, c, [][]float32{key})
	if err != nil {
		return nil, err
	}
	// An empty result set means the key was not found.
	if len(vals) == 0 {
		return nil, fmt.Errorf("failed to get key")
	}
	return vals[0], nil
}
// Find similar keys to the given key. Returns the keys, values, and similarities
func Find(ctx context.Context, c grpc.Backend, key []float32, topk int) ([][]float32, [][]byte, []float32, error) {
	res, err := c.StoresFind(ctx, &proto.StoresFindOptions{
		Key:  &proto.StoresKey{Floats: key},
		TopK: int32(topk),
	})
	if err != nil {
		return nil, nil, nil, err
	}
	// Unwrap the protobuf response columns into plain slices.
	outKeys := make([][]float32, len(res.Keys))
	for i, k := range res.Keys {
		outKeys[i] = k.Floats
	}
	outValues := make([][]byte, len(res.Values))
	for i, v := range res.Values {
		outValues[i] = v.Bytes
	}
	return outKeys, outValues, res.Similarities, nil
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/huggingface-api/client.go | pkg/huggingface-api/client.go | package hfapi
import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"path/filepath"
	"strings"
	"time"
)
// Model represents a model from the Hugging Face API
type Model struct {
	ModelID        string                 `json:"modelId"`
	Author         string                 `json:"author"`
	Downloads      int                    `json:"downloads"`
	LastModified   string                 `json:"lastModified"`
	PipelineTag    string                 `json:"pipelineTag"`
	Private        bool                   `json:"private"`
	Tags           []string               `json:"tags"`
	CreatedAt      string                 `json:"createdAt"`
	UpdatedAt      string                 `json:"updatedAt"`
	Sha            string                 `json:"sha"`
	Config         map[string]interface{} `json:"config"`
	ModelIndex     string                 `json:"model_index"`
	LibraryName    string                 `json:"library_name"`
	MaskToken      string                 `json:"mask_token"`
	TokenizerClass string                 `json:"tokenizer_class"`
}

// FileInfo represents file information from HuggingFace
// as returned by the /tree API (Type is "file", "directory" or "folder").
type FileInfo struct {
	Type    string   `json:"type"`
	Oid     string   `json:"oid"`
	Size    int64    `json:"size"`
	Path    string   `json:"path"`
	LFS     *LFSInfo `json:"lfs,omitempty"` // only present for LFS-tracked files
	XetHash string   `json:"xetHash,omitempty"`
}

// LFSInfo represents LFS (Large File Storage) information.
// Oid carries the SHA256 digest of the file content (see GetFileSHA).
type LFSInfo struct {
	Oid         string `json:"oid"`
	Size        int64  `json:"size"`
	PointerSize int    `json:"pointerSize"`
}

// ModelFile represents a file in a model repository
type ModelFile struct {
	Path     string
	Size     int64
	SHA256   string
	IsReadme bool
	URL      string // direct /resolve/main/ download URL (see GetModelDetails)
}

// ModelDetails represents detailed information about a model
type ModelDetails struct {
	ModelID    string
	Author     string
	Files      []ModelFile
	ReadmeFile *ModelFile // first file whose name contains "readme", if any
	ReadmeContent string
}

// SearchParams represents the parameters for searching models
type SearchParams struct {
	Sort      string `json:"sort"`
	Direction int    `json:"direction"`
	Limit     int    `json:"limit"`
	Search    string `json:"search"`
}

// Client represents a Hugging Face API client
type Client struct {
	baseURL string
	client  *http.Client
}
// NewClient creates a new Hugging Face API client.
// The underlying HTTP client carries a request timeout so a stalled
// HuggingFace endpoint cannot hang callers indefinitely (the default
// http.Client has no timeout at all).
func NewClient() *Client {
	return &Client{
		baseURL: "https://huggingface.co/api/models",
		client:  &http.Client{Timeout: 30 * time.Second},
	}
}
// SearchModels searches for models using the Hugging Face API
func (c *Client) SearchModels(params SearchParams) ([]Model, error) {
	req, err := http.NewRequest("GET", c.baseURL, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	// Encode the search parameters into the query string.
	query := req.URL.Query()
	query.Add("sort", params.Sort)
	query.Add("direction", fmt.Sprintf("%d", params.Direction))
	query.Add("limit", fmt.Sprintf("%d", params.Limit))
	query.Add("search", params.Search)
	req.URL.RawQuery = query.Encode()
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to make request: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("failed to fetch models. Status code: %d", resp.StatusCode)
	}
	payload, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response body: %w", err)
	}
	// The endpoint returns a JSON array of model descriptors.
	var results []Model
	if err := json.Unmarshal(payload, &results); err != nil {
		return nil, fmt.Errorf("failed to parse JSON response: %w", err)
	}
	return results, nil
}
// GetLatest fetches the latest GGUF models
// matching searchTerm, sorted by lastModified descending (Direction -1),
// capped at limit results.
func (c *Client) GetLatest(searchTerm string, limit int) ([]Model, error) {
	params := SearchParams{
		Sort:      "lastModified",
		Direction: -1,
		Limit:     limit,
		Search:    searchTerm,
	}
	return c.SearchModels(params)
}
// BaseURL returns the current base URL
func (c *Client) BaseURL() string {
	return c.baseURL
}

// SetBaseURL sets a new base URL (useful for testing)
// e.g. pointing the client at an httptest server.
func (c *Client) SetBaseURL(url string) {
	c.baseURL = url
}
// listFilesInPath lists all files in a specific path of a HuggingFace repository (recursive helper)
//
// It queries the /tree/main API for the given path and walks directory
// entries recursively, accumulating file entries only.
func (c *Client) listFilesInPath(repoID, path string) ([]FileInfo, error) {
	// The tree API lives on the site root, not under /api/models.
	baseURL := strings.TrimSuffix(c.baseURL, "/api/models")
	var url string
	if path == "" {
		url = fmt.Sprintf("%s/api/models/%s/tree/main", baseURL, repoID)
	} else {
		url = fmt.Sprintf("%s/api/models/%s/tree/main/%s", baseURL, repoID, path)
	}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to make request: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("failed to fetch files. Status code: %d", resp.StatusCode)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response body: %w", err)
	}
	var items []FileInfo
	if err := json.Unmarshal(body, &items); err != nil {
		return nil, fmt.Errorf("failed to parse JSON response: %w", err)
	}
	var allFiles []FileInfo
	for _, item := range items {
		switch item.Type {
		// If it's a directory/folder, recursively list its contents
		case "directory", "folder":
			// Build the subfolder path
			// NOTE(review): this joins path + "/" + item.Path, which assumes
			// entry paths are relative to the listed directory; the disabled
			// block below suggests the API may already return repo-root-relative
			// paths — confirm against the HF tree API before changing.
			subPath := item.Path
			if path != "" {
				subPath = fmt.Sprintf("%s/%s", path, item.Path)
			}
			// Recursively get files from subfolder
			// The recursive call will already prepend the subPath to each file's path
			subFiles, err := c.listFilesInPath(repoID, subPath)
			if err != nil {
				return nil, fmt.Errorf("failed to list files in subfolder %s: %w", subPath, err)
			}
			allFiles = append(allFiles, subFiles...)
		case "file":
			// It's a file, prepend the current path to make it relative to root
			// if path != "" {
			// 	item.Path = fmt.Sprintf("%s/%s", path, item.Path)
			// }
			allFiles = append(allFiles, item)
		}
	}
	return allFiles, nil
}
// ListFiles lists all files in a HuggingFace repository, including files in subfolders
// (delegates to the recursive listFilesInPath starting at the repo root).
func (c *Client) ListFiles(repoID string) ([]FileInfo, error) {
	return c.listFilesInPath(repoID, "")
}
// GetFileSHA gets the SHA256 checksum for a specific file by searching through the file list
func (c *Client) GetFileSHA(repoID, fileName string) (string, error) {
	files, err := c.ListFiles(repoID)
	if err != nil {
		return "", fmt.Errorf("failed to list files while getting SHA: %w", err)
	}
	for _, f := range files {
		if filepath.Base(f.Path) != fileName {
			continue
		}
		// The LFS OID contains the SHA256 hash for LFS-tracked files.
		if f.LFS != nil && f.LFS.Oid != "" {
			return f.LFS.Oid, nil
		}
		// If no LFS, return the regular OID
		return f.Oid, nil
	}
	return "", fmt.Errorf("file %s not found", fileName)
}
// GetModelDetails gets detailed information about a model including files and checksums
func (c *Client) GetModelDetails(repoID string) (*ModelDetails, error) {
	files, err := c.ListFiles(repoID)
	if err != nil {
		return nil, fmt.Errorf("failed to list files: %w", err)
	}
	details := &ModelDetails{
		ModelID: repoID,
		Author:  strings.Split(repoID, "/")[0],
		Files:   make([]ModelFile, 0, len(files)),
	}
	baseURL := strings.TrimSuffix(c.baseURL, "/api/models")
	for _, f := range files {
		name := filepath.Base(f.Path)
		isReadme := strings.Contains(strings.ToLower(name), "readme")
		// Prefer the LFS OID (SHA256 digest) when present, otherwise fall
		// back to the plain git OID.
		checksum := f.Oid
		if f.LFS != nil && f.LFS.Oid != "" {
			checksum = f.LFS.Oid
		}
		// /resolve/main/ URLs follow LFS pointers to the actual file content.
		entry := ModelFile{
			Path:     f.Path,
			Size:     f.Size,
			SHA256:   checksum,
			IsReadme: isReadme,
			URL:      fmt.Sprintf("%s/%s/resolve/main/%s", baseURL, repoID, f.Path),
		}
		details.Files = append(details.Files, entry)
		// Remember the first README-like file encountered.
		if isReadme && details.ReadmeFile == nil {
			details.ReadmeFile = &entry
		}
	}
	return details, nil
}
// GetReadmeContent gets the content of a README file
func (c *Client) GetReadmeContent(repoID, readmePath string) (string, error) {
	base := strings.TrimSuffix(c.baseURL, "/api/models")
	// /raw/main/ serves the file body as plain text.
	readmeURL := fmt.Sprintf("%s/%s/raw/main/%s", base, repoID, readmePath)
	req, err := http.NewRequest("GET", readmeURL, nil)
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := c.client.Do(req)
	if err != nil {
		return "", fmt.Errorf("failed to make request: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("failed to fetch readme content. Status code: %d", resp.StatusCode)
	}
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response body: %w", err)
	}
	return string(data), nil
}
// FilterFilesByQuantization filters files by quantization type.
// A file matches when its base name contains the quantization string,
// case-insensitively. Returns nil when nothing matches.
func FilterFilesByQuantization(files []ModelFile, quantization string) []ModelFile {
	// Hoist the loop-invariant lowercase conversion out of the loop.
	needle := strings.ToLower(quantization)
	var filtered []ModelFile
	for _, file := range files {
		fileName := filepath.Base(file.Path)
		if strings.Contains(strings.ToLower(fileName), needle) {
			filtered = append(filtered, file)
		}
	}
	return filtered
}
// FindPreferredModelFile finds the preferred model file based on quantization preferences.
// Preferences are tried in order; the first file whose base name contains the
// preference (case-insensitively) wins. Returns nil when nothing matches.
func FindPreferredModelFile(files []ModelFile, preferences []string) *ModelFile {
	// Lowercase each base name once instead of on every (preference, file)
	// pair, avoiding O(len(preferences) * len(files)) ToLower calls.
	loweredNames := make([]string, len(files))
	for i := range files {
		loweredNames[i] = strings.ToLower(filepath.Base(files[i].Path))
	}
	for _, preference := range preferences {
		needle := strings.ToLower(preference)
		for i := range files {
			if strings.Contains(loweredNames[i], needle) {
				return &files[i]
			}
		}
	}
	return nil
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/huggingface-api/hfapi_suite_test.go | pkg/huggingface-api/hfapi_suite_test.go | package hfapi_test
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// TestHfapi hooks the Ginkgo spec suite for this package into the standard
// "go test" runner.
func TestHfapi(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "HuggingFace API Suite")
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/huggingface-api/client_test.go | pkg/huggingface-api/client_test.go | package hfapi_test
import (
"fmt"
"net/http"
"net/http/httptest"
"strings"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
)
// Specs for the HuggingFace API client. Most Contexts spin up an httptest
// server and point the client at it; AfterEach tears the last server down.
// Note that Its that replace the BeforeEach server must close the old one
// first, otherwise it leaks for the remainder of the suite.
// Two specs near the bottom deliberately hit the live huggingface.co API.
var _ = Describe("HuggingFace API Client", func() {
	var (
		client *hfapi.Client
		server *httptest.Server
	)

	BeforeEach(func() {
		client = hfapi.NewClient()
	})

	AfterEach(func() {
		if server != nil {
			server.Close()
		}
	})

	Context("when creating a new client", func() {
		It("should initialize with correct base URL", func() {
			Expect(client).ToNot(BeNil())
			Expect(client.BaseURL()).To(Equal("https://huggingface.co/api/models"))
		})
	})

	Context("when searching for models", func() {
		BeforeEach(func() {
			// Mock response data
			mockResponse := `[
				{
					"modelId": "test-model-1",
					"author": "test-author",
					"downloads": 1000,
					"lastModified": "2024-01-01T00:00:00.000Z",
					"pipelineTag": "text-generation",
					"private": false,
					"tags": ["gguf", "llama"],
					"createdAt": "2024-01-01T00:00:00.000Z",
					"updatedAt": "2024-01-01T00:00:00.000Z",
					"sha": "abc123",
					"config": {},
					"model_index": "test-index",
					"library_name": "transformers",
					"mask_token": null,
					"tokenizer_class": "LlamaTokenizer"
				},
				{
					"modelId": "test-model-2",
					"author": "test-author-2",
					"downloads": 2000,
					"lastModified": "2024-01-02T00:00:00.000Z",
					"pipelineTag": "text-generation",
					"private": false,
					"tags": ["gguf", "mistral"],
					"createdAt": "2024-01-02T00:00:00.000Z",
					"updatedAt": "2024-01-02T00:00:00.000Z",
					"sha": "def456",
					"config": {},
					"model_index": "test-index-2",
					"library_name": "transformers",
					"mask_token": null,
					"tokenizer_class": "MistralTokenizer"
				}
			]`
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				// Verify request parameters
				Expect(r.URL.Query().Get("sort")).To(Equal("lastModified"))
				Expect(r.URL.Query().Get("direction")).To(Equal("-1"))
				Expect(r.URL.Query().Get("limit")).To(Equal("30"))
				Expect(r.URL.Query().Get("search")).To(Equal("GGUF"))
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				w.Write([]byte(mockResponse))
			}))
			// Override the client's base URL to use our mock server
			client.SetBaseURL(server.URL)
		})

		It("should successfully search for models", func() {
			params := hfapi.SearchParams{
				Sort:      "lastModified",
				Direction: -1,
				Limit:     30,
				Search:    "GGUF",
			}
			models, err := client.SearchModels(params)
			Expect(err).ToNot(HaveOccurred())
			Expect(models).To(HaveLen(2))
			// Verify first model
			Expect(models[0].ModelID).To(Equal("test-model-1"))
			Expect(models[0].Author).To(Equal("test-author"))
			Expect(models[0].Downloads).To(Equal(1000))
			Expect(models[0].PipelineTag).To(Equal("text-generation"))
			Expect(models[0].Private).To(BeFalse())
			Expect(models[0].Tags).To(ContainElements("gguf", "llama"))
			// Verify second model
			Expect(models[1].ModelID).To(Equal("test-model-2"))
			Expect(models[1].Author).To(Equal("test-author-2"))
			Expect(models[1].Downloads).To(Equal(2000))
			Expect(models[1].Tags).To(ContainElements("gguf", "mistral"))
		})

		It("should handle empty search results", func() {
			// Shut down the server started by BeforeEach before replacing it,
			// otherwise it leaks (AfterEach only closes the latest one).
			server.Close()
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				w.Write([]byte("[]"))
			}))
			client.SetBaseURL(server.URL)
			params := hfapi.SearchParams{
				Sort:      "lastModified",
				Direction: -1,
				Limit:     30,
				Search:    "nonexistent",
			}
			models, err := client.SearchModels(params)
			Expect(err).ToNot(HaveOccurred())
			Expect(models).To(HaveLen(0))
		})

		It("should handle HTTP errors", func() {
			// Close the BeforeEach server before swapping in the failing one.
			server.Close()
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusInternalServerError)
				w.Write([]byte("Internal Server Error"))
			}))
			client.SetBaseURL(server.URL)
			params := hfapi.SearchParams{
				Sort:      "lastModified",
				Direction: -1,
				Limit:     30,
				Search:    "GGUF",
			}
			models, err := client.SearchModels(params)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("Status code: 500"))
			Expect(models).To(BeNil())
		})

		It("should handle malformed JSON response", func() {
			// Close the BeforeEach server before swapping in the bad-JSON one.
			server.Close()
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				w.Write([]byte("invalid json"))
			}))
			client.SetBaseURL(server.URL)
			params := hfapi.SearchParams{
				Sort:      "lastModified",
				Direction: -1,
				Limit:     30,
				Search:    "GGUF",
			}
			models, err := client.SearchModels(params)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("failed to parse JSON response"))
			Expect(models).To(BeNil())
		})
	})

	Context("when getting latest GGUF models", func() {
		BeforeEach(func() {
			mockResponse := `[
				{
					"modelId": "latest-gguf-model",
					"author": "gguf-author",
					"downloads": 5000,
					"lastModified": "2024-01-03T00:00:00.000Z",
					"pipelineTag": "text-generation",
					"private": false,
					"tags": ["gguf", "latest"],
					"createdAt": "2024-01-03T00:00:00.000Z",
					"updatedAt": "2024-01-03T00:00:00.000Z",
					"sha": "latest123",
					"config": {},
					"model_index": "latest-index",
					"library_name": "transformers",
					"mask_token": null,
					"tokenizer_class": "LlamaTokenizer"
				}
			]`
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				// Verify the search parameters are correct for GGUF search
				Expect(r.URL.Query().Get("search")).To(Equal("GGUF"))
				Expect(r.URL.Query().Get("sort")).To(Equal("lastModified"))
				Expect(r.URL.Query().Get("direction")).To(Equal("-1"))
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				w.Write([]byte(mockResponse))
			}))
			client.SetBaseURL(server.URL)
		})

		It("should fetch latest GGUF models with correct parameters", func() {
			models, err := client.GetLatest("GGUF", 10)
			Expect(err).ToNot(HaveOccurred())
			Expect(models).To(HaveLen(1))
			Expect(models[0].ModelID).To(Equal("latest-gguf-model"))
			Expect(models[0].Author).To(Equal("gguf-author"))
			Expect(models[0].Downloads).To(Equal(5000))
			Expect(models[0].Tags).To(ContainElements("gguf", "latest"))
		})

		It("should use custom search term", func() {
			// Close the BeforeEach server before swapping in a new one.
			server.Close()
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				Expect(r.URL.Query().Get("search")).To(Equal("custom-search"))
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				w.Write([]byte("[]"))
			}))
			client.SetBaseURL(server.URL)
			models, err := client.GetLatest("custom-search", 5)
			Expect(err).ToNot(HaveOccurred())
			Expect(models).To(HaveLen(0))
		})
	})

	Context("when handling network errors", func() {
		It("should handle connection failures gracefully", func() {
			// Use an invalid URL to simulate connection failure
			client.SetBaseURL("http://invalid-url-that-does-not-exist")
			params := hfapi.SearchParams{
				Sort:      "lastModified",
				Direction: -1,
				Limit:     30,
				Search:    "GGUF",
			}
			models, err := client.SearchModels(params)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("failed to make request"))
			Expect(models).To(BeNil())
		})
	})

	Context("when getting file SHA on remote model", func() {
		// NOTE: this spec hits the live huggingface.co API (no mock server is
		// installed here), so it requires network access.
		It("should get file SHA successfully", func() {
			sha, err := client.GetFileSHA(
				"mudler/LocalAI-functioncall-qwen2.5-7b-v0.5-Q4_K_M-GGUF", "localai-functioncall-qwen2.5-7b-v0.5-q4_k_m.gguf")
			Expect(err).ToNot(HaveOccurred())
			Expect(sha).To(Equal("4e7b7fe1d54b881f1ef90799219dc6cc285d29db24f559c8998d1addb35713d4"))
		})
	})

	Context("when listing files", func() {
		BeforeEach(func() {
			mockFilesResponse := `[
				{
					"type": "file",
					"path": "model-Q4_K_M.gguf",
					"size": 1000000,
					"oid": "abc123",
					"lfs": {
						"oid": "def456789",
						"size": 1000000,
						"pointerSize": 135
					}
				},
				{
					"type": "file",
					"path": "README.md",
					"size": 5000,
					"oid": "readme123"
				},
				{
					"type": "file",
					"path": "config.json",
					"size": 1000,
					"oid": "config123"
				}
			]`
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				if strings.Contains(r.URL.Path, "/tree/main") {
					w.Header().Set("Content-Type", "application/json")
					w.WriteHeader(http.StatusOK)
					w.Write([]byte(mockFilesResponse))
				} else {
					w.WriteHeader(http.StatusNotFound)
				}
			}))
			client.SetBaseURL(server.URL)
		})

		It("should list files successfully", func() {
			files, err := client.ListFiles("test/model")
			Expect(err).ToNot(HaveOccurred())
			Expect(files).To(HaveLen(3))
			Expect(files[0].Path).To(Equal("model-Q4_K_M.gguf"))
			Expect(files[0].Size).To(Equal(int64(1000000)))
			Expect(files[0].LFS).ToNot(BeNil())
			Expect(files[0].LFS.Oid).To(Equal("def456789"))
			Expect(files[1].Path).To(Equal("README.md"))
			Expect(files[1].Size).To(Equal(int64(5000)))
		})
	})

	Context("when listing files with subfolders", func() {
		BeforeEach(func() {
			// Mock response for root directory with files and a subfolder
			mockRootResponse := `[
				{
					"type": "file",
					"path": "README.md",
					"size": 5000,
					"oid": "readme123"
				},
				{
					"type": "directory",
					"path": "subfolder",
					"size": 0,
					"oid": "dir123"
				},
				{
					"type": "file",
					"path": "config.json",
					"size": 1000,
					"oid": "config123"
				}
			]`
			// Mock response for subfolder directory
			mockSubfolderResponse := `[
				{
					"type": "file",
					"path": "subfolder/file.bin",
					"size": 2000000,
					"oid": "filebin123",
					"lfs": {
						"oid": "filebin456",
						"size": 2000000,
						"pointerSize": 135
					}
				},
				{
					"type": "directory",
					"path": "nested",
					"size": 0,
					"oid": "nesteddir123"
				}
			]`
			// Mock response for nested subfolder
			mockNestedResponse := `[
				{
					"type": "file",
					"path": "subfolder/nested/nested_file.gguf",
					"size": 5000000,
					"oid": "nested123",
					"lfs": {
						"oid": "nested456",
						"size": 5000000,
						"pointerSize": 135
					}
				}
			]`
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				urlPath := r.URL.Path
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				// Dispatch on the deepest path first: the nested check must
				// precede the plain subfolder and root checks.
				if strings.Contains(urlPath, "/tree/main/subfolder/nested") {
					w.Write([]byte(mockNestedResponse))
				} else if strings.Contains(urlPath, "/tree/main/subfolder") {
					w.Write([]byte(mockSubfolderResponse))
				} else if strings.Contains(urlPath, "/tree/main") {
					w.Write([]byte(mockRootResponse))
				} else {
					w.WriteHeader(http.StatusNotFound)
				}
			}))
			client.SetBaseURL(server.URL)
		})

		It("should recursively list all files including those in subfolders", func() {
			files, err := client.ListFiles("test/model")
			Expect(err).ToNot(HaveOccurred())
			Expect(files).To(HaveLen(4))
			// Verify root level files
			readmeFile := findFileByPath(files, "README.md")
			Expect(readmeFile).ToNot(BeNil())
			Expect(readmeFile.Size).To(Equal(int64(5000)))
			Expect(readmeFile.Oid).To(Equal("readme123"))
			configFile := findFileByPath(files, "config.json")
			Expect(configFile).ToNot(BeNil())
			Expect(configFile.Size).To(Equal(int64(1000)))
			Expect(configFile.Oid).To(Equal("config123"))
			// Verify subfolder file with relative path
			subfolderFile := findFileByPath(files, "subfolder/file.bin")
			Expect(subfolderFile).ToNot(BeNil())
			Expect(subfolderFile.Size).To(Equal(int64(2000000)))
			Expect(subfolderFile.LFS).ToNot(BeNil())
			Expect(subfolderFile.LFS.Oid).To(Equal("filebin456"))
			// Verify nested subfolder file
			nestedFile := findFileByPath(files, "subfolder/nested/nested_file.gguf")
			Expect(nestedFile).ToNot(BeNil())
			Expect(nestedFile.Size).To(Equal(int64(5000000)))
			Expect(nestedFile.LFS).ToNot(BeNil())
			Expect(nestedFile.LFS.Oid).To(Equal("nested456"))
		})

		It("should handle files with correct relative paths", func() {
			files, err := client.ListFiles("test/model")
			Expect(err).ToNot(HaveOccurred())
			// Check that all paths are relative and correct
			paths := make([]string, len(files))
			for i, file := range files {
				paths[i] = file.Path
			}
			Expect(paths).To(ContainElements(
				"README.md",
				"config.json",
				"subfolder/file.bin",
				"subfolder/nested/nested_file.gguf",
			))
		})
	})

	Context("when getting file SHA", func() {
		BeforeEach(func() {
			mockFilesResponse := `[
				{
					"type": "file",
					"path": "model-Q4_K_M.gguf",
					"size": 1000000,
					"oid": "abc123",
					"lfs": {
						"oid": "def456789",
						"size": 1000000,
						"pointerSize": 135
					}
				}
			]`
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				if strings.Contains(r.URL.Path, "/tree/main") {
					w.Header().Set("Content-Type", "application/json")
					w.WriteHeader(http.StatusOK)
					w.Write([]byte(mockFilesResponse))
				} else {
					w.WriteHeader(http.StatusNotFound)
				}
			}))
			client.SetBaseURL(server.URL)
		})

		It("should get file SHA successfully", func() {
			sha, err := client.GetFileSHA("test/model", "model-Q4_K_M.gguf")
			Expect(err).ToNot(HaveOccurred())
			Expect(sha).To(Equal("def456789"))
		})

		It("should handle missing SHA gracefully", func() {
			// Close the BeforeEach server before swapping in a new one.
			server.Close()
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				if strings.Contains(r.URL.Path, "/tree/main") {
					w.Header().Set("Content-Type", "application/json")
					w.WriteHeader(http.StatusOK)
					w.Write([]byte(`[
						{
							"type": "file",
							"path": "file.txt",
							"size": 100,
							"oid": "file123"
						}
					]`))
				} else {
					w.WriteHeader(http.StatusNotFound)
				}
			}))
			client.SetBaseURL(server.URL)
			sha, err := client.GetFileSHA("test/model", "file.txt")
			Expect(err).ToNot(HaveOccurred())
			// When there's no LFS, it should return the OID
			Expect(sha).To(Equal("file123"))
		})
	})

	Context("when getting model details", func() {
		BeforeEach(func() {
			mockFilesResponse := `[
				{
					"type": "file",
					"path": "model-Q4_K_M.gguf",
					"size": 1000000,
					"oid": "abc123",
					"lfs": {
						"oid": "sha256:def456",
						"size": 1000000,
						"pointer": "version https://git-lfs.github.com/spec/v1",
						"sha256": "def456789"
					}
				},
				{
					"type": "file",
					"path": "README.md",
					"size": 5000,
					"oid": "readme123"
				}
			]`
			mockFileInfoResponse := `{
				"path": "model-Q4_K_M.gguf",
				"size": 1000000,
				"oid": "abc123",
				"lfs": {
					"oid": "sha256:def456",
					"size": 1000000,
					"pointer": "version https://git-lfs.github.com/spec/v1",
					"sha256": "def456789"
				}
			}`
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				if strings.Contains(r.URL.Path, "/tree/main") {
					w.Header().Set("Content-Type", "application/json")
					w.WriteHeader(http.StatusOK)
					w.Write([]byte(mockFilesResponse))
				} else if strings.Contains(r.URL.Path, "/paths-info") {
					w.Header().Set("Content-Type", "application/json")
					w.WriteHeader(http.StatusOK)
					w.Write([]byte(mockFileInfoResponse))
				} else {
					w.WriteHeader(http.StatusNotFound)
				}
			}))
			client.SetBaseURL(server.URL)
		})

		It("should get model details successfully", func() {
			details, err := client.GetModelDetails("test/model")
			Expect(err).ToNot(HaveOccurred())
			Expect(details.ModelID).To(Equal("test/model"))
			Expect(details.Author).To(Equal("test"))
			Expect(details.Files).To(HaveLen(2))
			Expect(details.ReadmeFile).ToNot(BeNil())
			Expect(details.ReadmeFile.Path).To(Equal("README.md"))
			Expect(details.ReadmeFile.IsReadme).To(BeTrue())
			// Verify URLs are set for all files
			baseURL := strings.TrimSuffix(server.URL, "/api/models")
			for _, file := range details.Files {
				expectedURL := fmt.Sprintf("%s/test/model/resolve/main/%s", baseURL, file.Path)
				Expect(file.URL).To(Equal(expectedURL))
			}
		})
	})

	Context("when getting README content", func() {
		BeforeEach(func() {
			mockReadmeContent := "# Test Model\n\nThis is a test model for demonstration purposes."
			server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				if strings.Contains(r.URL.Path, "/raw/main/") {
					w.Header().Set("Content-Type", "text/plain")
					w.WriteHeader(http.StatusOK)
					w.Write([]byte(mockReadmeContent))
				} else {
					w.WriteHeader(http.StatusNotFound)
				}
			}))
			client.SetBaseURL(server.URL)
		})

		It("should get README content successfully", func() {
			content, err := client.GetReadmeContent("test/model", "README.md")
			Expect(err).ToNot(HaveOccurred())
			Expect(content).To(Equal("# Test Model\n\nThis is a test model for demonstration purposes."))
		})
	})

	Context("when filtering files", func() {
		It("should filter files by quantization", func() {
			files := []hfapi.ModelFile{
				{Path: "model-Q4_K_M.gguf"},
				{Path: "model-Q3_K_M.gguf"},
				{Path: "README.md", IsReadme: true},
			}
			filtered := hfapi.FilterFilesByQuantization(files, "Q4_K_M")
			Expect(filtered).To(HaveLen(1))
			Expect(filtered[0].Path).To(Equal("model-Q4_K_M.gguf"))
		})

		It("should find preferred model file", func() {
			files := []hfapi.ModelFile{
				{Path: "model-Q3_K_M.gguf"},
				{Path: "model-Q4_K_M.gguf"},
				{Path: "README.md", IsReadme: true},
			}
			preferences := []string{"Q4_K_M", "Q3_K_M"}
			preferred := hfapi.FindPreferredModelFile(files, preferences)
			Expect(preferred).ToNot(BeNil())
			Expect(preferred.Path).To(Equal("model-Q4_K_M.gguf"))
			Expect(preferred.IsReadme).To(BeFalse())
		})

		It("should return nil if no preferred file found", func() {
			files := []hfapi.ModelFile{
				{Path: "model-Q2_K.gguf"},
				{Path: "README.md", IsReadme: true},
			}
			preferences := []string{"Q4_K_M", "Q3_K_M"}
			preferred := hfapi.FindPreferredModelFile(files, preferences)
			Expect(preferred).To(BeNil())
		})
	})

	Context("integration test with real HuggingFace API", func() {
		It("should recursively list all files including subfolders from real repository", func() {
			// This test makes actual API calls to HuggingFace
			// Skip if running in CI or if network is not available
			realClient := hfapi.NewClient()
			repoID := "bartowski/Qwen_Qwen3-Next-80B-A3B-Instruct-GGUF"
			files, err := realClient.ListFiles(repoID)
			Expect(err).ToNot(HaveOccurred())
			Expect(files).ToNot(BeEmpty(), "should return at least some files")
			// Verify that we get files from subfolders
			// Based on the repository structure, there should be files in subfolders like:
			// - Qwen_Qwen3-Next-80B-A3B-Instruct-Q4_1/...
			// - Qwen_Qwen3-Next-80B-A3B-Instruct-Q5_K_L/...
			// etc.
			hasSubfolderFiles := false
			rootLevelFiles := 0
			subfolderFiles := 0
			for _, file := range files {
				if strings.Contains(file.Path, "/") {
					hasSubfolderFiles = true
					subfolderFiles++
					// Verify the path format is correct (subfolder/file.gguf)
					Expect(file.Path).ToNot(HavePrefix("/"), "paths should be relative, not absolute")
					Expect(file.Path).ToNot(HaveSuffix("/"), "file paths should not end with /")
				} else {
					rootLevelFiles++
				}
			}
			Expect(hasSubfolderFiles).To(BeTrue(), "should find files in subfolders")
			Expect(rootLevelFiles).To(BeNumerically(">", 0), "should find files at root level")
			Expect(subfolderFiles).To(BeNumerically(">", 0), "should find files in subfolders")
			// Verify specific expected files exist
			// Root level files
			readmeFile := findFileByPath(files, "README.md")
			Expect(readmeFile).ToNot(BeNil(), "README.md should exist at root level")
			// Verify we can find files in subfolders
			// Look for any file in a subfolder (the exact structure may vary, can be nested)
			foundSubfolderFile := false
			for _, file := range files {
				if strings.Contains(file.Path, "/") && strings.HasSuffix(file.Path, ".gguf") {
					foundSubfolderFile = true
					// Verify the path structure: can be nested like subfolder/subfolder/file.gguf
					parts := strings.Split(file.Path, "/")
					Expect(len(parts)).To(BeNumerically(">=", 2), "subfolder files should have at least subfolder/file.gguf format")
					// The last part should be the filename
					Expect(parts[len(parts)-1]).To(HaveSuffix(".gguf"), "file in subfolder should be a .gguf file")
					Expect(parts[len(parts)-1]).ToNot(BeEmpty(), "filename should not be empty")
					break
				}
			}
			Expect(foundSubfolderFile).To(BeTrue(), "should find at least one .gguf file in a subfolder")
			// Verify file properties are populated
			for _, file := range files {
				Expect(file.Path).ToNot(BeEmpty(), "file path should not be empty")
				Expect(file.Type).To(Equal("file"), "all returned items should be files, not directories")
				// Size might be 0 for some files, but OID should be present
				if file.LFS == nil {
					Expect(file.Oid).ToNot(BeEmpty(), "file should have an OID if no LFS")
				}
			}
		})
	})
})
// findFileByPath is a helper that returns a pointer to the first entry in
// files whose Path equals path, or nil when there is no match.
func findFileByPath(files []hfapi.FileInfo, path string) *hfapi.FileInfo {
	for idx := range files {
		if files[idx].Path != path {
			continue
		}
		return &files[idx]
	}
	return nil
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/concurrency/jobresult_test.go | pkg/concurrency/jobresult_test.go | package concurrency_test
import (
"context"
"fmt"
"time"
. "github.com/mudler/LocalAI/pkg/concurrency"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Specs for the JobResult/WritableJobResult pair: result delivery, error
// delivery, and context-timeout handling across goroutines.
// Note: the original specs asserted Expect(jr.Request).ToNot(BeNil()) — a
// method *value*, which is always non-nil and proves nothing. They now call
// jr.Request() and check the returned pointer instead.
var _ = Describe("pkg/concurrency unit tests", func() {
	It("can be used to receive a result across goroutines", func() {
		jr, wjr := NewJobResult[string, string]("foo")
		Expect(jr).ToNot(BeNil())
		Expect(wjr).ToNot(BeNil())
		go func(wjr *WritableJobResult[string, string]) {
			time.Sleep(time.Second * 5)
			wjr.SetResult("bar", nil)
		}(wjr)
		resPtr, err := jr.Wait(context.Background())
		Expect(err).To(BeNil())
		Expect(jr.Request()).ToNot(BeNil())
		Expect(*jr.Request()).To(Equal("foo"))
		Expect(resPtr).ToNot(BeNil())
		Expect(*resPtr).To(Equal("bar"))
	})

	It("can be used to receive an error across goroutines", func() {
		jr, wjr := NewJobResult[string, string]("foo")
		Expect(jr).ToNot(BeNil())
		Expect(wjr).ToNot(BeNil())
		go func(wjr *WritableJobResult[string, string]) {
			time.Sleep(time.Second * 5)
			wjr.SetResult("", fmt.Errorf("test"))
		}(wjr)
		_, err := jr.Wait(context.Background())
		Expect(jr.Request()).ToNot(BeNil())
		Expect(*jr.Request()).To(Equal("foo"))
		Expect(err).ToNot(BeNil())
		Expect(err).To(MatchError("test"))
	})

	It("can properly handle timeouts", func() {
		jr, wjr := NewJobResult[string, string]("foo")
		Expect(jr).ToNot(BeNil())
		Expect(wjr).ToNot(BeNil())
		go func(wjr *WritableJobResult[string, string]) {
			time.Sleep(time.Second * 5)
			wjr.SetResult("bar", nil)
		}(wjr)
		// First Wait uses a 1s deadline (shorter than the 5s producer sleep)
		// and must time out; the second uses 10s and must see the result.
		timeout1s, c1 := context.WithTimeoutCause(context.Background(), time.Second, fmt.Errorf("timeout"))
		timeout10s, c2 := context.WithTimeoutCause(context.Background(), time.Second*10, fmt.Errorf("timeout"))
		_, err := jr.Wait(timeout1s)
		Expect(jr.Request()).ToNot(BeNil())
		Expect(*jr.Request()).To(Equal("foo"))
		Expect(err).ToNot(BeNil())
		Expect(err).To(MatchError(context.DeadlineExceeded))
		resPtr, err := jr.Wait(timeout10s)
		Expect(jr.Request()).ToNot(BeNil())
		Expect(*jr.Request()).To(Equal("foo"))
		Expect(err).To(BeNil())
		Expect(resPtr).ToNot(BeNil())
		Expect(*resPtr).To(Equal("bar"))
		// Release the contexts' timer resources.
		c1()
		c2()
	})
})
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/concurrency/jobresult.go | pkg/concurrency/jobresult.go | package concurrency
import (
"context"
"sync"
)
// JobResult is a read-only handle to the result of an arbitrary asynchronous
// action. Consumers obtain one from NewJobResult and call Wait; only the
// paired WritableJobResult can store the outcome.
type JobResult[RequestType any, ResultType any] struct {
	request *RequestType
	result  *ResultType
	err     error
	once    sync.Once
	done    *chan struct{} // closed exactly once by setResult to signal readiness
}

// WritableJobResult is returned in a pair with a JobResult and is the only
// handle that may update it.
type WritableJobResult[RequestType any, ResultType any] struct {
	*JobResult[RequestType, ResultType]
}

// Wait blocks until the result is ready or ctx is done, whichever comes first.
// It returns *ResultType rather than ResultType because on error there is no
// meaningful result, and nil makes that explicit.
//
// Wait is safe to call repeatedly and from multiple goroutines. The previous
// implementation set jr.done = nil after the first successful receive, which
// (a) was a data race for concurrent waiters mutating a shared field, and
// (b) made subsequent error-path calls return a non-nil result pointer,
// unlike the first call. Receiving from a closed channel never blocks, so the
// mutation was unnecessary; it has been removed.
func (jr *JobResult[RequestType, ResultType]) Wait(ctx context.Context) (*ResultType, error) {
	if jr.done == nil {
		// Zero-value JobResult (not built via NewJobResult): nothing to wait on.
		return jr.result, jr.err
	}
	select {
	case <-*jr.done: // closed by setResult; re-receiving is non-blocking
		if jr.err != nil {
			return nil, jr.err
		}
		return jr.result, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// Request exposes the associated request without allowing the pointer to be
// replaced by the caller.
func (jr *JobResult[RequestType, ResultType]) Request() *RequestType {
	return jr.request
}

// setResult stores the outcome exactly once and signals readiness by closing
// the done channel; later calls are no-ops thanks to sync.Once.
func (jr *JobResult[RequestType, ResultType]) setResult(result ResultType, err error) {
	jr.once.Do(func() {
		jr.result = &result
		jr.err = err
		close(*jr.done) // wake every current and future Wait
	})
}

// SetResult is the only public mutator; restricting it to WritableJobResult
// prevents accidental corruption by readers holding the plain JobResult.
func (wjr *WritableJobResult[RequestType, ResultType]) SetResult(result ResultType, err error) {
	wjr.JobResult.setResult(result, err)
}

// NewJobResult binds a request to a matched pair of JobResult and WritableJobResult.
func NewJobResult[RequestType any, ResultType any](request RequestType) (*JobResult[RequestType, ResultType], *WritableJobResult[RequestType, ResultType]) {
	done := make(chan struct{})
	jr := &JobResult[RequestType, ResultType]{
		once:    sync.Once{},
		request: &request,
		done:    &done,
	}
	return jr, &WritableJobResult[RequestType, ResultType]{JobResult: jr}
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/concurrency/concurrency_suite_test.go | pkg/concurrency/concurrency_suite_test.go | package concurrency
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// TestConcurrency hooks the Ginkgo spec suite for this package into the
// standard "go test" runner.
func TestConcurrency(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Concurrency test suite")
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/utils/json.go | pkg/utils/json.go | package utils
import "regexp"
// matchNewlines finds raw CR/LF characters to be escaped.
var matchNewlines = regexp.MustCompile(`[\r\n]`)

// doubleQuote matches a JSON-style double-quoted string, including any
// backslash-escaped characters inside it.
const doubleQuote = `"[^"\\]*(?:\\[\s\S][^"\\]*)*"`

// matchDoubleQuote is compiled once at package init; the previous code
// recompiled the pattern on every EscapeNewLines call.
var matchDoubleQuote = regexp.MustCompile(doubleQuote)

// EscapeNewLines replaces raw CR/LF characters that appear inside
// double-quoted strings with the literal two-character sequence `\n`,
// leaving text outside quotes untouched.
func EscapeNewLines(s string) string {
	return matchDoubleQuote.ReplaceAllStringFunc(s, func(quoted string) string {
		return matchNewlines.ReplaceAllString(quoted, "\\n")
	})
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/utils/logging.go | pkg/utils/logging.go | package utils
import (
"time"
"github.com/mudler/xlog"
)
// lastProgress is the instant of the most recent progress log line;
// startTime is when the current download began (used for ETA math).
var (
	lastProgress = time.Now()
	startTime    = time.Now()
)

// ResetDownloadTimers restarts the clocks used for progress throttling and
// ETA estimation, e.g. at the beginning of a new download.
func ResetDownloadTimers() {
	now := time.Now()
	lastProgress = now
	startTime = now
}
// DisplayDownloadFunction logs download progress, throttled to at most one
// line every five seconds, with an ETA extrapolated from elapsed time and
// the completion percentage.
func DisplayDownloadFunction(fileName string, current string, total string, percentage float64) {
	currentTime := time.Now()
	if currentTime.Sub(lastProgress) < 5*time.Second {
		return // throttle: last line was printed less than 5s ago
	}
	lastProgress = currentTime

	// calculate ETA based on percentage and elapsed time:
	// total ≈ elapsed * (100 / percentage), so ETA = total - elapsed.
	var eta time.Duration
	if percentage > 0 {
		elapsed := currentTime.Sub(startTime)
		eta = time.Duration(float64(elapsed)*(100/percentage) - float64(elapsed))
	}

	if total != "" {
		xlog.Info("Downloading", "fileName", fileName, "current", current, "total", total, "percentage", percentage, "eta", eta)
	} else {
		// Include the file name here too so size-less downloads remain
		// attributable in the logs (previously only "current" was emitted).
		xlog.Info("Downloading", "fileName", fileName, "current", current)
	}
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/utils/ffmpeg.go | pkg/utils/ffmpeg.go | package utils
import (
"fmt"
"os"
"os/exec"
"strings"
"github.com/go-audio/wav"
)
// ffmpegCommand runs ffmpeg with the given arguments and returns its combined
// stdout+stderr output. The executable name is pinned to "ffmpeg" so security
// scanners can see that the command is safe.
func ffmpegCommand(args []string) (string, error) {
	proc := exec.Command("ffmpeg", args...)
	proc.Env = []string{} // run with an empty environment
	combined, err := proc.CombinedOutput()
	return string(combined), err
}
// AudioToWav converts audio to 16-bit, 16 kHz, mono WAV for transcription.
// If src is already a WAV in exactly that format it is moved to dst instead
// of being re-encoded.
// TODO: use https://github.com/mccoyst/ogg?
func AudioToWav(src, dst string) error {
	if strings.HasSuffix(src, ".wav") {
		f, err := os.Open(src)
		if err != nil {
			return fmt.Errorf("open: %w", err)
		}
		dec := wav.NewDecoder(f)
		dec.ReadInfo()
		f.Close()
		if dec.BitDepth == 16 && dec.NumChans == 1 && dec.SampleRate == 16000 {
			// Already in the target format: move instead of re-encoding.
			// Surface the rename error instead of silently reporting success.
			if err := os.Rename(src, dst); err != nil {
				return fmt.Errorf("rename: %w", err)
			}
			return nil
		}
	}
	commandArgs := []string{"-i", src, "-format", "s16le", "-ar", "16000", "-ac", "1", "-acodec", "pcm_s16le", dst}
	out, err := ffmpegCommand(commandArgs)
	if err != nil {
		return fmt.Errorf("error: %w out: %s", err, out)
	}
	return nil
}
// AudioConvert converts generated wav file from tts to other output formats.
// TODO: handle pcm to have 100% parity of supported format from OpenAI
func AudioConvert(src string, format string) (string, error) {
	// Map the requested format to a file extension; anything unknown
	// (including an empty format) defaults to wav.
	var extension string
	switch format {
	case "opus":
		extension = ".ogg"
	case "mp3", "aac", "flac":
		extension = "." + format
	default:
		extension = ".wav"
	}

	// Target is already wav: nothing to convert.
	if extension == ".wav" {
		return src, nil
	}

	// Naive conversion relying on ffmpeg defaults for the target extension.
	dst := strings.Replace(src, ".wav", extension, -1)
	output, err := ffmpegCommand([]string{"-y", "-i", src, "-vn", dst})
	if err != nil {
		return "", fmt.Errorf("error: %w out: %s", err, output)
	}
	return dst, nil
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
mudler/LocalAI | https://github.com/mudler/LocalAI/blob/23df29fbd3eec1af3944521205fd62b20d4149e5/pkg/utils/path.go | pkg/utils/path.go | package utils
import (
"fmt"
"os"
"path/filepath"
"strings"
)
// ExistsInPath reports whether an entry named s exists inside directory path.
func ExistsInPath(path string, s string) bool {
	if _, err := os.Stat(filepath.Join(path, s)); err != nil {
		return false
	}
	return true
}
// InTrustedRoot reports whether path lies strictly inside trustedRoot by
// walking up the directory chain until trustedRoot or "/" is reached.
func InTrustedRoot(path string, trustedRoot string) error {
	for current := path; current != "/"; {
		current = filepath.Dir(current)
		if current == trustedRoot {
			return nil
		}
	}
	return fmt.Errorf("path is outside of trusted root")
}

// VerifyPath verifies that path, joined onto basePath and cleaned, still
// resolves to a location inside basePath.
func VerifyPath(path, basePath string) error {
	joined := filepath.Clean(filepath.Join(basePath, path))
	return InTrustedRoot(joined, filepath.Clean(basePath))
}
// SanitizeFileName sanitizes the given filename
func SanitizeFileName(fileName string) string {
	// Normalize the path, then strip any directory components so only the
	// final element remains.
	base := filepath.Base(filepath.Clean(fileName))
	// Drop any ".." sequences that survived cleaning.
	return strings.ReplaceAll(base, "..", "")
}
// GenerateUniqueFileName returns baseName+ext, appending "_N" (N starting at
// 2) until the candidate does not collide with an existing entry in dir.
func GenerateUniqueFileName(dir, baseName, ext string) string {
	candidate := baseName + ext
	for counter := 2; ; counter++ {
		if _, err := os.Stat(filepath.Join(dir, candidate)); os.IsNotExist(err) {
			return candidate
		}
		candidate = fmt.Sprintf("%s_%d%s", baseName, counter, ext)
	}
}
| go | MIT | 23df29fbd3eec1af3944521205fd62b20d4149e5 | 2026-01-07T08:35:47.749878Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.