diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..df6cbbc47dfeaa1d56433193cdf034e42fc14b71 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,35 +1,2 @@ -*.7z filter=lfs diff=lfs merge=lfs -text -*.arrow filter=lfs diff=lfs merge=lfs -text -*.bin filter=lfs diff=lfs merge=lfs -text -*.bz2 filter=lfs diff=lfs merge=lfs -text -*.ckpt filter=lfs diff=lfs merge=lfs -text -*.ftz filter=lfs diff=lfs merge=lfs -text -*.gz filter=lfs diff=lfs merge=lfs -text -*.h5 filter=lfs diff=lfs merge=lfs -text -*.joblib filter=lfs diff=lfs merge=lfs -text -*.lfs.* filter=lfs diff=lfs merge=lfs -text -*.mlmodel filter=lfs diff=lfs merge=lfs -text -*.model filter=lfs diff=lfs merge=lfs -text -*.msgpack filter=lfs diff=lfs merge=lfs -text -*.npy filter=lfs diff=lfs merge=lfs -text -*.npz filter=lfs diff=lfs merge=lfs -text -*.onnx filter=lfs diff=lfs merge=lfs -text -*.ot filter=lfs diff=lfs merge=lfs -text -*.parquet filter=lfs diff=lfs merge=lfs -text -*.pb filter=lfs diff=lfs merge=lfs -text -*.pickle filter=lfs diff=lfs merge=lfs -text -*.pkl filter=lfs diff=lfs merge=lfs -text -*.pt filter=lfs diff=lfs merge=lfs -text -*.pth filter=lfs diff=lfs merge=lfs -text -*.rar filter=lfs diff=lfs merge=lfs -text -*.safetensors filter=lfs diff=lfs merge=lfs -text -saved_model/**/* filter=lfs diff=lfs merge=lfs -text -*.tar.* filter=lfs diff=lfs merge=lfs -text -*.tar filter=lfs diff=lfs merge=lfs -text -*.tflite filter=lfs diff=lfs merge=lfs -text -*.tgz filter=lfs diff=lfs merge=lfs -text -*.wasm filter=lfs diff=lfs merge=lfs -text -*.xz filter=lfs diff=lfs merge=lfs -text -*.zip filter=lfs diff=lfs merge=lfs -text -*.zst filter=lfs diff=lfs merge=lfs -text -*tfevents* filter=lfs diff=lfs merge=lfs -text +*.dylib filter=lfs diff=lfs merge=lfs -text +tinyllama-1.1B-q4.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/chatapp.py b/chatapp.py new file mode 100644 index 0000000000000000000000000000000000000000..ede942fb509d58be6bf8e0952e4794f134f0f424 --- /dev/null +++ b/chatapp.py @@ -0,0 +1,30 @@ +import sys +from llama_cpp import Llama + +if len(sys.argv) < 2: + print("Model path not provided as argument") + print("
Usage: $ python chatapp.py path/to/model.gguf") + sys.exit(1) + +llm = Llama( + model_path=sys.argv[1], + n_ctx=512, + n_threads=4, + n_gpu_layers=1, + verbose=False +) + +print("Chat with Llama (type 'exit' to quit)\n") + +while True: + user_input = input("You: ") + if user_input.lower() in ["exit", "quit"]: break + + prompt = f"### Human: {user_input}\n### Assistant:" + output = llm( + prompt, + max_tokens=100, + stop=["###", "### Human:", "\n###"] + ) + response = output["choices"][0]["text"].strip() + print("Bot:", response) diff --git a/llama_cpp/.DS_Store b/llama_cpp/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..0a7bb09767bb19d32725313b0f2b58afcec97d22 Binary files /dev/null and b/llama_cpp/.DS_Store differ diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2c9c527cd432ad5b7ae9b58a7bb176170fd6ed37 --- /dev/null +++ b/llama_cpp/__init__.py @@ -0,0 +1,4 @@ +from .llama_cpp import * +from .llama import * + +__version__ = "0.3.9" diff --git a/llama_cpp/__pycache__/__init__.cpython-310.pyc b/llama_cpp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c4b8872a2887777f693dc14fc2d28978312c269 Binary files /dev/null and b/llama_cpp/__pycache__/__init__.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/_ctypes_extensions.cpython-310.pyc b/llama_cpp/__pycache__/_ctypes_extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..110408c69982438f8a31e03b50181098a646c0ca Binary files /dev/null and b/llama_cpp/__pycache__/_ctypes_extensions.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/_ggml.cpython-310.pyc b/llama_cpp/__pycache__/_ggml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..016d1a2ffc15a26b2efc65191c1d01eff8d5365d Binary files /dev/null and b/llama_cpp/__pycache__/_ggml.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/_internals.cpython-310.pyc b/llama_cpp/__pycache__/_internals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87015ce207bd4cf39c06b26c47eba05cdd584ccc Binary files /dev/null and b/llama_cpp/__pycache__/_internals.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/_logger.cpython-310.pyc b/llama_cpp/__pycache__/_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52a09be8d87035fdbdbd9e7c42df55f77e90aa3a Binary files /dev/null and b/llama_cpp/__pycache__/_logger.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/_utils.cpython-310.pyc b/llama_cpp/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..411caf781765dde0e00730911d28659bfda80f21 Binary files /dev/null and b/llama_cpp/__pycache__/_utils.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/llama.cpython-310.pyc b/llama_cpp/__pycache__/llama.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9410a2dc63bfe2d72a919295ec3cba79a9d891d5 Binary files /dev/null and b/llama_cpp/__pycache__/llama.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/llama_cache.cpython-310.pyc b/llama_cpp/__pycache__/llama_cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de77728ba3b7ab9138a1d9820ecf4bc2d165857d Binary files /dev/null and b/llama_cpp/__pycache__/llama_cache.cpython-310.pyc differ diff --git 
a/llama_cpp/__pycache__/llama_chat_format.cpython-310.pyc b/llama_cpp/__pycache__/llama_chat_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c618d4393cefa1763edb302785a066cb046129ce Binary files /dev/null and b/llama_cpp/__pycache__/llama_chat_format.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/llama_cpp.cpython-310.pyc b/llama_cpp/__pycache__/llama_cpp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc16cfe564c35c9f2d2ac17300bd1b382d7530f8 Binary files /dev/null and b/llama_cpp/__pycache__/llama_cpp.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/llama_grammar.cpython-310.pyc b/llama_cpp/__pycache__/llama_grammar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d39163a9f3041629cef2471c8b6d0bb40aa8e73f Binary files /dev/null and b/llama_cpp/__pycache__/llama_grammar.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/llama_speculative.cpython-310.pyc b/llama_cpp/__pycache__/llama_speculative.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..804ac483c72228d484c4abdc1d8fdb79db2444d7 Binary files /dev/null and b/llama_cpp/__pycache__/llama_speculative.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/llama_tokenizer.cpython-310.pyc b/llama_cpp/__pycache__/llama_tokenizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d90907842cb367b605e165f6cb561ff0f142c78b Binary files /dev/null and b/llama_cpp/__pycache__/llama_tokenizer.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/llama_types.cpython-310.pyc b/llama_cpp/__pycache__/llama_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28e86c9ef8bcb28a4a914b56c3fc5de920c7110f Binary files /dev/null and b/llama_cpp/__pycache__/llama_types.cpython-310.pyc differ diff --git a/llama_cpp/__pycache__/llava_cpp.cpython-310.pyc b/llama_cpp/__pycache__/llava_cpp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7e6dba9710ff756e8991e9113a93b8d82347169 Binary files /dev/null and b/llama_cpp/__pycache__/llava_cpp.cpython-310.pyc differ diff --git a/llama_cpp/_ctypes_extensions.py b/llama_cpp/_ctypes_extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..e88ed387df342b8a16325ecc2ed1dfe8324a516a --- /dev/null +++ b/llama_cpp/_ctypes_extensions.py @@ -0,0 +1,131 @@ +from __future__ import annotations + +import sys +import os +import ctypes +import functools +import pathlib + +from typing import ( + Any, + Callable, + List, + Union, + Optional, + TYPE_CHECKING, + TypeVar, + Generic, +) +from typing_extensions import TypeAlias + + +# Load the library +def load_shared_library(lib_base_name: str, base_path: pathlib.Path): + """Platform independent shared library loader""" + # Searching for the library in the current directory under the name "libllama" (default name + # for llamacpp) and "llama" (default name for this repo) + lib_paths: List[pathlib.Path] = [] + # Determine the file extension based on the platform + if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"): + lib_paths += [ + base_path / f"lib{lib_base_name}.so", + ] + elif sys.platform == "darwin": + lib_paths += [ + base_path / f"lib{lib_base_name}.so", + base_path / f"lib{lib_base_name}.dylib", + ] + elif sys.platform == "win32": + lib_paths += [ + base_path / f"{lib_base_name}.dll", + base_path / f"lib{lib_base_name}.dll", + ] + else: + raise 
RuntimeError("Unsupported platform") + + cdll_args = dict() # type: ignore + + # Add the library directory to the DLL search path on Windows (if needed) + if sys.platform == "win32": + os.add_dll_directory(str(base_path)) + os.environ["PATH"] = str(base_path) + os.pathsep + os.environ["PATH"] + + if sys.platform == "win32" and sys.version_info >= (3, 8): + os.add_dll_directory(str(base_path)) + if "CUDA_PATH" in os.environ: + os.add_dll_directory(os.path.join(os.environ["CUDA_PATH"], "bin")) + os.add_dll_directory(os.path.join(os.environ["CUDA_PATH"], "lib")) + if "HIP_PATH" in os.environ: + os.add_dll_directory(os.path.join(os.environ["HIP_PATH"], "bin")) + os.add_dll_directory(os.path.join(os.environ["HIP_PATH"], "lib")) + cdll_args["winmode"] = ctypes.RTLD_GLOBAL + + # Try to load the shared library, handling potential errors + for lib_path in lib_paths: + if lib_path.exists(): + try: + return ctypes.CDLL(str(lib_path), **cdll_args) # type: ignore + except Exception as e: + raise RuntimeError(f"Failed to load shared library '{lib_path}': {e}") + + raise FileNotFoundError( + f"Shared library with base name '{lib_base_name}' not found" + ) + + +# ctypes sane type hint helpers +# +# - Generic Pointer and Array types +# - PointerOrRef type with a type hinted byref function +# +# NOTE: Only use these for static type checking not for runtime checks +# no good will come of that + +if TYPE_CHECKING: + CtypesCData = TypeVar("CtypesCData", bound=ctypes._CData) # type: ignore + + CtypesArray: TypeAlias = ctypes.Array[CtypesCData] # type: ignore + + CtypesPointer: TypeAlias = ctypes._Pointer[CtypesCData] # type: ignore + + CtypesVoidPointer: TypeAlias = ctypes.c_void_p + + class CtypesRef(Generic[CtypesCData]): + pass + + CtypesPointerOrRef: TypeAlias = Union[ + CtypesPointer[CtypesCData], CtypesRef[CtypesCData] + ] + + CtypesFuncPointer: TypeAlias = ctypes._FuncPointer # type: ignore + +F = TypeVar("F", bound=Callable[..., Any]) + + +def ctypes_function_for_shared_library(lib: ctypes.CDLL): + """Decorator for defining ctypes functions with type hints""" + + def ctypes_function( + name: str, argtypes: List[Any], restype: Any, enabled: bool = True + ): + def decorator(f: F) -> F: + if enabled: + func = getattr(lib, name) + func.argtypes = argtypes + func.restype = restype + functools.wraps(f)(func) + return func + else: + return f + + return decorator + + return ctypes_function + + +def _byref(obj: CtypesCData, offset: Optional[int] = None) -> CtypesRef[CtypesCData]: + """Type-annotated version of ctypes.byref""" + ... 
+ + +byref = _byref if TYPE_CHECKING else ctypes.byref diff --git a/llama_cpp/_ggml.py b/llama_cpp/_ggml.py new file mode 100644 index 0000000000000000000000000000000000000000..5bee8a93b57636729369f43e8f00b48b3af26fbe --- /dev/null +++ b/llama_cpp/_ggml.py @@ -0,0 +1,12 @@ +"""Internal module use at your own risk + +This module provides a minimal interface for working with ggml tensors from llama-cpp-python +""" +import os +import pathlib + +import llama_cpp._ctypes_extensions as ctypes_ext + +libggml_base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) / "lib" +libggml = ctypes_ext.load_shared_library("ggml", libggml_base_path) + diff --git a/llama_cpp/_internals.py b/llama_cpp/_internals.py new file mode 100644 index 0000000000000000000000000000000000000000..343581dce0bddcb193c9cdea0b613022cae6a5b5 --- /dev/null +++ b/llama_cpp/_internals.py @@ -0,0 +1,879 @@ +from __future__ import annotations + +import os +import ctypes + +from typing import ( + Dict, + List, + Tuple, + Optional, + Sequence, +) +from dataclasses import dataclass, field +from contextlib import ExitStack + +import numpy as np +import numpy.typing as npt + +from .llama_types import * +from .llama_grammar import LlamaGrammar +from ._utils import suppress_stdout_stderr + +import llama_cpp.llama_cpp as llama_cpp + + +# Python wrappers over llama.h structs + + +class LlamaModel: + """Intermediate Python wrapper for a llama.cpp llama_model. + NOTE: For stability it's recommended you use the Llama class instead.""" + + def __init__( + self, + *, + path_model: str, + params: llama_cpp.llama_model_params, + verbose: bool = True, + ): + self.path_model = path_model + self.params = params + self.verbose = verbose + self._exit_stack = ExitStack() + + model = None + + if not os.path.exists(path_model): + raise ValueError(f"Model path does not exist: {path_model}") + + with suppress_stdout_stderr(disable=verbose): + model = llama_cpp.llama_load_model_from_file( + self.path_model.encode("utf-8"), self.params + ) + + if model is None: + raise ValueError(f"Failed to load model from file: {path_model}") + + vocab = llama_cpp.llama_model_get_vocab(model) + + if vocab is None: + raise ValueError(f"Failed to get vocab from model: {path_model}") + + self.model = model + self.vocab = vocab + + def free_model(): + if self.model is None: + return + llama_cpp.llama_free_model(self.model) + self.model = None + + self._exit_stack.callback(free_model) + + def close(self): + self._exit_stack.close() + + def __del__(self): + self.close() + + def vocab_type(self) -> int: + return llama_cpp.llama_vocab_type(self.model) + + def n_vocab(self) -> int: + return llama_cpp.llama_n_vocab(self.vocab) + + def n_ctx_train(self) -> int: + return llama_cpp.llama_n_ctx_train(self.model) + + def n_embd(self) -> int: + return llama_cpp.llama_n_embd(self.model) + + def rope_freq_scale_train(self) -> float: + return llama_cpp.llama_model_rope_freq_scale_train(self.model) + + def desc(self) -> str: + buf = ctypes.create_string_buffer(1024) + llama_cpp.llama_model_desc(self.model, buf, 1024) + return buf.value.decode("utf-8") + + def size(self) -> int: + return llama_cpp.llama_model_size(self.model) + + def n_params(self) -> int: + return llama_cpp.llama_model_n_params(self.model) + + def get_tensor(self, name: str) -> ctypes.c_void_p: + raise NotImplementedError("get_tensor is not implemented in llama.cpp") + + # Vocab + + def token_get_text(self, token: int) -> str: + return llama_cpp.llama_token_get_text(self.vocab, token).decode("utf-8") + + def 
token_get_score(self, token: int) -> float: + return llama_cpp.llama_token_get_score(self.vocab, token) + + def token_get_attr(self, token: int) -> int: + return llama_cpp.llama_token_get_attr(self.vocab, token) + + # Special tokens + + def token_bos(self) -> int: + return llama_cpp.llama_token_bos(self.vocab) + + def token_eos(self) -> int: + return llama_cpp.llama_token_eos(self.vocab) + + def token_cls(self) -> int: + return llama_cpp.llama_token_cls(self.vocab) + + def token_sep(self) -> int: + return llama_cpp.llama_token_sep(self.vocab) + + def token_nl(self) -> int: + return llama_cpp.llama_token_nl(self.vocab) + + def token_prefix(self) -> int: + raise NotImplementedError("token_prefix is not implemented in llama.cpp") + + def token_middle(self) -> int: + raise NotImplementedError("token_middle is not implemented in llama.cpp") + + def token_suffix(self) -> int: + raise NotImplementedError("token_suffix is not implemented in llama.cpp") + + def token_eot(self) -> int: + return llama_cpp.llama_token_eot(self.vocab) + + def add_bos_token(self) -> bool: + return llama_cpp.llama_add_bos_token(self.vocab) + + def add_eos_token(self) -> bool: + return llama_cpp.llama_add_eos_token(self.vocab) + + # Tokenization + + def tokenize(self, text: bytes, add_bos: bool, special: bool): + n_ctx = self.n_ctx_train() + tokens = (llama_cpp.llama_token * n_ctx)() + n_tokens = llama_cpp.llama_tokenize( + self.vocab, text, len(text), tokens, n_ctx, add_bos, special + ) + if n_tokens < 0: + n_tokens = abs(n_tokens) + tokens = (llama_cpp.llama_token * n_tokens)() + n_tokens = llama_cpp.llama_tokenize( + self.vocab, text, len(text), tokens, n_tokens, add_bos, special + ) + if n_tokens < 0: + raise RuntimeError( + f'Failed to tokenize: text="{text}" n_tokens={n_tokens}' + ) + return list(tokens[:n_tokens]) + + def token_to_piece(self, token: int, special: bool = False) -> bytes: + buf = ctypes.create_string_buffer(32) + llama_cpp.llama_token_to_piece(self.vocab, token, buf, 32, 0, special) + return bytes(buf) + + def detokenize(self, tokens: List[int], special: bool = False) -> bytes: + output = b"" + size = 32 + buffer = (ctypes.c_char * size)() + for token in tokens: + n = llama_cpp.llama_token_to_piece( + self.vocab, llama_cpp.llama_token(token), buffer, size, 0, special + ) + assert n <= size + output += bytes(buffer[:n]) + # NOTE: Llama1 models automatically added a space at the start of the prompt + # this line removes a leading space if the first token is a beginning of sentence token + return ( + output[1:] + if len(tokens) > 0 and tokens[0] == self.token_bos() and output[0:1] == b" " + else output + ) + + # Extra + def metadata(self) -> Dict[str, str]: + metadata: Dict[str, str] = {} + buffer_size = 1024 + buffer = ctypes.create_string_buffer(buffer_size) + # zero the buffer + buffer.value = b"\0" * buffer_size + # iterate over model keys + for i in range(llama_cpp.llama_model_meta_count(self.model)): + nbytes = llama_cpp.llama_model_meta_key_by_index( + self.model, i, buffer, buffer_size + ) + if nbytes > buffer_size: + buffer_size = nbytes + 1 + buffer = ctypes.create_string_buffer(buffer_size) + nbytes = llama_cpp.llama_model_meta_key_by_index( + self.model, i, buffer, buffer_size + ) + key = buffer.value.decode("utf-8") + nbytes = llama_cpp.llama_model_meta_val_str_by_index( + self.model, i, buffer, buffer_size + ) + if nbytes > buffer_size: + buffer_size = nbytes + 1 + buffer = ctypes.create_string_buffer(buffer_size) + nbytes = llama_cpp.llama_model_meta_val_str_by_index( + self.model, i, 
buffer, buffer_size + ) + value = buffer.value.decode("utf-8") + metadata[key] = value + return metadata + + @staticmethod + def default_params(): + """Get the default llama_model_params.""" + return llama_cpp.llama_model_default_params() + + +class LlamaContext: + """Intermediate Python wrapper for a llama.cpp llama_context. + NOTE: For stability it's recommended you use the Llama class instead.""" + + def __init__( + self, + *, + model: LlamaModel, + params: llama_cpp.llama_context_params, + verbose: bool = True, + ): + self.model = model + self.params = params + self.verbose = verbose + self._exit_stack = ExitStack() + + ctx = llama_cpp.llama_new_context_with_model(self.model.model, self.params) + + if ctx is None: + raise ValueError("Failed to create llama_context") + + self.ctx = ctx + + def free_ctx(): + if self.ctx is None: + return + llama_cpp.llama_free(self.ctx) + self.ctx = None + + self._exit_stack.callback(free_ctx) + + def close(self): + self._exit_stack.close() + + def __del__(self): + self.close() + + def n_ctx(self) -> int: + return llama_cpp.llama_n_ctx(self.ctx) + + def pooling_type(self) -> int: + return llama_cpp.llama_pooling_type(self.ctx) + + def kv_cache_clear(self): + llama_cpp.llama_kv_cache_clear(self.ctx) + + def kv_cache_seq_rm(self, seq_id: int, p0: int, p1: int): + llama_cpp.llama_kv_cache_seq_rm(self.ctx, seq_id, p0, p1) + + def kv_cache_seq_cp(self, seq_id_src: int, seq_id_dst: int, p0: int, p1: int): + llama_cpp.llama_kv_cache_seq_cp(self.ctx, seq_id_src, seq_id_dst, p0, p1) + + def kv_cache_seq_keep(self, seq_id: int): + llama_cpp.llama_kv_cache_seq_keep(self.ctx, seq_id) + + def kv_cache_seq_shift(self, seq_id: int, p0: int, p1: int, shift: int): + llama_cpp.llama_kv_cache_seq_add(self.ctx, seq_id, p0, p1, shift) + + def get_state_size(self) -> int: + return llama_cpp.llama_get_state_size(self.ctx) + + # TODO: copy_state_data + + # TODO: set_state_data + + # TODO: llama_load_session_file + + # TODO: llama_save_session_file + + def decode(self, batch: LlamaBatch): + return_code = llama_cpp.llama_decode( + self.ctx, + batch.batch, + ) + if return_code != 0: + raise RuntimeError(f"llama_decode returned {return_code}") + + def set_n_threads(self, n_threads: int, n_threads_batch: int): + llama_cpp.llama_set_n_threads(self.ctx, n_threads, n_threads_batch) + + def get_logits(self): + return llama_cpp.llama_get_logits(self.ctx) + + def get_logits_ith(self, i: int): + return llama_cpp.llama_get_logits_ith(self.ctx, i) + + def get_embeddings(self): + return llama_cpp.llama_get_embeddings(self.ctx) + + # Sampling functions + + def set_rng_seed(self, seed: int): + # TODO: Fix + # llama_cpp.llama_set_rng_seed(self.ctx, seed) + raise NotImplementedError("set_rng_seed is not implemented in llama.cpp") + + def sample_repetition_penalties( + self, + candidates: "_LlamaTokenDataArray", + last_tokens_data: "llama_cpp.Array[llama_cpp.llama_token]", + penalty_last_n: int, + penalty_repeat: float, + penalty_freq: float, + penalty_present: float, + ): + # llama_cpp.llama_sample_repetition_penalties( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # last_tokens_data, + # penalty_last_n, + # penalty_repeat, + # penalty_freq, + # penalty_present, + # ) + raise NotImplementedError("sample_repetition_penalties is not implemented in llama.cpp") + + def sample_softmax(self, candidates: "_LlamaTokenDataArray"): + # llama_cpp.llama_sample_softmax( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # ) + raise NotImplementedError("sample_softmax is not implemented 
in llama.cpp") + + def sample_top_k(self, candidates: "_LlamaTokenDataArray", k: int, min_keep: int): + # llama_cpp.llama_sample_top_k( + # self.ctx, llama_cpp.byref(candidates.candidates), k, min_keep + # ) + raise NotImplementedError("sample_top_k is not implemented in llama.cpp") + + def sample_top_p(self, candidates: "_LlamaTokenDataArray", p: float, min_keep: int): + # llama_cpp.llama_sample_top_p( + # self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep + # ) + raise NotImplementedError("sample_top_p is not implemented in llama.cpp") + + def sample_min_p(self, candidates: "_LlamaTokenDataArray", p: float, min_keep: int): + # llama_cpp.llama_sample_min_p( + # self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep + # ) + raise NotImplementedError("sample_min_p is not implemented in llama.cpp") + + def sample_typical( + self, candidates: "_LlamaTokenDataArray", p: float, min_keep: int + ): + # llama_cpp.llama_sample_typical( + # self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep + # ) + raise NotImplementedError("sample_typical is not implemented in llama.cpp") + + def sample_temp(self, candidates: "_LlamaTokenDataArray", temp: float): + # llama_cpp.llama_sample_temp( + # self.ctx, llama_cpp.byref(candidates.candidates), temp + # ) + raise NotImplementedError("sample_temp is not implemented in llama.cpp") + + def sample_grammar(self, candidates: "_LlamaTokenDataArray", grammar: LlamaGrammar): + # llama_cpp.llama_sample_grammar( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # grammar.grammar, + # ) + raise NotImplementedError("sample_grammar is not implemented in llama.cpp") + + def sample_token_mirostat( + self, + candidates: "_LlamaTokenDataArray", + tau: float, + eta: float, + m: int, + mu: llama_cpp.CtypesPointerOrRef[ctypes.c_float], + ) -> int: + raise NotImplementedError("sample_token_mirostat is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token_mirostat( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # tau, + # eta, + # m, + # mu, + # ) + + def sample_token_mirostat_v2( + self, + candidates: "_LlamaTokenDataArray", + tau: float, + eta: float, + mu: llama_cpp.CtypesPointerOrRef[ctypes.c_float], + ) -> int: + raise NotImplementedError("sample_token_mirostat_v2 is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token_mirostat_v2( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # tau, + # eta, + # mu, + # ) + + def sample_token_greedy(self, candidates: "_LlamaTokenDataArray") -> int: + raise NotImplementedError("sample_token_greedy is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token_greedy( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # ) + + def sample_token(self, candidates: "_LlamaTokenDataArray") -> int: + raise NotImplementedError("sample_token is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # ) + + # Grammar + def grammar_accept_token(self, grammar: LlamaGrammar, token: int): + raise NotImplementedError("grammar_accept_token is not implemented in llama.cpp") + # llama_cpp.llama_grammar_accept_token(grammar.grammar, self.ctx, token) + + def reset_timings(self): + llama_cpp.llama_perf_context_reset(self.ctx) + + def print_timings(self): + llama_cpp.llama_perf_context_print(self.ctx) + + # Utility functions + @staticmethod + def default_params(): + """Get the default llama_context_params.""" + return llama_cpp.llama_context_default_params() + + +class 
LlamaBatch: + def __init__( + self, *, n_tokens: int, embd: int, n_seq_max: int, verbose: bool = True + ): + self._n_tokens = n_tokens + self.embd = embd + self.n_seq_max = n_seq_max + self.verbose = verbose + self._exit_stack = ExitStack() + + batch = llama_cpp.llama_batch_init(self._n_tokens, self.embd, self.n_seq_max) + + if batch is None: + raise ValueError("Failed to create llama_batch") + + self.batch = batch + + def free_batch(): + if self.batch is None: + return + llama_cpp.llama_batch_free(self.batch) + self.batch = None + + self._exit_stack.callback(free_batch) + + def close(self): + self._exit_stack.close() + + def __del__(self): + self.close() + + def n_tokens(self) -> int: + return self.batch.n_tokens + + def reset(self): + self.batch.n_tokens = 0 + + def set_batch(self, batch: Sequence[int], n_past: int, logits_all: bool): + n_tokens = len(batch) + self.batch.n_tokens = n_tokens + for i in range(n_tokens): + self.batch.token[i] = batch[i] + self.batch.pos[i] = n_past + i + self.batch.seq_id[i][0] = 0 + self.batch.n_seq_id[i] = 1 + self.batch.logits[i] = logits_all + self.batch.logits[n_tokens - 1] = True + + def add_sequence(self, batch: Sequence[int], seq_id: int, logits_all: bool): + n_tokens = len(batch) + n_tokens0 = self.batch.n_tokens + self.batch.n_tokens += n_tokens + for i in range(n_tokens): + j = n_tokens0 + i + self.batch.token[j] = batch[i] + self.batch.pos[j] = i + self.batch.seq_id[j][0] = seq_id + self.batch.n_seq_id[j] = 1 + self.batch.logits[j] = logits_all + self.batch.logits[n_tokens - 1] = True + + +class LlamaTokenDataArray: + def __init__(self, *, n_vocab: int): + self.n_vocab = n_vocab + self.candidates_data = np.recarray( + (self.n_vocab,), + dtype=np.dtype( + [("id", np.intc), ("logit", np.single), ("p", np.single)], align=True + ), + ) + self.candidates = llama_cpp.llama_token_data_array( + data=self.candidates_data.ctypes.data_as(llama_cpp.llama_token_data_p), + size=self.n_vocab, + sorted=False, + ) + self.default_candidates_data_id = np.arange(self.n_vocab, dtype=np.intc) # type: ignore + self.default_candidates_data_p = np.zeros(self.n_vocab, dtype=np.single) + + def copy_logits(self, logits: npt.NDArray[np.single]): + self.candidates_data.id[:] = self.default_candidates_data_id + self.candidates_data.logit[:] = logits + self.candidates_data.p[:] = self.default_candidates_data_p + self.candidates.sorted = False + self.candidates.size = self.n_vocab + + +# Embedding functions + + +def normalize_embedding(embedding): + norm = float(np.linalg.norm(embedding)) + if norm == 0.0: + return embedding + return [v / norm for v in embedding] + + +# Python wrappers over common/sampling structs + + +@dataclass +class LlamaSamplingParams: + n_prev: int = 64 + n_probs: int = 0 + top_k: int = 40 + top_p: float = 0.95 + min_p: float = 0.05 + tfs_z: float = 1.00 + typical_p: float = 1.00 + temp: float = 0.80 + penalty_last_n: int = 64 + penalty_repeat: float = 1.0 + penalty_freq: float = 0.00 + penalty_present: float = 0.00 + mirostat: int = 0 + mirostat_tau: float = 5.00 + mirostat_eta: float = 0.10 + penalize_nl: bool = True + + grammar: str = "" + + cfg_negative_prompt: str = "" + cfg_scale: float = 1.00 + + logit_bias: dict[int, float] = field(default_factory=dict) + + +@dataclass +class LlamaSamplingContext: + params: LlamaSamplingParams = field(default_factory=LlamaSamplingParams) + mirostat_mu: ctypes.c_float = field(default_factory=ctypes.c_float) + grammar: Optional[LlamaGrammar] = None + # NOTE: Missing parsed_grammar + prev: list[int] = 
field(default_factory=list) + cur: list[llama_cpp.llama_token_data] = field(default_factory=list) + + def reset(self): + self.prev = [] + self.cur = [] + if self.grammar is not None: + self.grammar.reset() + + def cp(self): + return LlamaSamplingContext( + params=self.params, + mirostat_mu=self.mirostat_mu, + grammar=self.grammar, + prev=self.prev.copy(), + cur=self.cur.copy(), + ) + + def last(self) -> Optional[int]: + if len(self.prev) > 0: + return self.prev[-1] + else: + return None + + def prev_str(self, ctx_main: LlamaContext, n: int) -> str: + return ctx_main.model.detokenize(self.prev[-n:]).decode("utf-8") + + def sample( + self, + ctx_main: LlamaContext, + idx: int = 0, + logits_array: Optional[npt.NDArray[np.single]] = None, + ): + n_vocab = ctx_main.model.n_vocab() + id: int = 0 + + if logits_array is None: + logits = ctx_main.get_logits_ith(idx) + logits_array = np.array( + ctypes.cast(logits, ctypes.POINTER(ctypes.c_float * n_vocab)).contents, + dtype=np.single, + ) + + # apply logit_bias + for token, logit_bias in self.params.logit_bias.items(): + logits_array[token] += logit_bias + + token_data_array = LlamaTokenDataArray( + n_vocab=n_vocab + ) # TODO: Only create this once + token_data_array.copy_logits(logits_array) + + # apply penalties + if len(self.prev) > 0: + nl_token = ctx_main.model.token_nl() + nl_logit = logits_array[nl_token] + last_tokens = self.prev[-self.params.penalty_last_n :] + last_tokens_size = min(len(last_tokens), self.params.penalty_last_n) + if last_tokens_size > 0: + last_tokens_p = (llama_cpp.llama_token * len(last_tokens))(*last_tokens) + ctx_main.sample_repetition_penalties( + token_data_array, + last_tokens_p, + last_tokens_size, + self.params.penalty_repeat, + self.params.penalty_freq, + self.params.penalty_present, + ) + if not self.params.penalize_nl: + token_data_array.candidates_data.logit[nl_token] = nl_logit + + if self.grammar is not None: + ctx_main.sample_grammar(token_data_array, self.grammar) + + if self.params.temp < 0: + ctx_main.sample_softmax(token_data_array) + id = token_data_array.candidates_data.id[0] + elif self.params.temp == 0: + id = ctx_main.sample_token_greedy(token_data_array) + else: + if self.params.mirostat == 1: + mirostat_m = 100 + ctx_main.sample_temp(token_data_array, self.params.temp) + id = ctx_main.sample_token_mirostat( + token_data_array, + self.params.mirostat_tau, + self.params.mirostat_eta, + mirostat_m, + ctypes.pointer(self.mirostat_mu), + ) + elif self.params.mirostat == 2: + ctx_main.sample_temp(token_data_array, self.params.temp) + id = ctx_main.sample_token_mirostat_v2( + token_data_array, + self.params.mirostat_tau, + self.params.mirostat_eta, + ctypes.pointer(self.mirostat_mu), + ) + else: + min_keep = max(1, self.params.n_probs) + ctx_main.sample_top_k( + token_data_array, self.params.top_k, min_keep=min_keep + ) + ctx_main.sample_typical( + token_data_array, self.params.typical_p, min_keep=min_keep + ) + ctx_main.sample_top_p( + token_data_array, self.params.top_p, min_keep=min_keep + ) + ctx_main.sample_min_p( + token_data_array, self.params.min_p, min_keep=min_keep + ) + ctx_main.sample_temp(token_data_array, self.params.temp) + id = ctx_main.sample_token(token_data_array) + return id + + def accept(self, ctx_main: LlamaContext, id: int, apply_grammar: bool): + if apply_grammar and self.grammar is not None: + ctx_main.grammar_accept_token(self.grammar, id) + self.prev.append(id) + + +from typing import List, Callable, Optional, Union +import ctypes +import llama_cpp + + +class CustomSampler: + 
def __init__( + self, apply_func: typing.Callable[[llama_cpp.llama_token_data_array], None] + ): + self.apply_func = apply_func + + def apply_wrapper( + sampler: llama_cpp.llama_sampler_p, + cur_p: llama_cpp.llama_token_data_array_p, + ): + self.apply_func(cur_p) + + def free_wrapper(sampler: llama_cpp.llama_sampler_p): + pass + + sampler_i = llama_cpp.llama_sampler_i() + sampler_i.apply = llama_cpp.llama_sampler_i_apply(apply_wrapper) + self._apply_wrapper_ref = apply_wrapper + + sampler_i.name = llama_cpp.llama_sampler_i_name(0) + sampler_i.accept = llama_cpp.llama_sampler_i_accept(0) + sampler_i.reset = llama_cpp.llama_sampler_i_reset(0) + sampler_i.clone = llama_cpp.llama_sampler_i_clone(0) + sampler_i.free = llama_cpp.llama_sampler_i_free(0) + + self.sampler = llama_cpp.llama_sampler() + self.sampler.iface = ctypes.pointer(sampler_i) + self.sampler.ctx = None + + def get_sampler(self) -> llama_cpp.llama_sampler_p: + return ctypes.pointer(self.sampler) + + +class LlamaSampler: + def __init__(self): + params = llama_cpp.llama_sampler_chain_params() + self.sampler = llama_cpp.llama_sampler_chain_init(params) + self.samplers: List[llama_cpp.llama_sampler_p] = [] + self.custom_samplers: List[Tuple[int, CustomSampler]] = [] + + def add_greedy(self): + sampler = llama_cpp.llama_sampler_init_greedy() + self._add_sampler(sampler) + + def add_dist(self, seed: int): + sampler = llama_cpp.llama_sampler_init_dist(seed) + self._add_sampler(sampler) + + def add_softmax(self): + sampler = llama_cpp.llama_sampler_init_softmax() + self._add_sampler(sampler) + + def add_top_k(self, k: int): + sampler = llama_cpp.llama_sampler_init_top_k(k) + self._add_sampler(sampler) + + def add_top_p(self, p: float, min_keep: int): + sampler = llama_cpp.llama_sampler_init_top_p(p, min_keep) + self._add_sampler(sampler) + + def add_min_p(self, p: float, min_keep: int): + sampler = llama_cpp.llama_sampler_init_min_p(p, min_keep) + self._add_sampler(sampler) + + def add_typical(self, p: float, min_keep: int): + sampler = llama_cpp.llama_sampler_init_typical(p, min_keep) + self._add_sampler(sampler) + + def add_temp(self, temp: float): + sampler = llama_cpp.llama_sampler_init_temp(temp) + self._add_sampler(sampler) + + def add_temp_ext(self, t: float, delta: float, exponent: float): + sampler = llama_cpp.llama_sampler_init_temp_ext(t, delta, exponent) + self._add_sampler(sampler) + + def add_mirostat(self, n_vocab: int, seed: int, tau: float, eta: float, m: int): + sampler = llama_cpp.llama_sampler_init_mirostat(n_vocab, seed, tau, eta, m) + self._add_sampler(sampler) + + def add_mirostat_v2(self, seed: int, tau: float, eta: float): + sampler = llama_cpp.llama_sampler_init_mirostat_v2(seed, tau, eta) + self._add_sampler(sampler) + + def add_grammar(self, model: LlamaModel, grammar: LlamaGrammar): + sampler = llama_cpp.llama_sampler_init_grammar( + model.vocab, grammar._grammar.encode("utf-8"), grammar._root.encode("utf-8") + ) + self._add_sampler(sampler) + + def add_penalties( + self, + n_vocab: int, + special_eos_id: int, + linefeed_id: int, + penalty_last_n: int, + penalty_repeat: float, + penalty_freq: float, + penalty_present: float, + penalize_nl: bool, + ignore_eos: bool, + ): + sampler = llama_cpp.llama_sampler_init_penalties( + penalty_last_n, + penalty_repeat, + penalty_freq, + penalty_present, + ) + self._add_sampler(sampler) + + def init_logit_bias( + self, n_vocab: int, n_logit_bias, logit_bias: llama_cpp.llama_logit_bias_p + ): + sampler = llama_cpp.llama_sampler_init_logit_bias( + n_vocab, n_logit_bias, 
logit_bias + ) + self._add_sampler(sampler) + + def add_custom( + self, apply_func: Callable[[llama_cpp.llama_token_data_array], None] + ): + custom_sampler = CustomSampler(apply_func) + sampler = custom_sampler.get_sampler() + self._add_sampler(sampler) + # NOTE: Must remove custom samplers before free or llama.cpp will try to free them + self.custom_samplers.append( + (llama_cpp.llama_sampler_chain_n(self.sampler) - 1, custom_sampler) + ) + + def _add_sampler(self, sampler: llama_cpp.llama_sampler_p): + assert self.sampler is not None + llama_cpp.llama_sampler_chain_add(self.sampler, sampler) + self.samplers.append(sampler) + + def get_seed(self) -> int: + assert self.sampler is not None + return llama_cpp.llama_sampler_get_seed(self.sampler) + + def sample(self, ctx: LlamaContext, idx: int) -> int: + assert self.sampler is not None + assert ctx.ctx is not None + return llama_cpp.llama_sampler_sample(self.sampler, ctx.ctx, idx) + + def close(self): + if self.sampler: + # NOTE: Must remove custom samplers before free or llama.cpp will try to free them + for i, _ in reversed(self.custom_samplers): + llama_cpp.llama_sampler_chain_remove(self.sampler, i) + llama_cpp.llama_sampler_free(self.sampler) + self.sampler = None + self.samplers.clear() + self.custom_samplers.clear() + + def __del__(self): + self.close() diff --git a/llama_cpp/_logger.py b/llama_cpp/_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..787b3f108e1cf61d21c6f954e52567ba32659906 --- /dev/null +++ b/llama_cpp/_logger.py @@ -0,0 +1,47 @@ +import sys +import ctypes +import logging + +import llama_cpp + +# enum ggml_log_level { +# GGML_LOG_LEVEL_NONE = 0, +# GGML_LOG_LEVEL_INFO = 1, +# GGML_LOG_LEVEL_WARN = 2, +# GGML_LOG_LEVEL_ERROR = 3, +# GGML_LOG_LEVEL_DEBUG = 4, +# GGML_LOG_LEVEL_CONT = 5, // continue previous log +# }; +GGML_LOG_LEVEL_TO_LOGGING_LEVEL = { + 0: logging.CRITICAL, + 1: logging.INFO, + 2: logging.WARNING, + 3: logging.ERROR, + 4: logging.DEBUG, + 5: logging.DEBUG, +} + +logger = logging.getLogger("llama-cpp-python") + +_last_log_level = GGML_LOG_LEVEL_TO_LOGGING_LEVEL[0] + +# typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data); +@llama_cpp.llama_log_callback +def llama_log_callback( + level: int, + text: bytes, + user_data: ctypes.c_void_p, +): + # TODO: Correctly implement continue previous log + global _last_log_level + log_level = GGML_LOG_LEVEL_TO_LOGGING_LEVEL[level] if level != 5 else _last_log_level + if logger.level <= GGML_LOG_LEVEL_TO_LOGGING_LEVEL[level]: + print(text.decode("utf-8"), end="", flush=True, file=sys.stderr) + _last_log_level = log_level + + +llama_cpp.llama_log_set(llama_log_callback, ctypes.c_void_p(0)) + + +def set_verbose(verbose: bool): + logger.setLevel(logging.DEBUG if verbose else logging.ERROR) diff --git a/llama_cpp/_utils.py b/llama_cpp/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..29628193bb0601d24b09d867663287f99bc0db70 --- /dev/null +++ b/llama_cpp/_utils.py @@ -0,0 +1,78 @@ +import os +import sys + +from typing import Any, Dict + +# Avoid "LookupError: unknown encoding: ascii" when open() called in a destructor +outnull_file = open(os.devnull, "w") +errnull_file = open(os.devnull, "w") + +STDOUT_FILENO = 1 +STDERR_FILENO = 2 + + +class suppress_stdout_stderr(object): + # NOTE: these must be "saved" here to avoid exceptions when using + # this context manager inside of a __del__ method + sys = sys + os = os + + def __init__(self, disable: bool = True): + 
self.disable = disable + + # Oddly enough this works better than the contextlib version + def __enter__(self): + if self.disable: + return self + + self.old_stdout_fileno_undup = STDOUT_FILENO + self.old_stderr_fileno_undup = STDERR_FILENO + + self.old_stdout_fileno = self.os.dup(self.old_stdout_fileno_undup) + self.old_stderr_fileno = self.os.dup(self.old_stderr_fileno_undup) + + self.old_stdout = self.sys.stdout + self.old_stderr = self.sys.stderr + + self.os.dup2(outnull_file.fileno(), self.old_stdout_fileno_undup) + self.os.dup2(errnull_file.fileno(), self.old_stderr_fileno_undup) + + self.sys.stdout = outnull_file + self.sys.stderr = errnull_file + return self + + def __exit__(self, *_): + if self.disable: + return + + # Check if sys.stdout and sys.stderr have fileno method + self.sys.stdout = self.old_stdout + self.sys.stderr = self.old_stderr + + self.os.dup2(self.old_stdout_fileno, self.old_stdout_fileno_undup) + self.os.dup2(self.old_stderr_fileno, self.old_stderr_fileno_undup) + + self.os.close(self.old_stdout_fileno) + self.os.close(self.old_stderr_fileno) + + +class MetaSingleton(type): + """ + Metaclass for implementing the Singleton pattern. + """ + + _instances: Dict[type, Any] = {} + + def __call__(cls, *args: Any, **kwargs: Any) -> Any: + if cls not in cls._instances: + cls._instances[cls] = super(MetaSingleton, cls).__call__(*args, **kwargs) + return cls._instances[cls] + + +class Singleton(object, metaclass=MetaSingleton): + """ + Base class for implementing the Singleton pattern. + """ + + def __init__(self): + super(Singleton, self).__init__() diff --git a/llama_cpp/lib/libggml-base.dylib b/llama_cpp/lib/libggml-base.dylib new file mode 100644 index 0000000000000000000000000000000000000000..cf34a612efa99df6c2c906bb8d0db146a98c311a --- /dev/null +++ b/llama_cpp/lib/libggml-base.dylib @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b10bbd734ef61868c17ddad1e15a08bcb93f711475f02b8df167256483f6b80c +size 548128 diff --git a/llama_cpp/lib/libggml-blas.dylib b/llama_cpp/lib/libggml-blas.dylib new file mode 100644 index 0000000000000000000000000000000000000000..a8bff3d6af570b528a005ef58b8cbf331b7b244b Binary files /dev/null and b/llama_cpp/lib/libggml-blas.dylib differ diff --git a/llama_cpp/lib/libggml-cpu.dylib b/llama_cpp/lib/libggml-cpu.dylib new file mode 100644 index 0000000000000000000000000000000000000000..2e1a906d5081336a6e3fce4bfe80a3300b42d81d --- /dev/null +++ b/llama_cpp/lib/libggml-cpu.dylib @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e842f3d817bdc4366d5b0b85a888bbe75bddd77e9cb320c9fcaacc7916cba08f +size 520192 diff --git a/llama_cpp/lib/libggml-metal.dylib b/llama_cpp/lib/libggml-metal.dylib new file mode 100644 index 0000000000000000000000000000000000000000..702ea8ab9291772e20c50a25a66919b5cccbf96c --- /dev/null +++ b/llama_cpp/lib/libggml-metal.dylib @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:965467ace598c542be5b05efa0edd6f28e8d9b3bef2d10e80f547b92cb05140c +size 600160 diff --git a/llama_cpp/lib/libggml.dylib b/llama_cpp/lib/libggml.dylib new file mode 100644 index 0000000000000000000000000000000000000000..66cc1255beb331da86df55745f2e46c97caf5c23 Binary files /dev/null and b/llama_cpp/lib/libggml.dylib differ diff --git a/llama_cpp/lib/libllama.dylib b/llama_cpp/lib/libllama.dylib new file mode 100644 index 0000000000000000000000000000000000000000..a463d1c2848ce1565641e29e9a67226d8344fdb1 --- /dev/null +++ b/llama_cpp/lib/libllama.dylib @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:46ce63cb4168539abcaa517d7023aabf9f111d56ca64b5f8aa2c6fe991ba9910 +size 1140160 diff --git a/llama_cpp/lib/libllava.dylib b/llama_cpp/lib/libllava.dylib new file mode 100644 index 0000000000000000000000000000000000000000..f0da7b9d06b251c9acc36e025d938f7cd6b15c3d --- /dev/null +++ b/llama_cpp/lib/libllava.dylib @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:839335da6f28ab29c340a9c61e3dffbdbde54bfb9e68e52d3bb2e7255645506e +size 337424 diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py new file mode 100644 index 0000000000000000000000000000000000000000..7e9a6af23d7355c06d8d5020fb4d23f1e263793b --- /dev/null +++ b/llama_cpp/llama.py @@ -0,0 +1,2418 @@ +from __future__ import annotations + +import os +import sys +import uuid +import time +import json +import ctypes +import typing +import random +import fnmatch +import warnings +import contextlib +import multiprocessing + +from typing import ( + Any, + List, + Literal, + Optional, + Union, + Generator, + Sequence, + Iterator, + Deque, + Callable, + Dict, +) +from collections import deque +from pathlib import Path + + +from .llama_types import * +from .llama_grammar import LlamaGrammar +from .llama_cache import ( + BaseLlamaCache, + LlamaCache, # type: ignore + LlamaDiskCache, # type: ignore + LlamaRAMCache, # type: ignore +) +from .llama_tokenizer import BaseLlamaTokenizer, LlamaTokenizer +import llama_cpp.llama_cpp as llama_cpp +import llama_cpp.llama_chat_format as llama_chat_format + +from llama_cpp.llama_speculative import LlamaDraftModel + +import numpy as np +import numpy.typing as npt + +import llama_cpp._internals as internals +from ._logger import set_verbose +from ._utils import suppress_stdout_stderr + + +class Llama: + """High-level Python wrapper for a llama.cpp model.""" + + __backend_initialized = False + + def __init__( + self, + model_path: str, + *, + # Model Params + n_gpu_layers: int = 0, + split_mode: int = llama_cpp.LLAMA_SPLIT_MODE_LAYER, + main_gpu: int = 0, + tensor_split: Optional[List[float]] = None, + rpc_servers: Optional[str] = None, + vocab_only: bool = False, + use_mmap: bool = True, + use_mlock: bool = False, + kv_overrides: Optional[Dict[str, Union[bool, int, float, str]]] = None, + # Context Params + seed: int = llama_cpp.LLAMA_DEFAULT_SEED, + n_ctx: int = 512, + n_batch: int = 512, + n_ubatch: int = 512, + n_threads: Optional[int] = None, + n_threads_batch: Optional[int] = None, + rope_scaling_type: Optional[ + int + ] = llama_cpp.LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED, + pooling_type: int = llama_cpp.LLAMA_POOLING_TYPE_UNSPECIFIED, + rope_freq_base: float = 0.0, + rope_freq_scale: float = 0.0, + yarn_ext_factor: float = -1.0, + yarn_attn_factor: float = 1.0, + yarn_beta_fast: float = 32.0, + yarn_beta_slow: float = 1.0, + yarn_orig_ctx: int = 0, + logits_all: bool = False, + embedding: bool = False, + offload_kqv: bool = True, + flash_attn: bool = False, + # Sampling Params + no_perf: bool = False, + last_n_tokens_size: int = 64, + # LoRA Params + lora_base: Optional[str] = None, + lora_scale: float = 1.0, + lora_path: Optional[str] = None, + # Backend Params + numa: Union[bool, int] = False, + # Chat Format Params + chat_format: Optional[str] = None, + chat_handler: Optional[llama_chat_format.LlamaChatCompletionHandler] = None, + # Speculative Decoding + draft_model: Optional[LlamaDraftModel] = None, + # Tokenizer Override + tokenizer: Optional[BaseLlamaTokenizer] = None, + # KV cache quantization + type_k: Optional[int] = None, + type_v: 
Optional[int] = None, + # Misc + spm_infill: bool = False, + verbose: bool = True, + # Extra Params + **kwargs, # type: ignore + ): + """Load a llama.cpp model from `model_path`. + + Examples: + Basic usage + + >>> import llama_cpp + >>> model = llama_cpp.Llama( + ... model_path="path/to/model", + ... ) + >>> print(model("The quick brown fox jumps ", stop=["."])["choices"][0]["text"]) + the lazy dog + + Loading a chat model + + >>> import llama_cpp + >>> model = llama_cpp.Llama( + ... model_path="path/to/model", + ... chat_format="llama-2", + ... ) + >>> print(model.create_chat_completion( + ... messages=[{ + ... "role": "user", + ... "content": "what is the meaning of life?" + ... }] + ... )) + + Args: + model_path: Path to the model. + n_gpu_layers: Number of layers to offload to GPU (-ngl). If -1, all layers are offloaded. + split_mode: How to split the model across GPUs. See llama_cpp.LLAMA_SPLIT_* for options. + main_gpu: main_gpu interpretation depends on split_mode: LLAMA_SPLIT_MODE_NONE: the GPU that is used for the entire model. LLAMA_SPLIT_MODE_ROW: the GPU that is used for small tensors and intermediate results. LLAMA_SPLIT_MODE_LAYER: ignored + tensor_split: How split tensors should be distributed across GPUs. If None, the model is not split. + rpc_servers: Comma separated list of RPC servers to use for offloading + vocab_only: Only load the vocabulary no weights. + use_mmap: Use mmap if possible. + use_mlock: Force the system to keep the model in RAM. + kv_overrides: Key-value overrides for the model. + seed: RNG seed, -1 for random + n_ctx: Text context, 0 = from model + n_batch: Prompt processing maximum batch size + n_ubatch: Physical batch size + n_threads: Number of threads to use for generation + n_threads_batch: Number of threads to use for batch processing + rope_scaling_type: RoPE scaling type, from `enum llama_rope_scaling_type`. ref: https://github.com/ggerganov/llama.cpp/pull/2054 + pooling_type: Pooling type, from `enum llama_pooling_type`. + rope_freq_base: RoPE base frequency, 0 = from model + rope_freq_scale: RoPE frequency scaling factor, 0 = from model + yarn_ext_factor: YaRN extrapolation mix factor, negative = from model + yarn_attn_factor: YaRN magnitude scaling factor + yarn_beta_fast: YaRN low correction dim + yarn_beta_slow: YaRN high correction dim + yarn_orig_ctx: YaRN original context size + logits_all: Return logits for all tokens, not just the last token. Must be True for completion to return logprobs. + embedding: Embedding mode only. + offload_kqv: Offload K, Q, V to GPU. + flash_attn: Use flash attention. + no_perf: Measure performance timings. + last_n_tokens_size: Maximum number of tokens to keep in the last_n_tokens deque. + lora_base: Optional path to base model, useful if using a quantized base model and you want to apply LoRA to an f16 model. + lora_path: Path to a LoRA file to apply to the model. + numa: numa policy + chat_format: String specifying the chat format to use when calling create_chat_completion. + chat_handler: Optional chat handler to use when calling create_chat_completion. + draft_model: Optional draft model to use for speculative decoding. + tokenizer: Optional tokenizer to override the default tokenizer from llama.cpp. + verbose: Print verbose output to stderr. + type_k: KV cache data type for K (default: f16) + type_v: KV cache data type for V (default: f16) + spm_infill: Use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. 
+ + Raises: + ValueError: If the model path does not exist. + + Returns: + A Llama instance. + """ + self.verbose = verbose + self._stack = contextlib.ExitStack() + + set_verbose(verbose) + + if not Llama.__backend_initialized: + with suppress_stdout_stderr(disable=verbose): + llama_cpp.llama_backend_init() + Llama.__backend_initialized = True + + if isinstance(numa, bool): + self.numa = ( + llama_cpp.GGML_NUMA_STRATEGY_DISTRIBUTE + if numa + else llama_cpp.GGML_NUMA_STRATEGY_DISABLED + ) + else: + self.numa = numa + + if self.numa != llama_cpp.GGML_NUMA_STRATEGY_DISABLED: + with suppress_stdout_stderr(disable=verbose): + llama_cpp.llama_numa_init(self.numa) + + self.model_path = model_path + + # Model Params + self.model_params = llama_cpp.llama_model_default_params() + self.model_params.n_gpu_layers = ( + 0x7FFFFFFF if n_gpu_layers == -1 else n_gpu_layers + ) # 0x7FFFFFFF is INT32 max, will be auto set to all layers + self.model_params.split_mode = split_mode + self.model_params.main_gpu = main_gpu + if rpc_servers is not None: + self.model_params.rpc_servers = rpc_servers.encode("utf-8") + self._rpc_servers = rpc_servers + else: + self._rpc_servers = None + self.tensor_split = tensor_split + self._c_tensor_split = None + if self.tensor_split is not None: + if len(self.tensor_split) > llama_cpp.LLAMA_MAX_DEVICES: + raise ValueError( + f"Attempt to split tensors that exceed maximum supported devices. Current LLAMA_MAX_DEVICES={llama_cpp.LLAMA_MAX_DEVICES}" + ) + # Type conversion and expand the list to the length of LLAMA_MAX_DEVICES + FloatArray = ctypes.c_float * llama_cpp.LLAMA_MAX_DEVICES + self._c_tensor_split = FloatArray( + *tensor_split # type: ignore + ) # keep a reference to the array so it is not gc'd + self.model_params.tensor_split = self._c_tensor_split + self.model_params.vocab_only = vocab_only + self.model_params.use_mmap = use_mmap if lora_path is None else False + self.model_params.use_mlock = use_mlock + + # kv_overrides is the original python dict + self.kv_overrides = kv_overrides + if kv_overrides is not None: + # _kv_overrides_array is a ctypes.Array of llama_model_kv_override Structs + kvo_array_len = len(kv_overrides) + 1 # for sentinel element + self._kv_overrides_array = ( + llama_cpp.llama_model_kv_override * kvo_array_len + )() + + for i, (k, v) in enumerate(kv_overrides.items()): + self._kv_overrides_array[i].key = k.encode("utf-8") + if isinstance(v, bool): + self._kv_overrides_array[ + i + ].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_BOOL + self._kv_overrides_array[i].value.val_bool = v + elif isinstance(v, int): + self._kv_overrides_array[ + i + ].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_INT + self._kv_overrides_array[i].value.val_i64 = v + elif isinstance(v, float): + self._kv_overrides_array[ + i + ].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_FLOAT + self._kv_overrides_array[i].value.val_f64 = v + elif isinstance(v, str): # type: ignore + v_bytes = v.encode("utf-8") + if len(v_bytes) > 128: # TODO: Make this a constant + raise ValueError(f"Value for {k} is too long: {v}") + v_bytes = v_bytes.ljust(128, b"\0") + self._kv_overrides_array[ + i + ].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_STR + # copy min(v_bytes, 128) to str_value + address = typing.cast( + int, + ctypes.addressof(self._kv_overrides_array[i].value) + + llama_cpp.llama_model_kv_override_value.val_str.offset, + ) + buffer_start = ctypes.cast(address, ctypes.POINTER(ctypes.c_char)) + ctypes.memmove( + buffer_start, + v_bytes, + 128, + ) + else: + raise ValueError(f"Unknown value type for {k}: {v}") + + 
self._kv_overrides_array[ + -1 + ].key = b"\0" # ensure sentinel element is zeroed + self.model_params.kv_overrides = self._kv_overrides_array + + self.n_batch = min(n_ctx, n_batch) # ??? + self.n_threads = n_threads or max(multiprocessing.cpu_count() // 2, 1) + self.n_threads_batch = n_threads_batch or multiprocessing.cpu_count() + + # Used by the sampler + self._seed = seed or llama_cpp.LLAMA_DEFAULT_SEED + + # Context Params + self.context_params = llama_cpp.llama_context_default_params() + self.context_params.n_ctx = n_ctx + self.context_params.n_batch = self.n_batch + self.context_params.n_ubatch = min(self.n_batch, n_ubatch) + self.context_params.n_threads = self.n_threads + self.context_params.n_threads_batch = self.n_threads_batch + self.context_params.rope_scaling_type = ( + rope_scaling_type + if rope_scaling_type is not None + else llama_cpp.LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED + ) + self.context_params.pooling_type = pooling_type + self.context_params.rope_freq_base = ( + rope_freq_base if rope_freq_base != 0.0 else 0 + ) + self.context_params.rope_freq_scale = ( + rope_freq_scale if rope_freq_scale != 0.0 else 0 + ) + self.context_params.yarn_ext_factor = ( + yarn_ext_factor if yarn_ext_factor != 0.0 else 0 + ) + self.context_params.yarn_attn_factor = ( + yarn_attn_factor if yarn_attn_factor != 0.0 else 0 + ) + self.context_params.yarn_beta_fast = ( + yarn_beta_fast if yarn_beta_fast != 0.0 else 0 + ) + self.context_params.yarn_beta_slow = ( + yarn_beta_slow if yarn_beta_slow != 0.0 else 0 + ) + self.context_params.yarn_orig_ctx = yarn_orig_ctx if yarn_orig_ctx != 0 else 0 + self.context_params.logits_all = ( + logits_all if draft_model is None else True + ) # Must be set to True for speculative decoding + self.context_params.embeddings = embedding # TODO: Rename to embeddings + self.context_params.offload_kqv = offload_kqv + self.context_params.flash_attn = flash_attn + # KV cache quantization + if type_k is not None: + self.context_params.type_k = type_k + if type_v is not None: + self.context_params.type_v = type_v + # Sampling Params + self.context_params.no_perf = no_perf + self.last_n_tokens_size = last_n_tokens_size + + self.cache: Optional[BaseLlamaCache] = None + + self.lora_base = lora_base + self.lora_scale = lora_scale + self.lora_path = lora_path + + self.spm_infill = spm_infill + + if not os.path.exists(model_path): + raise ValueError(f"Model path does not exist: {model_path}") + + self._model = self._stack.enter_context( + contextlib.closing( + internals.LlamaModel( + path_model=self.model_path, + params=self.model_params, + verbose=self.verbose, + ) + ) + ) + + # Override tokenizer + self.tokenizer_ = tokenizer or LlamaTokenizer(self) + + # Set the default value for the context and correct the batch + if n_ctx == 0: + n_ctx = self._model.n_ctx_train() + self.n_batch = min(n_ctx, n_batch) + self.context_params.n_ctx = self._model.n_ctx_train() + self.context_params.n_batch = self.n_batch + self.context_params.n_ubatch = min(self.n_batch, n_ubatch) + + self._ctx = self._stack.enter_context( + contextlib.closing( + internals.LlamaContext( + model=self._model, + params=self.context_params, + verbose=self.verbose, + ) + ) + ) + + self._batch = self._stack.enter_context( + contextlib.closing( + internals.LlamaBatch( + n_tokens=self.n_batch, + embd=0, + n_seq_max=self.context_params.n_ctx, + verbose=self.verbose, + ) + ) + ) + + self._lora_adapter: Optional[llama_cpp.llama_adapter_lora_p] = None + + if self.lora_path: + self._lora_adapter = 
llama_cpp.llama_adapter_lora_init( + self._model.model, + self.lora_path.encode("utf-8"), + ) + if self._lora_adapter is None: + raise RuntimeError( + f"Failed to initialize LoRA adapter from lora path: {self.lora_path}" + ) + + def free_lora_adapter(): + if self._lora_adapter is None: + return + llama_cpp.llama_adapter_lora_free(self._lora_adapter) + self._lora_adapter = None + + self._stack.callback(free_lora_adapter) + + if llama_cpp.llama_set_adapter_lora( + self._ctx.ctx, self._lora_adapter, self.lora_scale + ): + raise RuntimeError( + f"Failed to set LoRA adapter from lora path: {self.lora_path}" + ) + + if self.verbose: + print(llama_cpp.llama_print_system_info().decode("utf-8"), file=sys.stderr) + + self.chat_format = chat_format + self.chat_handler = chat_handler + self._chat_handlers: Dict[ + str, llama_chat_format.LlamaChatCompletionHandler + ] = {} + + self.draft_model = draft_model + + self._n_vocab = self.n_vocab() + self._n_ctx = self.n_ctx() + + self._token_nl = self.token_nl() + self._token_eos = self.token_eos() + + self._candidates = internals.LlamaTokenDataArray(n_vocab=self._n_vocab) + + self.n_tokens = 0 + self.input_ids: npt.NDArray[np.intc] = np.ndarray((n_ctx,), dtype=np.intc) + self.scores: npt.NDArray[np.single] = np.ndarray( + (n_ctx if logits_all == True else n_batch, self._n_vocab), dtype=np.single + ) + + self._mirostat_mu = ctypes.c_float( + 2.0 * 5.0 + ) # TODO: Move this to sampling context + + try: + self.metadata = self._model.metadata() + except Exception as e: + self.metadata = {} + if self.verbose: + print(f"Failed to load metadata: {e}", file=sys.stderr) + + if self.verbose: + print(f"Model metadata: {self.metadata}", file=sys.stderr) + + eos_token_id = self.token_eos() + bos_token_id = self.token_bos() + + eos_token = ( + self._model.token_get_text(eos_token_id) if eos_token_id != -1 else "" + ) + bos_token = ( + self._model.token_get_text(bos_token_id) if bos_token_id != -1 else "" + ) + + # Unfortunately the llama.cpp API does not return metadata arrays, so we can't get template names from tokenizer.chat_templates + template_choices = dict( + (name[10:], template) + for name, template in self.metadata.items() + if name.startswith("tokenizer.chat_template.") + ) + + if "tokenizer.chat_template" in self.metadata: + template_choices["chat_template.default"] = self.metadata[ + "tokenizer.chat_template" + ] + + if self.verbose and template_choices: + print( + f"Available chat formats from metadata: {', '.join(template_choices.keys())}", + file=sys.stderr, + ) + + for name, template in template_choices.items(): + self._chat_handlers[name] = llama_chat_format.Jinja2ChatFormatter( + template=template, + eos_token=eos_token, + bos_token=bos_token, + stop_token_ids=[eos_token_id], + ).to_chat_handler() + + if ( + self.chat_format is None + and self.chat_handler is None + and "chat_template.default" in template_choices + ): + chat_format = llama_chat_format.guess_chat_format_from_gguf_metadata( + self.metadata + ) + + if chat_format is not None: + self.chat_format = chat_format + if self.verbose: + print(f"Guessed chat format: {chat_format}", file=sys.stderr) + else: + if self.verbose: + print( + f"Using gguf chat template: {template_choices['chat_template.default']}", + file=sys.stderr, + ) + print(f"Using chat eos_token: {eos_token}", file=sys.stderr) + print(f"Using chat bos_token: {bos_token}", file=sys.stderr) + + self.chat_format = "chat_template.default" + + if self.chat_format is None and self.chat_handler is None: + self.chat_format = "llama-2" + if 
self.verbose: + print( + f"Using fallback chat format: {self.chat_format}", file=sys.stderr + ) + + self._sampler = None + + @property + def ctx(self) -> llama_cpp.llama_context_p: + return self._ctx.ctx + + @property + def model(self) -> llama_cpp.llama_model_p: + return self._model.model + + @property + def _input_ids(self) -> npt.NDArray[np.intc]: + return self.input_ids[: self.n_tokens] + + @property + def _scores(self) -> npt.NDArray[np.single]: + return self.scores[: self.n_tokens, :] + + @property + def eval_tokens(self) -> Deque[int]: + return deque(self.input_ids[: self.n_tokens].tolist(), maxlen=self._n_ctx) + + @property + def eval_logits(self) -> Deque[List[float]]: + return deque( + self.scores[: self.n_tokens, :].tolist(), + maxlen=self._n_ctx if self.context_params.logits_all else 1, + ) + + def tokenize( + self, text: bytes, add_bos: bool = True, special: bool = False + ) -> List[int]: + """Tokenize a string. + + Args: + text: The utf-8 encoded string to tokenize. + add_bos: Whether to add a beginning of sequence token. + special: Whether to tokenize special tokens. + + Raises: + RuntimeError: If the tokenization failed. + + Returns: + A list of tokens. + """ + return self.tokenizer_.tokenize(text, add_bos, special) + + def detokenize( + self, + tokens: List[int], + prev_tokens: Optional[List[int]] = None, + special: bool = False, + ) -> bytes: + """Detokenize a list of tokens. + + Args: + tokens: The list of tokens to detokenize. + prev_tokens: The list of previous tokens. Offset mapping will be performed if provided. + special: Whether to detokenize special tokens. + + Returns: + The detokenized string. + """ + return self.tokenizer_.detokenize( + tokens, prev_tokens=prev_tokens, special=special + ) + + def set_cache(self, cache: Optional[BaseLlamaCache]): + """Set the cache. + + Args: + cache: The cache to set. + """ + self.cache = cache + + def set_seed(self, seed: int): + """Set the random seed. + + Args: + seed: The random seed. + """ + self._seed = seed + + def reset(self): + """Reset the model state.""" + self.n_tokens = 0 + + def eval(self, tokens: Sequence[int]): + """Evaluate a list of tokens. + + Args: + tokens: The list of tokens to evaluate. 
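+
+        Examples:
+            Illustrative only; the model path below is a placeholder:
+
+            >>> llama = Llama("models/ggml-7b.gguf")
+            >>> llama.eval(llama.tokenize(b"Hello, world!"))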
+ """ + self._ctx.kv_cache_seq_rm(-1, self.n_tokens, -1) + for i in range(0, len(tokens), self.n_batch): + batch = tokens[i : min(len(tokens), i + self.n_batch)] + n_past = self.n_tokens + n_tokens = len(batch) + self._batch.set_batch( + batch=batch, n_past=n_past, logits_all=self.context_params.logits_all + ) + self._ctx.decode(self._batch) + # Save tokens + self.input_ids[n_past : n_past + n_tokens] = batch + # Save logits + if self.context_params.logits_all: + rows = n_tokens + cols = self._n_vocab + logits = np.ctypeslib.as_array( + self._ctx.get_logits(), shape=(rows * cols,) + ) + self.scores[n_past : n_past + n_tokens, :].reshape(-1)[::] = logits + else: + # rows = 1 + # cols = self._n_vocab + # logits = np.ctypeslib.as_array( + # self._ctx.get_logits(), shape=(rows * cols,) + # ) + # self.scores[n_past + n_tokens - 1, :].reshape(-1)[::] = logits + # NOTE: Now that sampling is done inside the sampler, logits are only needed for logprobs which requires logits_all + pass + # Update n_tokens + self.n_tokens += n_tokens + + def _init_sampler( + self, + top_k: int = 40, + top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, + temp: float = 0.80, + repeat_penalty: float = 1.0, + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_eta: float = 0.1, + mirostat_tau: float = 5.0, + penalize_nl: bool = True, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + ): + sampler = internals.LlamaSampler() + + if logits_processor is not None: + # Create and add a custom sampler + def apply_func(token_data_array: llama_cpp.llama_token_data_array_p): + size = token_data_array.contents.size + data_soa = token_data_array.contents.data + data_soa_address = ctypes.addressof(data_soa.contents) + # NOTE: This is probably broken + recarray = np.recarray( + shape=(size,), + dtype=np.dtype( + [("id", np.intc), ("logit", np.single), ("p", np.single)], + align=True, + ), + buf=(llama_cpp.llama_token_data * size).from_address( + data_soa_address + ), + ) + for logit_processor in logits_processor: + recarray.logit[:] = logit_processor(self._input_ids, recarray.logit) + + sampler.add_custom(apply_func) + + sampler.add_penalties( + n_vocab=self._n_vocab, + special_eos_id=self._token_eos, + linefeed_id=self._token_nl, + penalty_last_n=self.last_n_tokens_size, + penalty_repeat=repeat_penalty, + penalty_freq=frequency_penalty, + penalty_present=presence_penalty, + penalize_nl=penalize_nl, + ignore_eos=False, + ) + + if grammar is not None: + sampler.add_grammar(self._model, grammar) + + if temp < 0.0: + sampler.add_softmax() + sampler.add_dist(self._seed) + elif temp == 0.0: + sampler.add_greedy() + else: + if mirostat_mode == 1: + mirostat_m = 100 + sampler.add_mirostat( + self._n_vocab, + self._seed, + mirostat_tau, + mirostat_eta, + mirostat_m, + ) + elif mirostat_mode == 2: + sampler.add_mirostat_v2( + self._seed, + mirostat_tau, + mirostat_eta, + ) + else: + n_probs = 0 + min_keep = max(1, n_probs) + sampler.add_top_k(top_k) + sampler.add_typical(typical_p, min_keep) + sampler.add_top_p(top_p, min_keep) + sampler.add_min_p(min_p, min_keep) + sampler.add_temp(temp) + sampler.add_dist(self._seed) + return sampler + + def sample( + self, + top_k: int = 40, + top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, + temp: float = 0.80, + repeat_penalty: float = 1.0, + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + tfs_z: float = 1.0, + mirostat_mode: 
int = 0, + mirostat_eta: float = 0.1, + mirostat_tau: float = 5.0, + penalize_nl: bool = True, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + idx: Optional[int] = None, + ): + """Sample a token from the model. + + Args: + top_k: The top-k sampling parameter. + top_p: The top-p sampling parameter. + temp: The temperature parameter. + repeat_penalty: The repeat penalty parameter. + + Returns: + The sampled token. + """ + assert self.n_tokens > 0 + + tmp_sampler = False + + if self._sampler is None: + tmp_sampler = True + self._sampler = self._init_sampler( + top_k=top_k, + top_p=top_p, + min_p=min_p, + typical_p=typical_p, + temp=temp, + repeat_penalty=repeat_penalty, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + penalize_nl=penalize_nl, + logits_processor=logits_processor, + grammar=grammar, + ) + + ridx = idx - self.n_tokens if idx is not None else -1 + + assert self.ctx is not None + token = self._sampler.sample(self._ctx, ridx) + if tmp_sampler: + self._sampler = None + return token + + def generate( + self, + tokens: Sequence[int], + top_k: int = 40, + top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, + temp: float = 0.80, + repeat_penalty: float = 1.0, + reset: bool = True, + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + penalize_nl: bool = True, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + grammar: Optional[LlamaGrammar] = None, + ) -> Generator[int, Optional[Sequence[int]], None]: + """Create a generator of tokens from a prompt. + + Examples: + >>> llama = Llama("models/ggml-7b.bin") + >>> tokens = llama.tokenize(b"Hello, world!") + >>> for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.0): + ... print(llama.detokenize([token])) + + Args: + tokens: The prompt tokens. + top_k: The top-k sampling parameter. + top_p: The top-p sampling parameter. + temp: The temperature parameter. + repeat_penalty: The repeat penalty parameter. + reset: Whether to reset the model state. + + Yields: + The generated tokens. 
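+
+            Tokens passed back into the generator with `generator.send(tokens)` are
+            queued for evaluation at the next decoding step.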
+ """ + # Reset mirostat sampling + self._mirostat_mu = ctypes.c_float(2.0 * mirostat_tau) + self._sampler = self._init_sampler( + top_k=top_k, + top_p=top_p, + min_p=min_p, + typical_p=typical_p, + temp=temp, + repeat_penalty=repeat_penalty, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + penalize_nl=penalize_nl, + logits_processor=logits_processor, + grammar=grammar, + ) + + # Check for kv cache prefix match + if reset and self.n_tokens > 0: + longest_prefix = 0 + for a, b in zip(self._input_ids, tokens[:-1]): + if a == b: + longest_prefix += 1 + else: + break + if longest_prefix > 0: + reset = False + tokens = tokens[longest_prefix:] + self.n_tokens = longest_prefix + if self.verbose: + print( + f"Llama.generate: {longest_prefix} prefix-match hit, " + f"remaining {len(tokens)} prompt tokens to eval", + file=sys.stderr, + ) + + # Reset the model state + if reset: + self.reset() + + # # Reset the grammar + # if grammar is not None: + # grammar.reset() + + sample_idx = self.n_tokens + len(tokens) - 1 + tokens = list(tokens) + + # Eval and sample + while True: + self.eval(tokens) + while sample_idx < self.n_tokens: + token = self.sample( + top_k=top_k, + top_p=top_p, + min_p=min_p, + typical_p=typical_p, + temp=temp, + repeat_penalty=repeat_penalty, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + logits_processor=logits_processor, + grammar=grammar, + penalize_nl=penalize_nl, + idx=sample_idx, + ) + + sample_idx += 1 + if stopping_criteria is not None and stopping_criteria( + self._input_ids[: sample_idx], self._scores[sample_idx - self.n_tokens, :] + ): + return + tokens_or_none = yield token + tokens.clear() + tokens.append(token) + if tokens_or_none is not None: + tokens.extend(tokens_or_none) + + if sample_idx < self.n_tokens and token != self._input_ids[sample_idx]: + self.n_tokens = sample_idx + self._ctx.kv_cache_seq_rm(-1, self.n_tokens, -1) + break + + if self.draft_model is not None: + self.input_ids[self.n_tokens : self.n_tokens + len(tokens)] = tokens + draft_tokens = self.draft_model( + self.input_ids[: self.n_tokens + len(tokens)] + ) + tokens.extend( + draft_tokens.astype(int)[ + : self._n_ctx - self.n_tokens - len(tokens) + ] + ) + + def create_embedding( + self, input: Union[str, List[str]], model: Optional[str] = None + ) -> CreateEmbeddingResponse: + """Embed a string. + + Args: + input: The utf-8 encoded string to embed. + + Returns: + An embedding object. + """ + model_name: str = model if model is not None else self.model_path + + input = input if isinstance(input, list) else [input] + + # get numeric embeddings + embeds: Union[List[List[float]], List[List[List[float]]]] + total_tokens: int + embeds, total_tokens = self.embed(input, return_count=True) # type: ignore + + # convert to CreateEmbeddingResponse + data: List[Embedding] = [ + { + "object": "embedding", + "embedding": emb, + "index": idx, + } + for idx, emb in enumerate(embeds) + ] + + return { + "object": "list", + "data": data, + "model": model_name, + "usage": { + "prompt_tokens": total_tokens, + "total_tokens": total_tokens, + }, + } + + def embed( + self, + input: Union[str, List[str]], + normalize: bool = False, + truncate: bool = True, + return_count: bool = False, + ): + """Embed a string. 
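+
+        The model must have been created with `embedding=True`; otherwise this
+        method raises a RuntimeError.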
+ + Args: + input: The utf-8 encoded string to embed. + + Returns: + A list of embeddings + """ + n_embd = self.n_embd() + n_batch = self.n_batch + + # get pooling information + pooling_type = self.pooling_type() + logits_all = pooling_type == llama_cpp.LLAMA_POOLING_TYPE_NONE + + if self.context_params.embeddings is False: + raise RuntimeError( + "Llama model must be created with embedding=True to call this method" + ) + + if self.verbose: + llama_cpp.llama_perf_context_reset(self._ctx.ctx) + + if isinstance(input, str): + inputs = [input] + else: + inputs = input + + # reset batch + self._batch.reset() + + # decode and fetch embeddings + data: Union[List[List[float]], List[List[List[float]]]] = [] + + def decode_batch(seq_sizes: List[int]): + llama_cpp.llama_kv_cache_clear(self._ctx.ctx) + self._ctx.decode(self._batch) + self._batch.reset() + + # store embeddings + if pooling_type == llama_cpp.LLAMA_POOLING_TYPE_NONE: + pos: int = 0 + for i, size in enumerate(seq_sizes): + ptr = llama_cpp.llama_get_embeddings(self._ctx.ctx) + embedding: List[List[float]] = [ + ptr[pos + j * n_embd : pos + (j + 1) * n_embd] + for j in range(size) + ] + if normalize: + embedding = [ + internals.normalize_embedding(e) for e in embedding + ] + data.append(embedding) + pos += size + else: + for i in range(len(seq_sizes)): + ptr = llama_cpp.llama_get_embeddings_seq(self._ctx.ctx, i) + embedding: List[float] = ptr[:n_embd] + if normalize: + embedding = internals.normalize_embedding(embedding) + data.append(embedding) + + # init state + total_tokens = 0 + s_batch = [] + t_batch = 0 + p_batch = 0 + + # accumulate batches and encode + for text in inputs: + tokens = self.tokenize(text.encode("utf-8")) + if truncate: + tokens = tokens[:n_batch] + + n_tokens = len(tokens) + total_tokens += n_tokens + + # check for overrun + if n_tokens > n_batch: + raise ValueError( + f"Requested tokens ({n_tokens}) exceed batch size of {n_batch}" + ) + + # time to eval batch + if t_batch + n_tokens > n_batch: + decode_batch(s_batch) + s_batch = [] + t_batch = 0 + p_batch = 0 + + # add to batch + self._batch.add_sequence(tokens, p_batch, logits_all) + + # update batch stats + s_batch.append(n_tokens) + t_batch += n_tokens + p_batch += 1 + + # hanlde last batch + decode_batch(s_batch) + + if self.verbose: + llama_cpp.llama_perf_context_print(self._ctx.ctx) + + output = data[0] if isinstance(input, str) else data + + llama_cpp.llama_kv_cache_clear(self._ctx.ctx) + self.reset() + + if return_count: + return output, total_tokens + else: + return output + + def _create_completion( + self, + prompt: Union[str, List[int]], + suffix: Optional[str] = None, + max_tokens: Optional[int] = 16, + temperature: float = 0.8, + top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, + logprobs: Optional[int] = None, + echo: bool = False, + stop: Optional[Union[str, List[str]]] = [], + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + repeat_penalty: float = 1.0, + top_k: int = 40, + stream: bool = False, + seed: Optional[int] = None, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + logit_bias: Optional[Dict[int, float]] = None, + ) -> Union[ + Iterator[CreateCompletionResponse], Iterator[CreateCompletionStreamResponse] + ]: + assert suffix is None or suffix.__class__ is str + 
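+        # The next block resolves the special tokens used to assemble the prompt:
+        # BOS/CLS and EOS/SEP markers, plus the fill-in-middle prefix/suffix/middle
+        # tokens that only apply when a suffix is supplied for infill-style completion.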
+ completion_id: str = f"cmpl-{str(uuid.uuid4())}" + created: int = int(time.time()) + bos_token_id: int = self.token_bos() + cls_token_id: int = self._model.token_cls() + sep_token_id: int = self._model.token_sep() + prefix_token_id: int = 0 # self._model.token_prefix() # TODO: Fix + middle_token_id: int = 0 # self._model.token_middle() # TODO: Fix + suffix_token_id: int = 0 # self._model.token_suffix() # TODO: Fix + add_space_prefix: bool = ( + self.metadata.get("tokenizer.ggml.add_space_prefix", "true") == "true" + ) + bos_tokens: List[int] = [cls_token_id if cls_token_id != -1 else bos_token_id] + eos_tokens: List[int] = [ + sep_token_id if sep_token_id != -1 else self.token_eos() + ] + + if ( + (isinstance(prompt, list) and suffix is None) + or not self._model.add_bos_token() + or bos_tokens[:1] == [-1] + ): + bos_tokens = [] + + if (isinstance(prompt, list) and suffix is None) or ( + not self._model.add_eos_token() and sep_token_id == -1 + ): + eos_tokens = [] + + suffix_space_prefix: int = 0 + # Tokenizer hack to remove leading space + if add_space_prefix and suffix_token_id >= 0 and suffix: + suffix = "☺" + suffix + suffix_space_prefix = 2 + + # If prompt is empty, initialize completion with BOS token to avoid + # detokenization including a space at the beginning of the completion + completion_tokens: List[int] = [] if len(prompt) > 0 else [bos_token_id] + # Add blank space to start of prompt to match OG llama tokenizer + prefix_tokens: List[int] = ( + [prefix_token_id] if prefix_token_id >= 0 and suffix is not None else [] + ) + ( + ( + self.tokenize( + prompt.encode("utf-8"), + add_bos=False, + special=(prefix_token_id < 0 or suffix is None), + ) + if prompt != "" + else [] + ) + if isinstance(prompt, str) + else prompt + ) + suffix_tokens: List[int] = ( + ( + [suffix_token_id] + + ( + self.tokenize(suffix.encode("utf-8"), add_bos=False, special=False)[ + suffix_space_prefix: + ] + if suffix + else [] + ) + ) + if suffix_token_id >= 0 and suffix is not None + else [] + ) + middle_tokens: List[int] = ( + [middle_token_id] if middle_token_id >= 0 and suffix is not None else [] + ) + prompt_tokens: List[int] = ( + bos_tokens + + ( + (suffix_tokens + prefix_tokens + middle_tokens) + if self.spm_infill + else (prefix_tokens + suffix_tokens + middle_tokens) + ) + + eos_tokens + ) + text: bytes = b"" + returned_tokens: int = 0 + stop = ( + stop if isinstance(stop, list) else [stop] if isinstance(stop, str) else [] + ) + model_name: str = model if model is not None else self.model_path + + if prompt_tokens[:2] == [self.token_bos()] * 2: + warnings.warn( + f'Detected duplicate leading "{self._model.token_get_text(self.token_bos())}" in prompt, this will likely reduce response quality, consider removing it...', + RuntimeWarning, + ) + + # NOTE: This likely doesn't work correctly for the first token in the prompt + # because of the extra space added to the start of the prompt_tokens + if logit_bias is not None: + logit_bias_map = {int(k): float(v) for k, v in logit_bias.items()} + + def logit_bias_processor( + input_ids: npt.NDArray[np.intc], + scores: npt.NDArray[np.single], + ) -> npt.NDArray[np.single]: + new_scores = np.copy( + scores + ) # Does it make sense to copy the whole array or can we just overwrite the original one? 
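+                # Shift each targeted token id's logit by the user-supplied bias.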
+ for input_id, score in logit_bias_map.items(): + new_scores[input_id] = score + scores[input_id] + return new_scores + + _logit_bias_processor = LogitsProcessorList([logit_bias_processor]) + if logits_processor is None: + logits_processor = _logit_bias_processor + else: + logits_processor = logits_processor.extend(_logit_bias_processor) + + if self.verbose: + self._ctx.reset_timings() + + if len(prompt_tokens) >= self._n_ctx: + raise ValueError( + f"Requested tokens ({len(prompt_tokens)}) exceed context window of {llama_cpp.llama_n_ctx(self.ctx)}" + ) + + if max_tokens is None or max_tokens <= 0: + # Unlimited, depending on n_ctx. + max_tokens = self._n_ctx - len(prompt_tokens) + + # Truncate max_tokens if requested tokens would exceed the context window + max_tokens = ( + max_tokens + if max_tokens + len(prompt_tokens) < self._n_ctx + else (self._n_ctx - len(prompt_tokens)) + ) + + if stop != []: + stop_sequences = [s.encode("utf-8") for s in stop] + else: + stop_sequences = [] + + if logprobs is not None and self.context_params.logits_all is False: + raise ValueError( + "logprobs is not supported for models created with logits_all=False" + ) + + if self.cache: + try: + cache_item = self.cache[prompt_tokens] + cache_prefix_len = Llama.longest_token_prefix( + cache_item.input_ids.tolist(), prompt_tokens + ) + eval_prefix_len = Llama.longest_token_prefix( + self._input_ids.tolist(), prompt_tokens + ) + if cache_prefix_len > eval_prefix_len: + self.load_state(cache_item) + if self.verbose: + print("Llama._create_completion: cache hit", file=sys.stderr) + except KeyError: + if self.verbose: + print("Llama._create_completion: cache miss", file=sys.stderr) + + if seed is not None: + self.set_seed(seed) + else: + self.set_seed(random.Random(self._seed).randint(0, 2 ** 32)) + + finish_reason = "length" + multibyte_fix = 0 + for token in self.generate( + prompt_tokens, + top_k=top_k, + top_p=top_p, + min_p=min_p, + typical_p=typical_p, + temp=temperature, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + repeat_penalty=repeat_penalty, + stopping_criteria=stopping_criteria, + logits_processor=logits_processor, + grammar=grammar, + ): + if llama_cpp.llama_token_is_eog(self._model.vocab, token): + text = self.detokenize(completion_tokens, prev_tokens=prompt_tokens) + finish_reason = "stop" + break + + completion_tokens.append(token) + + all_text = self.detokenize(completion_tokens, prev_tokens=prompt_tokens) + + # Contains multi-byte UTF8 + for k, char in enumerate(all_text[-3:]): + k = 3 - k + for num, pattern in [(2, 192), (3, 224), (4, 240)]: + # Bitwise AND check + if num > k and pattern & char == pattern: + multibyte_fix = num - k + + # Stop incomplete bytes from passing + if multibyte_fix > 0: + multibyte_fix -= 1 + continue + + any_stop = [s for s in stop_sequences if s in all_text] + if len(any_stop) > 0: + first_stop = any_stop[0] + text = all_text[: all_text.index(first_stop)] + finish_reason = "stop" + break + + if stream: + remaining_tokens = completion_tokens[returned_tokens:] + remaining_text = self.detokenize( + remaining_tokens, + prev_tokens=prompt_tokens + completion_tokens[:returned_tokens], + ) + remaining_length = len(remaining_text) + + # We want to avoid yielding any characters from + # the generated text if they are part of a stop + # sequence. 
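+                # Find the longest tail of the pending text that matches the start of any
+                # stop sequence; those bytes are held back until we know whether the stop
+                # sequence actually completes.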
+ first_stop_position = 0 + for s in stop_sequences: + for i in range(min(len(s), remaining_length), 0, -1): + if remaining_text.endswith(s[:i]): + if i > first_stop_position: + first_stop_position = i + break + + token_end_position = 0 + + if logprobs is not None: + # not sure how to handle this branch when dealing + # with CJK output, so keep it unchanged + for token in remaining_tokens: + if token == bos_token_id: + continue + token_end_position += len( + self.detokenize( + [token], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ) + ) + # Check if stop sequence is in the token + if token_end_position > ( + remaining_length - first_stop_position + ): + break + token_str = self.detokenize( + [token], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ).decode("utf-8", errors="ignore") + text_offset = len(prompt) + len( + self.detokenize( + completion_tokens[:returned_tokens], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ).decode("utf-8", errors="ignore") + ) + token_offset = len(prompt_tokens) + returned_tokens + logits = self._scores[token_offset - 1, :] + current_logprobs = Llama.logits_to_logprobs(logits).tolist() + sorted_logprobs = list( + sorted( + zip(current_logprobs, range(len(current_logprobs))), + reverse=True, + ) + ) + top_logprob = { + self.detokenize([i]).decode( + "utf-8", errors="ignore" + ): logprob + for logprob, i in sorted_logprobs[:logprobs] + } + top_logprob.update({token_str: current_logprobs[int(token)]}) + logprobs_or_none = { + "tokens": [ + self.detokenize( + [token], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ).decode("utf-8", errors="ignore") + ], + "text_offset": [text_offset], + "token_logprobs": [current_logprobs[int(token)]], + "top_logprobs": [top_logprob], + } + returned_tokens += 1 + yield { + "id": completion_id, + "object": "text_completion", + "created": created, + "model": model_name, + "choices": [ + { + "text": self.detokenize( + [token], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ).decode("utf-8", errors="ignore"), + "index": 0, + "logprobs": logprobs_or_none, + "finish_reason": None, + } + ], + } + else: + while len(remaining_tokens) > 0: + decode_success = False + for i in range(1, len(remaining_tokens) + 1): + try: + bs = self.detokenize( + remaining_tokens[:i], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ) + ts = bs.decode("utf-8") + decode_success = True + break + except UnicodeError: + pass + else: + break + if not decode_success: + # all remaining tokens cannot be decoded to a UTF-8 character + break + token_end_position += len(bs) + if token_end_position > ( + remaining_length - first_stop_position + ): + break + remaining_tokens = remaining_tokens[i:] + returned_tokens += i + + yield { + "id": completion_id, + "object": "text_completion", + "created": created, + "model": model_name, + "choices": [ + { + "text": ts, + "index": 0, + "logprobs": None, + "finish_reason": None, + } + ], + } + + if len(completion_tokens) >= max_tokens: + text = self.detokenize(completion_tokens, prev_tokens=prompt_tokens) + finish_reason = "length" + break + + if stopping_criteria is not None and stopping_criteria( + self._input_ids, self._scores[-1, :] + ): + text = self.detokenize(completion_tokens, prev_tokens=prompt_tokens) + finish_reason = "stop" + + if self.verbose: + self._ctx.print_timings() + + if stream: + remaining_tokens = completion_tokens[returned_tokens:] + remaining_text = self.detokenize( + 
remaining_tokens, + prev_tokens=prompt_tokens + completion_tokens[:returned_tokens], + ) + any_stop = [s for s in stop_sequences if s in remaining_text] + if len(any_stop) > 0: + end = min(remaining_text.index(stop) for stop in any_stop) + else: + end = len(remaining_text) + + token_end_position = 0 + for token in remaining_tokens: + token_end_position += len( + self.detokenize( + [token], + prev_tokens=prompt_tokens + completion_tokens[:returned_tokens], + ) + ) + + logprobs_or_none: Optional[CompletionLogprobs] = None + if logprobs is not None: + if token == bos_token_id: + continue + token_str = self.detokenize([token]).decode( + "utf-8", errors="ignore" + ) + text_offset = len(prompt) + len( + self.detokenize( + completion_tokens[:returned_tokens], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ) + ) + token_offset = len(prompt_tokens) + returned_tokens - 1 + logits = self._scores[token_offset, :] + current_logprobs = Llama.logits_to_logprobs(logits).tolist() + sorted_logprobs = list( + sorted( + zip(current_logprobs, range(len(current_logprobs))), + reverse=True, + ) + ) + top_logprob = { + self.detokenize([i]).decode("utf-8", errors="ignore"): logprob + for logprob, i in sorted_logprobs[:logprobs] + } + top_logprob.update({token_str: current_logprobs[int(token)]}) + logprobs_or_none = { + "tokens": [ + self.detokenize([token]).decode("utf-8", errors="ignore") + ], + "text_offset": [text_offset], + "token_logprobs": [current_logprobs[int(token)]], + "top_logprobs": [top_logprob], + } + + if token_end_position >= end: + last_text = self.detokenize([token]) + if token_end_position == end - 1: + break + returned_tokens += 1 + yield { + "id": completion_id, + "object": "text_completion", + "created": created, + "model": model_name, + "choices": [ + { + "text": last_text[ + : len(last_text) - (token_end_position - end) + ].decode("utf-8", errors="ignore"), + "index": 0, + "logprobs": logprobs_or_none, + "finish_reason": None, + } + ], + } + break + returned_tokens += 1 + yield { + "id": completion_id, + "object": "text_completion", + "created": created, + "model": model_name, + "choices": [ + { + "text": self.detokenize([token]).decode( + "utf-8", errors="ignore" + ), + "index": 0, + "logprobs": logprobs_or_none, + "finish_reason": None, + } + ], + } + yield { + "id": completion_id, + "object": "text_completion", + "created": created, + "model": model_name, + "choices": [ + { + "text": "", + "index": 0, + "logprobs": None, + "finish_reason": finish_reason, + } + ], + } + if self.cache: + if self.verbose: + print("Llama._create_completion: cache save", file=sys.stderr) + self.cache[prompt_tokens + completion_tokens] = self.save_state() + if self.verbose: + print("Llama._create_completion: cache saved", file=sys.stderr) + return + + if self.cache: + if self.verbose: + print("Llama._create_completion: cache save", file=sys.stderr) + self.cache[prompt_tokens + completion_tokens] = self.save_state() + + text_str = text.decode("utf-8", errors="ignore") + + if echo: + text_str = prompt + text_str + + if suffix_token_id < 0 and suffix is not None: + text_str = text_str + suffix + + logprobs_or_none: Optional[CompletionLogprobs] = None + if logprobs is not None: + text_offset = 0 if echo else len(prompt) + token_offset = 0 if echo else len(prompt_tokens[1:]) + text_offsets: List[int] = [] + token_logprobs: List[Optional[float]] = [] + tokens: List[str] = [] + top_logprobs: List[Optional[Dict[str, float]]] = [] + + if echo: + # Remove leading BOS token if exists + all_tokens 
= ( + prompt_tokens[1 if prompt_tokens[0] == self.token_bos() else 0 :] + + completion_tokens + ) + else: + all_tokens = completion_tokens + + all_token_strs = [ + self.detokenize([token], prev_tokens=all_tokens[:i]).decode( + "utf-8", errors="ignore" + ) + for i, token in enumerate(all_tokens) + ] + all_logprobs = Llama.logits_to_logprobs(self._scores)[token_offset:] + # TODO: may be able to change this loop to use np.take_along_dim + for idx, (token, token_str, logprobs_token) in enumerate( + zip(all_tokens, all_token_strs, all_logprobs) + ): + if token == bos_token_id: + continue + text_offsets.append( + text_offset + + len( + self.detokenize(all_tokens[:idx]).decode( + "utf-8", errors="ignore" + ) + ) + ) + tokens.append(token_str) + sorted_logprobs = list( + sorted( + zip(logprobs_token, range(len(logprobs_token))), reverse=True + ) + ) + token_logprobs.append(logprobs_token[int(token)]) + top_logprob: Optional[Dict[str, float]] = { + self.detokenize([i], prev_tokens=all_tokens[:idx]).decode( + "utf-8", errors="ignore" + ): logprob + for logprob, i in sorted_logprobs[:logprobs] + } + top_logprob.update({token_str: logprobs_token[int(token)]}) + top_logprobs.append(top_logprob) + # Weird idosincracy of the OpenAI API where + # token_logprobs and top_logprobs are null for + # the first token. + if echo and len(all_tokens) > 0: + token_logprobs[0] = None + top_logprobs[0] = None + logprobs_or_none = { + "tokens": tokens, + "text_offset": text_offsets, + "token_logprobs": token_logprobs, + "top_logprobs": top_logprobs, + } + + yield { + "id": completion_id, + "object": "text_completion", + "created": created, + "model": model_name, + "choices": [ + { + "text": text_str, + "index": 0, + "logprobs": logprobs_or_none, + "finish_reason": finish_reason, + } + ], + "usage": { + "prompt_tokens": len(prompt_tokens), + "completion_tokens": len(completion_tokens), + "total_tokens": len(prompt_tokens) + len(completion_tokens), + }, + } + + def create_completion( + self, + prompt: Union[str, List[int]], + suffix: Optional[str] = None, + max_tokens: Optional[int] = 16, + temperature: float = 0.8, + top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, + logprobs: Optional[int] = None, + echo: bool = False, + stop: Optional[Union[str, List[str]]] = [], + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + repeat_penalty: float = 1.0, + top_k: int = 40, + stream: bool = False, + seed: Optional[int] = None, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + logit_bias: Optional[Dict[int, float]] = None, + ) -> Union[CreateCompletionResponse, Iterator[CreateCompletionStreamResponse]]: + """Generate text from a prompt. + + Args: + prompt: The prompt to generate text from. + suffix: A suffix to append to the generated text. If None, no suffix is appended. + max_tokens: The maximum number of tokens to generate. If max_tokens <= 0 or None, the maximum number of tokens to generate is unlimited and depends on n_ctx. + temperature: The temperature to use for sampling. + top_p: The top-p value to use for nucleus sampling. Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + min_p: The min-p value to use for minimum p sampling. 
Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 + typical_p: The typical-p value to use for sampling. Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. + logprobs: The number of logprobs to return. If None, no logprobs are returned. + echo: Whether to echo the prompt. + stop: A list of strings to stop generation when encountered. + frequency_penalty: The penalty to apply to tokens based on their frequency in the prompt. + presence_penalty: The penalty to apply to tokens based on their presence in the prompt. + repeat_penalty: The penalty to apply to repeated tokens. + top_k: The top-k value to use for sampling. Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + stream: Whether to stream the results. + seed: The seed to use for sampling. + tfs_z: The tail-free sampling parameter. Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/. + mirostat_mode: The mirostat sampling mode. + mirostat_tau: The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. + mirostat_eta: The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. + model: The name to use for the model in the completion object. + stopping_criteria: A list of stopping criteria to use. + logits_processor: A list of logits processors to use. + grammar: A grammar to use for constrained sampling. + logit_bias: A logit bias to use. + + Raises: + ValueError: If the requested tokens exceed the context window. + RuntimeError: If the prompt fails to tokenize or the model fails to evaluate the prompt. + + Returns: + Response object containing the generated text. 
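+
+        Examples:
+            Illustrative only; the model path and prompt are placeholders:
+
+            >>> llama = Llama("models/ggml-7b.gguf")
+            >>> llama.create_completion(
+            ...     "Q: Name the planets in the solar system. A: ",
+            ...     max_tokens=32,
+            ...     stop=["Q:", "\n"],
+            ... )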
+ """ + completion_or_chunks = self._create_completion( + prompt=prompt, + suffix=suffix, + max_tokens=-1 if max_tokens is None else max_tokens, + temperature=temperature, + top_p=top_p, + min_p=min_p, + typical_p=typical_p, + logprobs=logprobs, + echo=echo, + stop=stop, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + repeat_penalty=repeat_penalty, + top_k=top_k, + stream=stream, + seed=seed, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + stopping_criteria=stopping_criteria, + logits_processor=logits_processor, + grammar=grammar, + logit_bias=logit_bias, + ) + if stream: + chunks: Iterator[CreateCompletionStreamResponse] = completion_or_chunks + return chunks + completion: Completion = next(completion_or_chunks) # type: ignore + return completion + + def __call__( + self, + prompt: str, + suffix: Optional[str] = None, + max_tokens: Optional[int] = 16, + temperature: float = 0.8, + top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, + logprobs: Optional[int] = None, + echo: bool = False, + stop: Optional[Union[str, List[str]]] = [], + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + repeat_penalty: float = 1.0, + top_k: int = 40, + stream: bool = False, + seed: Optional[int] = None, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + logit_bias: Optional[Dict[int, float]] = None, + ) -> Union[CreateCompletionResponse, Iterator[CreateCompletionStreamResponse]]: + """Generate text from a prompt. + + Args: + prompt: The prompt to generate text from. + suffix: A suffix to append to the generated text. If None, no suffix is appended. + max_tokens: The maximum number of tokens to generate. If max_tokens <= 0 or None, the maximum number of tokens to generate is unlimited and depends on n_ctx. + temperature: The temperature to use for sampling. + top_p: The top-p value to use for nucleus sampling. Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + min_p: The min-p value to use for minimum p sampling. Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 + typical_p: The typical-p value to use for sampling. Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. + logprobs: The number of logprobs to return. If None, no logprobs are returned. + echo: Whether to echo the prompt. + stop: A list of strings to stop generation when encountered. + frequency_penalty: The penalty to apply to tokens based on their frequency in the prompt. + presence_penalty: The penalty to apply to tokens based on their presence in the prompt. + repeat_penalty: The penalty to apply to repeated tokens. + top_k: The top-k value to use for sampling. Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + stream: Whether to stream the results. + seed: The seed to use for sampling. + tfs_z: The tail-free sampling parameter. Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/. + mirostat_mode: The mirostat sampling mode. 
+ mirostat_tau: The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. + mirostat_eta: The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. + model: The name to use for the model in the completion object. + stopping_criteria: A list of stopping criteria to use. + logits_processor: A list of logits processors to use. + grammar: A grammar to use for constrained sampling. + logit_bias: A logit bias to use. + + Raises: + ValueError: If the requested tokens exceed the context window. + RuntimeError: If the prompt fails to tokenize or the model fails to evaluate the prompt. + + Returns: + Response object containing the generated text. + """ + return self.create_completion( + prompt=prompt, + suffix=suffix, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + min_p=min_p, + typical_p=typical_p, + logprobs=logprobs, + echo=echo, + stop=stop, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + repeat_penalty=repeat_penalty, + top_k=top_k, + stream=stream, + seed=seed, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + stopping_criteria=stopping_criteria, + logits_processor=logits_processor, + grammar=grammar, + logit_bias=logit_bias, + ) + + def create_chat_completion( + self, + messages: List[ChatCompletionRequestMessage], + functions: Optional[List[ChatCompletionFunction]] = None, + function_call: Optional[ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[ChatCompletionTool]] = None, + tool_choice: Optional[ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + seed: Optional[int] = None, + response_format: Optional[ChatCompletionRequestResponseFormat] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.0, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + logit_bias: Optional[Dict[int, float]] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + ) -> Union[ + CreateChatCompletionResponse, Iterator[CreateChatCompletionStreamResponse] + ]: + """Generate a chat completion from a list of messages. + + Args: + messages: A list of messages to generate a response for. + functions: A list of functions to use for the chat completion. + function_call: A function call to use for the chat completion. + tools: A list of tools to use for the chat completion. + tool_choice: A tool choice to use for the chat completion. + temperature: The temperature to use for sampling. + top_p: The top-p value to use for nucleus sampling. Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + top_k: The top-k value to use for sampling. 
Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + min_p: The min-p value to use for minimum p sampling. Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 + typical_p: The typical-p value to use for sampling. Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. + stream: Whether to stream the results. + stop: A list of strings to stop generation when encountered. + seed: The seed to use for sampling. + response_format: The response format to use for the chat completion. Use { "type": "json_object" } to contstrain output to only valid json. + max_tokens: The maximum number of tokens to generate. If max_tokens <= 0 or None, the maximum number of tokens to generate is unlimited and depends on n_ctx. + presence_penalty: The penalty to apply to tokens based on their presence in the prompt. + frequency_penalty: The penalty to apply to tokens based on their frequency in the prompt. + repeat_penalty: The penalty to apply to repeated tokens. + tfs_z: The tail-free sampling parameter. + mirostat_mode: The mirostat sampling mode. + mirostat_tau: The mirostat sampling tau parameter. + mirostat_eta: The mirostat sampling eta parameter. + model: The name to use for the model in the completion object. + logits_processor: A list of logits processors to use. + grammar: A grammar to use. + logit_bias: A logit bias to use. + + Returns: + Generated chat completion or a stream of chat completion chunks. + """ + handler = ( + self.chat_handler + or self._chat_handlers.get(self.chat_format) + or llama_chat_format.get_chat_completion_handler(self.chat_format) + ) + return handler( + llama=self, + messages=messages, + functions=functions, + function_call=function_call, + tools=tools, + tool_choice=tool_choice, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + logprobs=logprobs, + top_logprobs=top_logprobs, + stream=stream, + stop=stop, + seed=seed, + response_format=response_format, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + logit_bias=logit_bias, + ) + + def create_chat_completion_openai_v1( + self, + *args: Any, + **kwargs: Any, + ): + """Generate a chat completion with return type based on the the OpenAI v1 API. + + OpenAI python package is required to use this method. + + You can install it with `pip install openai`. + + Args: + *args: Positional arguments to pass to create_chat_completion. + **kwargs: Keyword arguments to pass to create_chat_completion. + + Returns: + Generated chat completion or a stream of chat completion chunks. + """ + try: + from openai.types.chat import ChatCompletion, ChatCompletionChunk + + stream = kwargs.get("stream", False) # type: ignore + assert isinstance(stream, bool) + if stream: + return (ChatCompletionChunk(**chunk) for chunk in self.create_chat_completion(*args, **kwargs)) # type: ignore + else: + return ChatCompletion(**self.create_chat_completion(*args, **kwargs)) # type: ignore + except ImportError: + raise ImportError( + "To use create_chat_completion_openai_v1, you must install the openai package." + "You can install it with `pip install openai`." 
+ ) + + def __getstate__(self): + return dict( + model_path=self.model_path, + # Model Params + n_gpu_layers=self.model_params.n_gpu_layers, + split_mode=self.model_params.split_mode, + main_gpu=self.model_params.main_gpu, + tensor_split=self.tensor_split, + vocab_only=self.model_params.vocab_only, + use_mmap=self.model_params.use_mmap, + use_mlock=self.model_params.use_mlock, + kv_overrides=self.kv_overrides, + # Context Params + seed=self._seed, + n_ctx=self.context_params.n_ctx, + n_batch=self.n_batch, + n_ubatch=self.context_params.n_ubatch, + n_threads=self.context_params.n_threads, + n_threads_batch=self.context_params.n_threads_batch, + rope_scaling_type=self.context_params.rope_scaling_type, + pooling_type=self.context_params.pooling_type, + rope_freq_base=self.context_params.rope_freq_base, + rope_freq_scale=self.context_params.rope_freq_scale, + yarn_ext_factor=self.context_params.yarn_ext_factor, + yarn_attn_factor=self.context_params.yarn_attn_factor, + yarn_beta_fast=self.context_params.yarn_beta_fast, + yarn_beta_slow=self.context_params.yarn_beta_slow, + yarn_orig_ctx=self.context_params.yarn_orig_ctx, + logits_all=self.context_params.logits_all, + embedding=self.context_params.embeddings, + offload_kqv=self.context_params.offload_kqv, + flash_attn=self.context_params.flash_attn, + # Sampling Params + no_perf=self.context_params.no_perf, + last_n_tokens_size=self.last_n_tokens_size, + # LoRA Params + lora_base=self.lora_base, + lora_scale=self.lora_scale, + lora_path=self.lora_path, + # Backend Params + numa=self.numa, + # Chat Format Params + chat_format=self.chat_format, + chat_handler=self.chat_handler, + # Speculative Decidng + draft_model=self.draft_model, + # KV cache quantization + type_k=self.context_params.type_k, + type_v=self.context_params.type_v, + # Misc + spm_infill=self.spm_infill, + verbose=self.verbose, + ) + + def __setstate__(self, state): + self.__init__(**state) + + def save_state(self) -> LlamaState: + if self.verbose: + print("Llama.save_state: saving llama state", file=sys.stderr) + state_size = llama_cpp.llama_get_state_size(self._ctx.ctx) + if self.verbose: + print(f"Llama.save_state: got state size: {state_size}", file=sys.stderr) + llama_state = (ctypes.c_uint8 * int(state_size))() + if self.verbose: + print("Llama.save_state: allocated state", file=sys.stderr) + n_bytes = llama_cpp.llama_copy_state_data(self._ctx.ctx, llama_state) + if self.verbose: + print(f"Llama.save_state: copied llama state: {n_bytes}", file=sys.stderr) + if int(n_bytes) > int(state_size): + raise RuntimeError("Failed to copy llama state data") + llama_state_compact = (ctypes.c_uint8 * int(n_bytes))() + llama_cpp.ctypes.memmove(llama_state_compact, llama_state, int(n_bytes)) + if self.verbose: + print( + f"Llama.save_state: saving {n_bytes} bytes of llama state", + file=sys.stderr, + ) + return LlamaState( + scores=self._scores.copy(), + input_ids=self.input_ids.copy(), + n_tokens=self.n_tokens, + llama_state=bytes(llama_state_compact), + llama_state_size=n_bytes, + seed=self._seed, + ) + + def load_state(self, state: LlamaState) -> None: + # Only filling in up to `n_tokens` and then zero-ing out the rest + self.scores[: state.n_tokens, :] = state.scores.copy() + rest = self.scores[state.n_tokens :, :] + rest[rest > 0] = 0.0 + self.input_ids = state.input_ids.copy() + self.n_tokens = state.n_tokens + self._seed = state.seed + state_size = state.llama_state_size + LLamaStateArrayType = ctypes.c_uint8 * state_size + llama_state = 
LLamaStateArrayType.from_buffer_copy(state.llama_state) + + if llama_cpp.llama_set_state_data(self._ctx.ctx, llama_state) != state_size: + raise RuntimeError("Failed to set llama state data") + + def n_ctx(self) -> int: + """Return the context window size.""" + return self._ctx.n_ctx() + + def n_embd(self) -> int: + """Return the embedding size.""" + return self._model.n_embd() + + def n_vocab(self) -> int: + """Return the vocabulary size.""" + return self._model.n_vocab() + + def tokenizer(self) -> LlamaTokenizer: + """Return the llama tokenizer for this model.""" + return LlamaTokenizer(self) + + def token_eos(self) -> int: + """Return the end-of-sequence token.""" + return self._model.token_eos() + + def token_bos(self) -> int: + """Return the beginning-of-sequence token.""" + return self._model.token_bos() + + def token_nl(self) -> int: + """Return the newline token.""" + return self._model.token_nl() + + def pooling_type(self) -> str: + """Return the pooling type.""" + return self._ctx.pooling_type() + + def close(self) -> None: + """Explicitly free the model from memory.""" + self._stack.close() + + def __del__(self) -> None: + self.close() + + @staticmethod + def logits_to_logprobs( + logits: Union[npt.NDArray[np.single], List], axis: int = -1 + ) -> npt.NDArray[np.single]: + # https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.log_softmax.html + logits_maxs: np.ndarray = np.amax(logits, axis=axis, keepdims=True) + if logits_maxs.ndim > 0: + logits_maxs[~np.isfinite(logits_maxs)] = 0 + elif not np.isfinite(logits_maxs): + logits_maxs = 0 + subtract_maxs = np.subtract(logits, logits_maxs, dtype=np.single) + exp = np.exp(subtract_maxs) + # Suppress warnings about log of zero + with np.errstate(divide="ignore"): + summed = np.sum(exp, axis=axis, keepdims=True) + out = np.log(summed) + return subtract_maxs - out + + @staticmethod + def longest_token_prefix(a: Sequence[int], b: Sequence[int]): + longest_prefix = 0 + for _a, _b in zip(a, b): + if _a == _b: + longest_prefix += 1 + else: + break + return longest_prefix + + @classmethod + def from_pretrained( + cls, + repo_id: str, + filename: Optional[str], + additional_files: Optional[List] = None, + local_dir: Optional[Union[str, os.PathLike[str]]] = None, + local_dir_use_symlinks: Union[bool, Literal["auto"]] = "auto", + cache_dir: Optional[Union[str, os.PathLike[str]]] = None, + **kwargs: Any, + ) -> "Llama": + """Create a Llama model from a pretrained model name or path. + This method requires the huggingface-hub package. + You can install it with `pip install huggingface-hub`. + + Args: + repo_id: The model repo id. + filename: A filename or glob pattern to match the model file in the repo. + additional_files: A list of filenames or glob patterns to match additional model files in the repo. + local_dir: The local directory to save the model to. + local_dir_use_symlinks: Whether to use symlinks when downloading the model. + **kwargs: Additional keyword arguments to pass to the Llama constructor. + + Returns: + A Llama model.""" + try: + from huggingface_hub import hf_hub_download, HfFileSystem + from huggingface_hub.utils import validate_repo_id + except ImportError: + raise ImportError( + "Llama.from_pretrained requires the huggingface-hub package. " + "You can install it with `pip install huggingface-hub`." 
+ ) + + validate_repo_id(repo_id) + + hffs = HfFileSystem() + + files = [ + file["name"] if isinstance(file, dict) else file + for file in hffs.ls(repo_id, recursive=True) + ] + + # split each file into repo_id, subfolder, filename + file_list: List[str] = [] + for file in files: + rel_path = Path(file).relative_to(repo_id) + file_list.append(str(rel_path)) + + # find the only/first shard file: + matching_files = [file for file in file_list if fnmatch.fnmatch(file, filename)] # type: ignore + + if len(matching_files) == 0: + raise ValueError( + f"No file found in {repo_id} that match {filename}\n\n" + f"Available Files:\n{json.dumps(file_list)}" + ) + + if len(matching_files) > 1: + raise ValueError( + f"Multiple files found in {repo_id} matching {filename}\n\n" + f"Available Files:\n{json.dumps(files)}" + ) + + (matching_file,) = matching_files + + subfolder = str(Path(matching_file).parent) + filename = Path(matching_file).name + + # download the file + hf_hub_download( + repo_id=repo_id, + filename=filename, + subfolder=subfolder, + local_dir=local_dir, + local_dir_use_symlinks=local_dir_use_symlinks, + cache_dir=cache_dir, + ) + + if additional_files: + for additonal_file_name in additional_files: + # find the additional shard file: + matching_additional_files = [file for file in file_list if fnmatch.fnmatch(file, additonal_file_name)] + + if len(matching_additional_files) == 0: + raise ValueError( + f"No file found in {repo_id} that match {additonal_file_name}\n\n" + f"Available Files:\n{json.dumps(file_list)}" + ) + + if len(matching_additional_files) > 1: + raise ValueError( + f"Multiple files found in {repo_id} matching {additonal_file_name}\n\n" + f"Available Files:\n{json.dumps(files)}" + ) + + (matching_additional_file,) = matching_additional_files + + # download the additional file + hf_hub_download( + repo_id=repo_id, + filename=matching_additional_file, + subfolder=subfolder, + local_dir=local_dir, + local_dir_use_symlinks=local_dir_use_symlinks, + cache_dir=cache_dir, + ) + + if local_dir is None: + model_path = hf_hub_download( + repo_id=repo_id, + filename=filename, + subfolder=subfolder, + local_dir=local_dir, + local_dir_use_symlinks=local_dir_use_symlinks, + cache_dir=cache_dir, + local_files_only=True, + ) + else: + model_path = os.path.join(local_dir, filename) + + # loading the first file of a sharded GGUF loads all remaining shard files in the subfolder + return cls( + model_path=model_path, + **kwargs, + ) + + +class LlamaState: + def __init__( + self, + input_ids: npt.NDArray[np.intc], + scores: npt.NDArray[np.single], + n_tokens: int, + llama_state: bytes, + llama_state_size: int, + seed: int, + ): + self.input_ids = input_ids + self.scores = scores + self.n_tokens = n_tokens + self.llama_state = llama_state + self.llama_state_size = llama_state_size + self.seed = seed + + +LogitsProcessor = Callable[ + [npt.NDArray[np.intc], npt.NDArray[np.single]], npt.NDArray[np.single] +] + + +class LogitsProcessorList(List[LogitsProcessor]): + def __call__( + self, input_ids: npt.NDArray[np.intc], scores: npt.NDArray[np.single] + ) -> npt.NDArray[np.single]: + for processor in self: + scores = processor(input_ids, scores) + return scores + + +StoppingCriteria = Callable[[npt.NDArray[np.intc], npt.NDArray[np.single]], bool] + + +class StoppingCriteriaList(List[StoppingCriteria]): + def __call__( + self, input_ids: npt.NDArray[np.intc], logits: npt.NDArray[np.single] + ) -> bool: + return any([stopping_criteria(input_ids, logits) for stopping_criteria in self]) + + +class 
MinTokensLogitsProcessor(LogitsProcessor): + def __init__(self, min_tokens: int, token_eos: int): + self.min_tokens = min_tokens + self.token_eos = token_eos + self.prompt_tokens = None + + def __call__( + self, input_ids: npt.NDArray[np.intc], scores: npt.NDArray[np.single] + ) -> npt.NDArray[np.single]: + if self.prompt_tokens is None: + self.prompt_tokens = len(input_ids) + if len(input_ids) - self.prompt_tokens < self.min_tokens: + scores[self.token_eos] = -np.inf + return scores diff --git a/llama_cpp/llama_cache.py b/llama_cpp/llama_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..e059e98e1ba9f89b51a33c3052c5e746e96e67ed --- /dev/null +++ b/llama_cpp/llama_cache.py @@ -0,0 +1,155 @@ +import sys +from abc import ABC, abstractmethod +from typing import ( + Optional, + Sequence, + Tuple, +) +from collections import OrderedDict + +import diskcache + +import llama_cpp.llama + +from .llama_types import * + + +class BaseLlamaCache(ABC): + """Base cache class for a llama.cpp model.""" + + def __init__(self, capacity_bytes: int = (2 << 30)): + self.capacity_bytes = capacity_bytes + + @property + @abstractmethod + def cache_size(self) -> int: + raise NotImplementedError + + def _find_longest_prefix_key( + self, + key: Tuple[int, ...], + ) -> Optional[Tuple[int, ...]]: + pass + + @abstractmethod + def __getitem__(self, key: Sequence[int]) -> "llama_cpp.llama.LlamaState": + raise NotImplementedError + + @abstractmethod + def __contains__(self, key: Sequence[int]) -> bool: + raise NotImplementedError + + @abstractmethod + def __setitem__( + self, key: Sequence[int], value: "llama_cpp.llama.LlamaState" + ) -> None: + raise NotImplementedError + + +class LlamaRAMCache(BaseLlamaCache): + """Cache for a llama.cpp model using RAM.""" + + def __init__(self, capacity_bytes: int = (2 << 30)): + super().__init__(capacity_bytes) + self.capacity_bytes = capacity_bytes + self.cache_state: OrderedDict[ + Tuple[int, ...], "llama_cpp.llama.LlamaState" + ] = OrderedDict() + + @property + def cache_size(self): + return sum([state.llama_state_size for state in self.cache_state.values()]) + + def _find_longest_prefix_key( + self, + key: Tuple[int, ...], + ) -> Optional[Tuple[int, ...]]: + min_len = 0 + min_key = None + keys = ( + (k, llama_cpp.llama.Llama.longest_token_prefix(k, key)) + for k in self.cache_state.keys() + ) + for k, prefix_len in keys: + if prefix_len > min_len: + min_len = prefix_len + min_key = k + return min_key + + def __getitem__(self, key: Sequence[int]) -> "llama_cpp.llama.LlamaState": + key = tuple(key) + _key = self._find_longest_prefix_key(key) + if _key is None: + raise KeyError("Key not found") + value = self.cache_state[_key] + self.cache_state.move_to_end(_key) + return value + + def __contains__(self, key: Sequence[int]) -> bool: + return self._find_longest_prefix_key(tuple(key)) is not None + + def __setitem__(self, key: Sequence[int], value: "llama_cpp.llama.LlamaState"): + key = tuple(key) + if key in self.cache_state: + del self.cache_state[key] + self.cache_state[key] = value + while self.cache_size > self.capacity_bytes and len(self.cache_state) > 0: + self.cache_state.popitem(last=False) + + +# Alias for backwards compatibility +LlamaCache = LlamaRAMCache + + +class LlamaDiskCache(BaseLlamaCache): + """Cache for a llama.cpp model using disk.""" + + def __init__( + self, cache_dir: str = ".cache/llama_cache", capacity_bytes: int = (2 << 30) + ): + super().__init__(capacity_bytes) + self.cache = diskcache.Cache(cache_dir) + + @property + def 
cache_size(self): + return int(self.cache.volume()) # type: ignore + + def _find_longest_prefix_key( + self, + key: Tuple[int, ...], + ) -> Optional[Tuple[int, ...]]: + min_len = 0 + min_key: Optional[Tuple[int, ...]] = None + for k in self.cache.iterkeys(): # type: ignore + prefix_len = llama_cpp.llama.Llama.longest_token_prefix(k, key) + if prefix_len > min_len: + min_len = prefix_len + min_key = k # type: ignore + return min_key + + def __getitem__(self, key: Sequence[int]) -> "llama_cpp.llama.LlamaState": + key = tuple(key) + _key = self._find_longest_prefix_key(key) + if _key is None: + raise KeyError("Key not found") + value: "llama_cpp.llama.LlamaState" = self.cache.pop(_key) # type: ignore + # NOTE: This puts an integer as key in cache, which breaks, + # Llama.longest_token_prefix(k, key) above since k is not a tuple of ints/tokens + # self.cache.push(_key, side="front") # type: ignore + return value + + def __contains__(self, key: Sequence[int]) -> bool: + return self._find_longest_prefix_key(tuple(key)) is not None + + def __setitem__(self, key: Sequence[int], value: "llama_cpp.llama.LlamaState"): + print("LlamaDiskCache.__setitem__: called", file=sys.stderr) + key = tuple(key) + if key in self.cache: + print("LlamaDiskCache.__setitem__: delete", file=sys.stderr) + del self.cache[key] + self.cache[key] = value + print("LlamaDiskCache.__setitem__: set", file=sys.stderr) + while self.cache_size > self.capacity_bytes and len(self.cache) > 0: + key_to_remove = next(iter(self.cache)) + del self.cache[key_to_remove] + print("LlamaDiskCache.__setitem__: trim", file=sys.stderr) diff --git a/llama_cpp/llama_chat_format.py b/llama_cpp/llama_chat_format.py new file mode 100644 index 0000000000000000000000000000000000000000..17575c700865ac97c39293a601625aeeb967664d --- /dev/null +++ b/llama_cpp/llama_chat_format.py @@ -0,0 +1,3816 @@ +from __future__ import annotations + +import os +import sys +import json +import ctypes +import dataclasses +import random +import string + +from contextlib import ExitStack +from typing import ( + Any, + Dict, + Iterator, + List, + Literal, + Optional, + Tuple, + Union, + Protocol, + cast, +) + +import jinja2 +from jinja2.sandbox import ImmutableSandboxedEnvironment + +import numpy as np +import numpy.typing as npt + +import llama_cpp.llama as llama +import llama_cpp.llama_types as llama_types +import llama_cpp.llama_grammar as llama_grammar + +from ._logger import logger +from ._utils import suppress_stdout_stderr, Singleton + +### Common Chat Templates and Special Tokens ### + +# Source: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B/blob/main/tokenizer_config.json +CHATML_CHAT_TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" +CHATML_BOS_TOKEN = "" +CHATML_EOS_TOKEN = "<|im_end|>" + +# Source: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/blob/main/tokenizer_config.json +MISTRAL_INSTRUCT_CHAT_TEMPLATE = "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" 
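# Editorial sketch (illustrative, not part of the module): calling
# Llama.from_pretrained as described in its docstring in llama_cpp/llama.py
# above. It requires the huggingface-hub package; the repo id and the
# Q4_K_M filename glob are example values, not files used by this repository,
# and the extra keyword arguments are forwarded to the Llama constructor.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",  # example GGUF repo on the Hub
    filename="*Q4_K_M.gguf",                           # glob matched against the repo's files
    n_ctx=512,
    verbose=False,
)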
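# Editorial sketch (illustrative, not part of the module): attaching the
# prefix caches from llama_cpp/llama_cache.py above to a model. Llama.set_cache
# is assumed to be available as in upstream llama-cpp-python; the GGUF file
# name is the one tracked in this repository's .gitattributes.
from llama_cpp import Llama
from llama_cpp.llama_cache import LlamaRAMCache, LlamaDiskCache

llm = Llama(model_path="tinyllama-1.1B-q4.gguf", n_ctx=512, verbose=False)
llm.set_cache(LlamaRAMCache(capacity_bytes=2 << 30))            # keep states in RAM, LRU eviction
# llm.set_cache(LlamaDiskCache(cache_dir=".cache/llama_cache")) # or persist states with diskcache

# Prompts that share a token prefix with a cached entry can reuse the saved
# llama state (found via _find_longest_prefix_key) instead of re-evaluating
# the shared tokens.
llm("Q: What is the capital of France?\nA:", max_tokens=16)
llm("Q: What is the capital of Spain?\nA:", max_tokens=16)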
+MISTRAL_INSTRUCT_BOS_TOKEN = "" +MISTRAL_INSTRUCT_EOS_TOKEN = "" + +# Source: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1/blob/main/tokenizer_config.json +MIXTRAL_INSTRUCT_CHAT_TEMPLATE = "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" + +# Source: https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/blob/main/tokenizer_config.json +LLAMA3_INSTRUCT_CHAT_TEMPLATE = "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" + +### Chat Completion Handler ### + + +class LlamaChatCompletionHandler(Protocol): + """Base Protocol for a llama chat completion handler. + + Very generic protocol that can be used to implement any chat format. + The only hard requirement is that it must return a ChatCompletion when + stream=False and an iterator of ChatCompletionChunks when stream=True.""" + + def __call__( + self, + *, + # llama.cpp instance + llama: llama.Llama, + # openai api parameters + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + seed: Optional[int] = None, + response_format: Optional[ + llama_types.ChatCompletionRequestResponseFormat + ] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + model: Optional[str] = None, + logit_bias: Optional[Dict[str, float]] = None, + # llama.cpp parameters + min_p: float = 0.05, + typical_p: float = 1.0, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + **kwargs, # type: ignore + ) -> Union[ + llama_types.CreateChatCompletionResponse, + Iterator[llama_types.CreateChatCompletionStreamResponse], + ]: ... + + +class LlamaChatCompletionHandlerNotFoundException(Exception): + pass + + +class LlamaChatCompletionHandlerRegistry(Singleton): + _chat_handlers: Dict[str, LlamaChatCompletionHandler] = {} + + def register_chat_completion_handler( + self, + name: str, + chat_handler: LlamaChatCompletionHandler, + overwrite: bool = False, + ): + if not overwrite and name in self._chat_handlers: + raise ValueError( + f"Formatter with name '{name}' is already registered. Use `overwrite=True` to overwrite it." 
+ ) + self._chat_handlers[name] = chat_handler + + def unregister_chat_handler(self, name: str): + if name in self._chat_handlers: + del self._chat_handlers[name] + else: + raise ValueError(f"No formatter registered under the name '{name}'.") + + def get_chat_completion_handler_by_name( + self, name: str + ) -> LlamaChatCompletionHandler: + try: + chat_handler = self._chat_handlers[name] + return chat_handler + except KeyError: + raise LlamaChatCompletionHandlerNotFoundException( + f"Invalid chat handler: {name} (valid formats: {list(self._chat_handlers.keys())})" + ) + + +def get_chat_completion_handler(name: str) -> LlamaChatCompletionHandler: + return LlamaChatCompletionHandlerRegistry().get_chat_completion_handler_by_name( + name + ) + + +def register_chat_completion_handler(name: str): + def decorator(f: LlamaChatCompletionHandler): + LlamaChatCompletionHandlerRegistry().register_chat_completion_handler(name, f) + return f + + return decorator + + +### Chat Formatter ### + + +@dataclasses.dataclass +class ChatFormatterResponse: + """Dataclass that stores completion parameters for a given chat format and + create_chat_completion request. + + prompt contains the formatted prompt generated from the chat format and messages. + stop contains the stop token or list of stop tokens to use for the chat format.""" + + prompt: str + stop: Optional[Union[str, List[str]]] = None + stopping_criteria: Optional[llama.StoppingCriteriaList] = None + added_special: bool = False + + +class ChatFormatter(Protocol): + """Base Protocol for a chat formatter. A chat formatter is a function that + takes a list of messages and returns a chat format response which can be used + to generate a completion. The response can also include a stop token or list + of stop tokens to use for the completion.""" + + def __call__( + self, + *, + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, + ) -> ChatFormatterResponse: ... 
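# Editorial sketch (illustrative, not part of the module): any callable that
# accepts keyword-only `messages` and returns a ChatFormatterResponse satisfies
# the ChatFormatter protocol above. The "plain" format name is made up here;
# chat_formatter_to_chat_completion_handler is defined further down in this
# module and is the same adapter the register_chat_format decorator uses.
def format_plain(*, messages, **kwargs) -> ChatFormatterResponse:
    prompt = "".join(
        f"{m['role']}: {m.get('content') or ''}\n" for m in messages
    )
    return ChatFormatterResponse(prompt=prompt + "assistant: ", stop=["\n"])


LlamaChatCompletionHandlerRegistry().register_chat_completion_handler(
    "plain", chat_formatter_to_chat_completion_handler(format_plain)
)
# After registration, get_chat_completion_handler("plain") returns the adapted
# handler, which is how named chat formats are resolved through this registry.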
+ + +class Jinja2ChatFormatter(ChatFormatter): + def __init__( + self, + template: str, + eos_token: str, + bos_token: str, + add_generation_prompt: bool = True, + stop_token_ids: Optional[List[int]] = None, + ): + """A chat formatter that uses jinja2 templates to format the prompt.""" + self.template = template + self.eos_token = eos_token + self.bos_token = bos_token + self.add_generation_prompt = add_generation_prompt + self.stop_token_ids = ( + set(stop_token_ids) if stop_token_ids is not None else None + ) + + self._environment = ImmutableSandboxedEnvironment( + loader=jinja2.BaseLoader(), + trim_blocks=True, + lstrip_blocks=True, + ).from_string(self.template) + + def __call__( + self, + *, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + **kwargs: Any, + ) -> ChatFormatterResponse: + def raise_exception(message: str): + raise ValueError(message) + + prompt = self._environment.render( + messages=messages, + eos_token=self.eos_token, + bos_token=self.bos_token, + raise_exception=raise_exception, + add_generation_prompt=self.add_generation_prompt, + functions=functions, + function_call=function_call, + tools=tools, + tool_choice=tool_choice, + ) + + stopping_criteria = None + if self.stop_token_ids is not None: + + def stop_on_last_token( + tokens: npt.NDArray[np.intc], logits: npt.NDArray[np.single] + ) -> bool: + return tokens[-1] in self.stop_token_ids + + stopping_criteria = llama.StoppingCriteriaList([stop_on_last_token]) + + return ChatFormatterResponse( + prompt=prompt, + stop=[self.eos_token], + stopping_criteria=stopping_criteria, + added_special=True, + ) + + def to_chat_handler(self) -> LlamaChatCompletionHandler: + return chat_formatter_to_chat_completion_handler(self) + + +def _convert_text_completion_logprobs_to_chat( + logprobs: Optional[llama_types.CompletionLogprobs], +) -> llama_types.ChatCompletionLogprobs: + if logprobs is None: + return None + + return { + "content": [ + { + "token": token, + "bytes": None, + "logprob": logprob, + "top_logprobs": [ + { + "token": top_token, + "logprob": top_logprob, + "bytes": None, + } + for top_token, top_logprob in top_logprobs.items() + ], + } for (token, logprob, top_logprobs) in zip(logprobs["tokens"], logprobs["token_logprobs"], logprobs["top_logprobs"]) + ], + "refusal": None, + } + +def _convert_text_completion_to_chat( + completion: llama_types.Completion, +) -> llama_types.ChatCompletion: + assert "usage" in completion + return { + "id": "chat" + completion["id"], + "object": "chat.completion", + "created": completion["created"], + "model": completion["model"], + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": completion["choices"][0]["text"], + }, + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), + "finish_reason": completion["choices"][0]["finish_reason"], + } + ], + "usage": completion["usage"], + } + + +def _convert_text_completion_chunks_to_chat( + chunks: Iterator[llama_types.CreateCompletionStreamResponse], +) -> Iterator[llama_types.ChatCompletionChunk]: + for i, chunk in enumerate(chunks): + if i == 0: + yield { + "id": "chat" + chunk["id"], + "model": chunk["model"], + "created": chunk["created"], + "object": "chat.completion.chunk", 
+ "choices": [ + { + "index": 0, + "delta": { + "role": "assistant", + }, + "logprobs": None, + "finish_reason": None, + } + ], + } + yield { + "id": "chat" + chunk["id"], + "model": chunk["model"], + "created": chunk["created"], + "object": "chat.completion.chunk", + "choices": [ + { + "index": 0, + "delta": ( + { + "content": chunk["choices"][0]["text"], + } + if chunk["choices"][0]["finish_reason"] is None + else {} + ), + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "finish_reason": chunk["choices"][0]["finish_reason"], + } + ], + } + + +def _convert_completion_to_chat( + completion_or_chunks: Union[ + llama_types.CreateCompletionResponse, + Iterator[llama_types.CreateCompletionStreamResponse], + ], + stream: bool = False, +) -> Union[ + llama_types.CreateChatCompletionResponse, Iterator[llama_types.ChatCompletionChunk] +]: + if stream: + chunks: Iterator[llama_types.CreateCompletionStreamResponse] = completion_or_chunks # type: ignore + return _convert_text_completion_chunks_to_chat(chunks) + else: + completion: llama_types.Completion = completion_or_chunks # type: ignore + return _convert_text_completion_to_chat(completion) + + +def _convert_completion_to_chat_function( + tool_name: str, + completion_or_chunks: Union[ + llama_types.CreateCompletionResponse, + Iterator[llama_types.CreateCompletionStreamResponse], + ], + stream: bool, +): + if not stream: + completion: llama_types.CreateCompletionResponse = completion_or_chunks # type: ignore + assert "usage" in completion + tool_id = "call_" + "_0_" + tool_name + "_" + completion["id"] + # TODO: Fix for legacy function calls + chat_completion: llama_types.CreateChatCompletionResponse = { + "id": "chat" + completion["id"], + "object": "chat.completion", + "created": completion["created"], + "model": completion["model"], + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": None, + "function_call": { + "name": tool_name, + "arguments": completion["choices"][0]["text"], + }, + "tool_calls": [ + { + "id": tool_id, + "type": "function", + "function": { + "name": tool_name, + "arguments": completion["choices"][0]["text"], + }, + } + ], + }, + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), + "finish_reason": "tool_calls", + } + ], + "usage": completion["usage"], + } + return chat_completion + else: + chunks: Iterator[llama_types.CreateCompletionStreamResponse] = completion_or_chunks # type: ignore + + def _stream_response_to_function_stream( + chunks: Iterator[llama_types.CreateCompletionStreamResponse], + ) -> Iterator[llama_types.CreateChatCompletionStreamResponse]: + # blank first message + first = True + id_ = None + created = None + model = None + tool_id = None + for chunk in chunks: + if first: + id_ = "chat" + chunk["id"] + created = chunk["created"] + model = chunk["model"] + tool_id = "call_" + "_0_" + tool_name + "_" + chunk["id"] + yield { + "id": id_, + "object": "chat.completion.chunk", + "created": created, + "model": model, + "choices": [ + { + "index": 0, + "finish_reason": None, + "logprobs": None, + "delta": { + "role": "assistant", + "content": None, + "function_call": None, + "tool_calls": None, + }, + } + ], + } + yield { + "id": "chat" + chunk["id"], + "object": "chat.completion.chunk", + "created": chunk["created"], + "model": chunk["model"], + "choices": [ + { + "index": 0, + "finish_reason": None, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": 
{ + "role": None, + "content": None, + "function_call": { + "name": tool_name, + "arguments": chunk["choices"][0]["text"], + }, + "tool_calls": [ + { + "index": 0, + "id": tool_id, + "type": "function", + "function": { + "name": tool_name, + "arguments": chunk["choices"][0][ + "text" + ], + }, + } + ], + }, + } + ], + } + first = False + continue + assert tool_id is not None + yield { + "id": "chat" + chunk["id"], + "object": "chat.completion.chunk", + "created": chunk["created"], + "model": chunk["model"], + "choices": [ + { + "index": 0, + "finish_reason": None, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": None, + "content": None, + "function_call": { + "name": tool_name, + "arguments": chunk["choices"][0]["text"], + }, + "tool_calls": [ + { + "index": 0, + "id": tool_id, + "type": "function", + "function": { + "name": tool_name, + "arguments": chunk["choices"][0]["text"], + }, + } + ], + }, + } + ], + } + + if id_ is not None and created is not None and model is not None: + yield { + "id": id_, + "object": "chat.completion.chunk", + "created": created, + "model": model, + "choices": [ + { + "index": 0, + "finish_reason": "tool_calls", + "logprobs": None, + "delta": { + "role": None, + "content": None, + "function_call": None, + "tool_calls": None, + }, + } + ], + } + + return _stream_response_to_function_stream(chunks) + + +def chat_formatter_to_chat_completion_handler( + chat_formatter: ChatFormatter, +) -> LlamaChatCompletionHandler: + def chat_completion_handler( + *, + llama: llama.Llama, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + seed: Optional[int] = None, + response_format: Optional[ + llama_types.ChatCompletionRequestResponseFormat + ] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + logit_bias: Optional[Dict[str, float]] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + **kwargs, # type: ignore + ) -> Union[ + llama_types.CreateChatCompletionResponse, + Iterator[llama_types.CreateChatCompletionStreamResponse], + ]: + result = chat_formatter( + messages=messages, + functions=functions, + function_call=function_call, + tools=tools, + tool_choice=tool_choice, + ) + prompt = llama.tokenize( + result.prompt.encode("utf-8"), + add_bos=not result.added_special, + special=True, + ) + if result.stop is not None: + stop = [] if stop is None else [stop] if isinstance(stop, str) else stop + rstop = result.stop if isinstance(result.stop, list) else [result.stop] + stop = stop + rstop + + stopping_criteria = None + if result.stopping_criteria is not None: + stopping_criteria = result.stopping_criteria + + if response_format is not None and response_format["type"] 
== "json_object": + grammar = _grammar_for_response_format( + response_format, verbose=llama.verbose + ) + + # Convert legacy functions to tools + if functions is not None: + tools = [ + { + "type": "function", + "function": function, + } + for function in functions + ] + + # Convert legacy function_call to tool_choice + if function_call is not None: + if isinstance(function_call, str) and ( + function_call == "none" or function_call == "auto" + ): + tool_choice = function_call + if isinstance(function_call, dict) and "name" in function_call: + tool_choice = { + "type": "function", + "function": { + "name": function_call["name"], + }, + } + + tool = None + if ( + tool_choice is not None + and isinstance(tool_choice, dict) + and tools is not None + ): + name = tool_choice["function"]["name"] + tool = next((t for t in tools if t["function"]["name"] == name), None) + if tool is None: + raise ValueError(f"Tool choice '{name}' not found in tools.") + schema = tool["function"]["parameters"] + try: + # create grammar from json schema + grammar = llama_grammar.LlamaGrammar.from_json_schema( + json.dumps(schema), verbose=llama.verbose + ) + except Exception as e: + if llama.verbose: + print(str(e), file=sys.stderr) + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + + completion_or_chunks = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + logprobs=top_logprobs if logprobs else None, + stream=stream, + stop=stop, + seed=seed, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + grammar=grammar, + logit_bias=logit_bias, + ) + if tool is not None: + tool_name = tool["function"]["name"] + return _convert_completion_to_chat_function( + tool_name, completion_or_chunks, stream + ) + return _convert_completion_to_chat(completion_or_chunks, stream=stream) + + return chat_completion_handler + + +def hf_autotokenizer_to_chat_formatter( + pretrained_model_name_or_path: Union[str, os.PathLike[str]] +) -> ChatFormatter: + # https://huggingface.co/docs/transformers/main/chat_templating + # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1#instruction-format + # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/blob/main/tokenizer_config.json + from transformers import AutoTokenizer # type: ignore + + tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path) # type: ignore + + def format_autotokenizer( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, + ) -> ChatFormatterResponse: + tokenizer.use_default_system_prompt = False # type: ignore + prompt: str = tokenizer.apply_chat_template(messages, tokenize=False) # type: ignore + assert isinstance(prompt, str) + # Return formatted prompt and eos token by default + return ChatFormatterResponse( + prompt=prompt, stop=tokenizer.eos_token, added_special=True + ) + + return format_autotokenizer + + +def hf_autotokenizer_to_chat_completion_handler( + pretrained_model_name_or_path: Union[str, os.PathLike[str]] +) -> LlamaChatCompletionHandler: + chat_formatter = hf_autotokenizer_to_chat_formatter(pretrained_model_name_or_path) + return chat_formatter_to_chat_completion_handler(chat_formatter) + + +def 
hf_tokenizer_config_to_chat_formatter( + tokenizer_config: Dict[str, Any], + add_generation_prompt: bool = True, +) -> ChatFormatter: + assert isinstance(tokenizer_config, dict) + + assert "chat_template" in tokenizer_config + assert isinstance(tokenizer_config["chat_template"], str) + chat_template = tokenizer_config["chat_template"] + + assert "bos_token" in tokenizer_config + assert isinstance(tokenizer_config["bos_token"], str) + bos_token = tokenizer_config["bos_token"] + + assert "eos_token" in tokenizer_config + assert isinstance(tokenizer_config["eos_token"], str) + eos_token = tokenizer_config["eos_token"] + + env = ImmutableSandboxedEnvironment( + trim_blocks=True, + lstrip_blocks=True, + ).from_string(chat_template) + + def format_tokenizer_config( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, + ) -> ChatFormatterResponse: + # TODO: veryify this is correct + # Add a blank assistant message to the end of the messages to prompt the model to generate a response + if add_generation_prompt: + messages = [ + *messages, + llama_types.ChatCompletionRequestAssistantMessage( + role="assistant", content="" + ), + ] + prompt = env.render( + messages=messages, + bos_token=bos_token, + eos_token=eos_token, + ) + return ChatFormatterResponse( + prompt=prompt, stop=[eos_token, bos_token], added_special=True + ) + + return format_tokenizer_config + + +def hf_tokenizer_config_to_chat_completion_handler( + tokenizer_config: Dict[str, Any], + add_generation_prompt: bool = True, +) -> LlamaChatCompletionHandler: + chat_formatter = hf_tokenizer_config_to_chat_formatter( + tokenizer_config, add_generation_prompt=add_generation_prompt + ) + return chat_formatter_to_chat_completion_handler(chat_formatter) + + +def guess_chat_format_from_gguf_metadata(metadata: Dict[str, str]) -> Optional[str]: + if "tokenizer.chat_template" not in metadata: + return None + + if metadata["tokenizer.chat_template"] == CHATML_CHAT_TEMPLATE: + return "chatml" + + if ( + metadata["tokenizer.chat_template"] == MISTRAL_INSTRUCT_CHAT_TEMPLATE + or metadata["tokenizer.chat_template"] == MIXTRAL_INSTRUCT_CHAT_TEMPLATE + ): + return "mistral-instruct" + + if metadata["tokenizer.chat_template"] == LLAMA3_INSTRUCT_CHAT_TEMPLATE: + return "llama-3" + + return None + + +### Utility functions for formatting chat prompts ### +# TODO: Replace these with jinja2 templates + + +def _get_system_message( + messages: List[llama_types.ChatCompletionRequestMessage], +) -> str: + """Get the first system message.""" + for message in messages: + if message["role"] == "system": + return message["content"] or "" + return "" + + +def _map_roles( + messages: List[llama_types.ChatCompletionRequestMessage], + role_map: Dict[str, str], +) -> List[Tuple[str, Optional[str]]]: + """Map the message roles.""" + output: List[Tuple[str, Optional[str]]] = [] + for message in messages: + role = message["role"] + if role in role_map: + content: str | None = ( + message["content"] if isinstance(message["content"], str) else None + ) + output.append((role_map[role], content)) + return output + + +def _format_llama2( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str, sep2: str +) -> str: + """Format the prompt with the llama2 style.""" + seps = [sep, sep2] + ret = system_message + sep + for i, (role, message) in enumerate(messages): + if system_message and i == 0: + m = message or "" + ret += m + seps[i % 2] + elif message: + ret += role + message + " " + seps[i % 2] + else: + ret += role + " " + return ret + + 
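# Editorial sketch (illustrative, not part of the module): building a formatter
# from a tokenizer_config.json-style dict with hf_tokenizer_config_to_chat_formatter
# above. The template and token values are example inputs, not read from any
# file in this repository.
tokenizer_config = {
    "chat_template": CHATML_CHAT_TEMPLATE,
    "bos_token": "<s>",
    "eos_token": "<|im_end|>",
}
formatter = hf_tokenizer_config_to_chat_formatter(tokenizer_config)
response = formatter(messages=[{"role": "user", "content": "Hello"}])
# response.prompt is the rendered ChatML prompt ending with an empty assistant
# turn (add_generation_prompt defaults to True); response.stop is
# [eos_token, bos_token].
print(response.prompt)
print(response.stop)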
+def _format_add_colon_single( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str +) -> str: + """Format the prompt with the add-colon-single style.""" + ret = system_message + sep + for role, message in messages: + if message: + ret += role + ": " + message + sep + else: + ret += role + ":" + return ret + + +def _format_add_colon_two( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str, sep2: str +) -> str: + """Format the prompt with the add-colon-two style.""" + seps = [sep, sep2] + ret = system_message + seps[0] + for i, (role, message) in enumerate(messages): + if message: + ret += role + ": " + message + seps[i % 2] + else: + ret += role + ":" + return ret + + +def _format_no_colon_single( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str +) -> str: + """Format the prompt with the no-colon-single style.""" + ret = system_message + for role, message in messages: + if message: + ret += role + message + sep + else: + ret += role + return ret + + +def _format_add_colon_space_single( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str +) -> str: + """Format the prompt with the add-colon-space-single style.""" + ret = system_message + sep + for role, message in messages: + if message: + ret += role + ": " + message + sep + else: + ret += role + ": " # must be end with a space + return ret + + +def _format_chatml( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str +) -> str: + """Format the prompt with the chatml style.""" + ret = "" if system_message == "" else system_message + sep + "\n" + for role, message in messages: + if message: + ret += role + "\n" + message + sep + "\n" + else: + ret += role + "\n" + return ret + + +def _format_chatglm3( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str +) -> str: + """Format the prompt with the chatglm3 style.""" + ret = "" + if system_message: + ret += system_message + for role, message in messages: + if message: + ret += role + "\n" + " " + message + else: + ret += role + return ret + + +def _grammar_for_json(verbose: bool = False): + return llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=verbose + ) + + +def _grammar_for_json_schema( + schema: str, verbose: bool = False, fallback_to_json: bool = True +): + try: + return llama_grammar.LlamaGrammar.from_json_schema(schema, verbose=verbose) + except Exception as e: + if fallback_to_json: + return _grammar_for_json(verbose=verbose) + else: + raise e + + +def _grammar_for_response_format( + response_format: llama_types.ChatCompletionRequestResponseFormat, + verbose: bool = False, +): + if response_format["type"] != "json_object": + return None + + if "schema" in response_format: + return _grammar_for_json_schema( + json.dumps(response_format["schema"]), verbose=verbose + ) + else: + return _grammar_for_json(verbose=verbose) + + +### Chat Formats ### + + +def register_chat_format(name: str): + def decorator(f: ChatFormatter): + chat_completion_handler = chat_formatter_to_chat_completion_handler(f) + LlamaChatCompletionHandlerRegistry().register_chat_completion_handler( + name, chat_completion_handler + ) + return f + + return decorator + + +# see https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/tokenization_llama.py +# system prompt is "embedded" in the first message +@register_chat_format("llama-2") +def format_llama2( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) 
-> ChatFormatterResponse: + _system_template = "[INST] <<SYS>>\n{system_message}\n<</SYS>>" + _roles = dict(user="<s>[INST]", assistant="[/INST]") + _messages = _map_roles(messages, _roles) + system_message = _get_system_message(messages) + if system_message: + system_message = _system_template.format(system_message=system_message) + _prompt = _format_llama2(system_message, _messages, " ", "</s>") + "[/INST]" + return ChatFormatterResponse(prompt=_prompt) + + +# Chat format for Llama-3 models, see more details at: +# https://github.com/meta-llama/llama3/blob/main/llama/tokenizer.py#L202-L229 +@register_chat_format("llama-3") +def format_llama3( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict( + system="<|start_header_id|>system<|end_header_id|>\n\n", + user="<|start_header_id|>user<|end_header_id|>\n\n", + assistant="<|start_header_id|>assistant<|end_header_id|>\n\n", + ) + _sep = "<|eot_id|>" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single("", _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +@register_chat_format("alpaca") +def format_alpaca( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict(user="### Instruction", assistant="### Response") + _sep = "\n\n" + _sep2 = "</s>" + system_message = _get_system_message(messages) + _messages = _map_roles(messages, _roles) + _prompt = _format_add_colon_two(system_message, _messages, _sep, _sep2) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("qwen") +def format_qwen( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict(user="<|im_start|>user", assistant="<|im_start|>assistant") + system_message = _get_system_message(messages) or "You are a helpful assistant." + system_template = "<|im_start|>system\n{system_message}" + system_message = system_template.format(system_message=system_message) + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _sep = "<|im_end|>" + _prompt = _format_chatml(system_message, _messages, _sep) + _sep2 = "<|endoftext|>" + return ChatFormatterResponse(prompt=_prompt, stop=_sep2) + + +@register_chat_format("vicuna") +def format( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_message = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions."
+ _roles = dict(user="USER", assistant="ASSISTANT") + _sep = " " + _sep2 = "" + system_message = _system_message + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_two(system_message, _messages, _sep, _sep2) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("oasst_llama") +def format_oasst_llama( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_template = "[INST] <>\n{system_message}\n<>\n\n" + _roles = dict(user="<|prompter|>", assistant="<|assistant|>") + _sep = "" + system_message = _get_system_message(messages) + system_message = _system_template.format(system_message=system_message) + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("baichuan-2") +def format_baichuan2( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_template = "{system_message}" + _roles = dict(user="", assistant="") + _sep = "" + system_message = _get_system_message(messages) + system_message = _system_template.format(system_message=system_message) + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("baichuan") +def format_baichuan( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_template = "{system_message}" + _roles = dict(user="", assistant="") + _sep = "" + system_message = _get_system_message(messages) + system_message = _system_template.format(system_message=system_message) + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("openbuddy") +def format_openbuddy( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_message = """You are a helpful, respectful and honest INTP-T AI Assistant named Buddy. You are talking to a human User. +Always answer as helpfully and logically as possible, while being safe. Your answers should not include any harmful, political, religious, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. +If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. +You can speak fluently in many languages, for example: English, Chinese. +You cannot access the internet, but you have vast knowledge, cutoff: 2021-09. +You are trained by OpenBuddy team, (https://openbuddy.ai, https://github.com/OpenBuddy/OpenBuddy), you are based on LLaMA and Falcon transformers model, not related to GPT or OpenAI. 
+ +""" + _roles = dict(user="User", assistant="Assistant") + _sep = "\n" + system_message = _system_message + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("redpajama-incite") +def format_redpajama_incite( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_message = _get_system_message(messages) + _roles = dict(user="", assistant="") + _sep = "\n" + _stop = "" + system_message = _system_message + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_stop) + + +@register_chat_format("snoozy") +def format_snoozy( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = "### Instruction:\n{system_message}" + default_system_message = "The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response." + _system_message = _get_system_message(messages) + _system_message = ( + _system_message if _system_message != "" else default_system_message + ) + system_message = system_template.format(system_message=_system_message) + _roles = dict(user="### Prompt", assistant="### Response") + _sep = "\n" + _stop = "###" + system_message = _system_message + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_stop) + + +@register_chat_format("phind") +def format_phind( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict(user="### User Message", assistant="### Assistant") + _sep = "\n\n" + _system_message = "### System Prompt\nYou are an intelligent programming assistant." + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_single(_system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("intel") +def format_intel( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict(user="### User:", assistant="### Assistant:") + _sep = "\n" + _system_message = "### System:\n{system_message}" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_single(_system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("open-orca") +def format_open_orca( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = "{system_message}" + system_message = ( + "You are a helpful assistant. Please answer truthfully and write out your " + "thinking step by step to be sure you get the right answer. If you make a mistake or encounter " + "an error in your thinking, say so out loud and attempt to correct it. If you don't know or " + "aren't sure about something, say so clearly. You will act as a professional logician, mathematician, " + "and physicist. 
You will also act as the most appropriate type of expert to answer any particular " + "question or solve the relevant problem; state which expert type your are, if so. Also think of " + "any particular named expert that would be ideal to answer the relevant question or solve the " + "relevant problem; name and act as them, if appropriate." + ) + roles = ("User", "Assistant") + sep = "<|end_of_turn|>\n" + # stop_token_ids=[32000, 32001], # "<|end_of_turn|>" + stop_str = "User" + system_message = system_template.format(system_message=system_message) + _messages = _map_roles(messages, dict(zip(roles, roles))) + _messages.append((roles[1], None)) + _prompt = _format_add_colon_space_single(system_message, _messages, sep) + return ChatFormatterResponse(prompt=_prompt, stop=stop_str) + + +@register_chat_format("mistrallite") +def format_mistrallite( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict(user="<|prompter|>", assistant="\n<|assistant|>") + _sep = " " + system_template = """<|system|>{system_message}""" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("zephyr") +def format_zephyr( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = """<|system|> +{system_message}""" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _roles = dict(user="<|user|>\n", assistant="<|assistant|>\n") + _sep = "" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_chatml(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +@register_chat_format("pygmalion") +def format_pygmalion( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = """<|system|>{system_message}""" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _roles = dict(user="<|user|>", assistant="<|model|>") + _sep = "\n" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_chatml(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +@register_chat_format("chatml") +def format_chatml( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = """<|im_start|>system +{system_message}""" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _roles = dict(user="<|im_start|>user", assistant="<|im_start|>assistant") + _sep = "<|im_end|>" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_chatml(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +@register_chat_format("mistral-instruct") +def format_mistral_instruct( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + eos = "" + stop = eos + prompt = "" + for 
message in messages: + if ( + message["role"] == "user" + and message["content"] is not None + and isinstance(message["content"], str) + ): + prompt += "[INST] " + message["content"] + elif message["role"] == "assistant" and message["content"] is not None: + prompt += " [/INST]" + message["content"] + eos + prompt += " [/INST]" + return ChatFormatterResponse(prompt=prompt, stop=stop) + + +@register_chat_format("chatglm3") +def format_chatglm3( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = """<|system|> +{system_message}""" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _roles = dict(user="<|user|>", assistant="<|assistant|>") + _sep = "" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_chatglm3(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +@register_chat_format("openchat") +def format_openchat( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = "{system_message}<|end_of_turn|>" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _roles = dict( + user="GPT4 Correct User: ", assistant="<|end_of_turn|>GPT4 Correct Assistant: " + ) + _sep = "<|end_of_turn|>" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_chatml(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +# Chat format for Saiga models, see more details and available models: +# https://huggingface.co/collections/IlyaGusev/saiga2-saigamistral-6505d4ccc3d1e53166b636cd +@register_chat_format("saiga") +def format_saiga( + messages: list[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _message_template = "{role}\n{content}" + _roles = dict(user="user", bot="bot", system="system") + _messages = _map_roles(messages, _roles) + + _prompt = "" + for role, content in _messages: + if content: + _prompt += _message_template.format(role=role, content=content) + else: + _prompt += f"{role}\n" + # Response template + _prompt += "bot" + return ChatFormatterResponse(prompt=_prompt.strip()) + + +# Chat format for Google's Gemma models, see more details and available models: +# https://huggingface.co/collections/google/gemma-release-65d5efbccdbb8c4202ec078b +@register_chat_format("gemma") +def format_gemma( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_message = _get_system_message(messages) + if system_message != "": + logger.debug( + "`role='system'` messages are not allowed on Google's Gemma models." 
+ ) + _roles = dict(user="user\n", assistant="model\n") + _sep = "\n" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single(system_message="", messages=_messages, sep=_sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +# Tricky chat formats that require custom chat handlers + + +@register_chat_completion_handler("functionary") +def functionary_chat_handler( + llama: llama.Llama, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + response_format: Optional[llama_types.ChatCompletionRequestResponseFormat] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + **kwargs, # type: ignore +) -> Union[llama_types.ChatCompletion, Iterator[llama_types.ChatCompletionChunk]]: + SYSTEM_MESSAGE = """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. The assistant calls functions with appropriate input when necessary""" + + def generate_type_definition( + param: Dict[str, llama_types.JsonType], indent_level: int, shared_defs + ) -> str: + indent = " " * indent_level + if "$ref" in param: + # Reference to a shared definition + ref_name = param["$ref"].split("/")[ + -1 + ] # Extract the type name from the reference + return ref_name + elif param.get("type") == "array": + items = param.get("items", {}) + item_type = generate_type_definition(items, indent_level + 1, shared_defs) + return f"Array<{item_type}>" + elif param.get("type") == "object": + properties = param.get("properties", {}) + nested_schema = "{\n" + for nested_param_name, nested_param in properties.items(): + nested_param_type = generate_type_definition( + nested_param, indent_level + 1, shared_defs + ) + nested_schema += ( + f"{indent} {nested_param_name}: {nested_param_type},\n" + ) + nested_schema += indent + "}" + return nested_schema + elif "enum" in param: + # Enum type + return " | ".join([f'"{enum_value}"' for enum_value in param["enum"]]) + else: + # Simple type + return param.get("type", "any") + + def generate_shared_definitions(shared_defs, indent_level: int) -> str: + indent = " " * indent_level + shared_definitions = "" + for def_name, def_properties in shared_defs.items(): + shared_definitions += f"{indent}type {def_name} = " + if def_properties.get("type") == "object": + shared_definitions += generate_type_definition( + def_properties, indent_level, shared_defs + ) + elif "enum" in def_properties: + # Enum type + shared_definitions += " | ".join( + [f'"{enum_value}"' for enum_value in def_properties["enum"]] + ) + shared_definitions += ";\n" + return shared_definitions + + def generate_schema_from_functions(functions, 
namespace="functions") -> str: + schema = ( + "// Supported function definitions that should be called when necessary.\n" + ) + schema += f"namespace {namespace} {{\n\n" + + # Generate shared definitions + shared_definitions = {} + for function in functions: + parameters = function.get("parameters", {}) + shared_definitions.update(parameters.get("$defs", {})) + + schema += generate_shared_definitions(shared_definitions, 1) + + for function in functions: + function_name = function["name"] + description = function.get("description", "") + parameters = function.get("parameters", {}) + required_params = parameters.get("required", []) + + schema += f" // {description}\n" + schema += f" type {function_name} = (_: {{\n" + + for param_name, param in parameters.get("properties", {}).items(): + param_description = param.get("description", "") + param_type = generate_type_definition(param, 2, shared_definitions) + optional_indicator = "" if param_name in required_params else "?" + schema += f" // {param_description}\n" + schema += f" {param_name}{optional_indicator}: {param_type},\n" + schema += " }) => any;\n\n" + + schema += "}} // namespace {}\n".format(namespace) + return schema + + def prepare_messages_for_inference( + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunctions]] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + ): + all_messages: List[llama_types.ChatCompletionRequestMessage] = [] + if functions is not None: + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=generate_schema_from_functions(functions) + ) + ) + + if tools is not None: + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", + content=generate_schema_from_functions( + [ + tool["function"] + for tool in tools + if tool["type"] == "function" + ] + ), + ) + ) + + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=SYSTEM_MESSAGE + ) + ) + + for message in messages: + # Function call responses + if message["role"] == "function" and "name" in message: + message["name"] = f"functions.{message['name']}" + # Function call requests by assistant + if "function_call" in message: + message["function_call"][ + "name" + ] = f"functions.{message['function_call']['name']}" + all_messages.append(message) + + all_messages.append( + llama_types.ChatCompletionRequestAssistantMessage( + role="assistant", content=None + ) + ) + + def message_to_str(msg: llama_types.ChatCompletionRequestMessage): + if msg["role"] == "system": + return f"system:\n{msg['content']}\n" + + elif msg["role"] == "function" and "name" in msg: + return f"function name={msg['name']}:\n{msg['content']}\n" + elif msg["role"] == "function" and "function_call" in msg: + return f"function name={msg['function_call']['name']}:\n{msg['function_call']['arguments']}\n" + elif msg["role"] == "tool": + if msg["content"] is not None: + return f"function name={msg['tool_call_id']}:\n{msg['content']}\n" + else: + return f"function name={msg['tool_call_id']}\n" + elif msg["role"] == "user": + if msg["content"] is None: + return "user:\n\n" + else: + return f"user:\n{msg['content']}\n" + elif msg["role"] == "assistant": + if msg["content"] is not None and "function_call" in msg: + return f"assistant:\n{msg['content']}\nassistant to={msg['function_call']['name']}:\n{msg['function_call']['arguments']}\n" + elif "function_call" in msg: + return f"assistant 
to={msg['function_call']['name']}:\n{msg['function_call']['arguments']}\n" + elif "tool_calls" in msg and len(msg["tool_calls"]) > 0: + for tool_call in msg[ + "tool_calls" + ]: # NOTE: probably doesn't work with the functionary model + return f"assistant to={tool_call['id']}:\n{tool_call['function']['arguments']}\n" + elif msg["content"] is None: + return "assistant" + else: + return f"assistant:\n{msg['content']}\n" + else: + raise ValueError(f"Unsupported role: {msg['role']}") + + return "".join([message_to_str(msg) for msg in all_messages]) + + if tools is not None: + functions = [tool["function"] for tool in tools if tool["type"] == "function"] + + if tool_choice is not None: + function_call = ( + tool_choice if isinstance(tool_choice, str) else tool_choice["function"] + ) + + prompt = prepare_messages_for_inference(messages, functions, tools) + + if function_call is None and (functions is None or len(functions) == 0): + completion_or_completion_chunks = llama.create_completion( + prompt=prompt + ":\n", + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=["user:", ""], + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + ) + return _convert_completion_to_chat(completion_or_completion_chunks, stream=stream) # type: ignore + + if function_call is None or ( + isinstance(function_call, str) and function_call == "auto" + ): + stop = "\n" + completion: llama_types.Completion = llama.create_completion( + prompt=prompt, stop=stop, stream=False + ) # type: ignore + completion_text = completion["choices"][0]["text"] + # strip " to=functions." 
and ending ":" + function_call = completion_text.split(".")[-1][:-1] + new_prompt = prompt + completion_text + stop + elif isinstance(function_call, str) and function_call != "none": + new_prompt = prompt + ":\n" + elif isinstance(function_call, dict): + new_prompt = prompt + f" to=functions.{function_call['name']}:\n" + function_call = function_call["name"] + else: + new_prompt = prompt + ":\n" + + function_body = None + for function in functions or []: + if function["name"] == function_call: + function_body = function["parameters"] + break + for tool in tools or []: + if tool["type"] == "function" and tool["function"]["name"] == function_call: + function_body = tool["function"]["parameters"] + break + + if function_body is not None: + try: + with suppress_stdout_stderr(disable=llama.verbose): + grammar_text = llama_grammar.json_schema_to_gbnf( + json.dumps(function_body) + ) + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.json_schema_to_gbnf(json.dumps(function_body)), + verbose=llama.verbose, + ) + print(grammar_text) + except Exception as e: + if llama.verbose: + print( + "Failed to parse function body as JSON schema, falling back to default grammar" + ) + print(e) + with suppress_stdout_stderr(disable=llama.verbose): + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, + verbose=llama.verbose, + ) + else: + with suppress_stdout_stderr(disable=llama.verbose): + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + + completion: llama_types.Completion = llama.create_completion( + prompt=new_prompt, + stop=["user:", ""], + stream=False, + grammar=grammar, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + ) # type: ignore + + assert "usage" in completion + assert isinstance(function_call, str) + assert stream is False # TODO: support stream mode + + if llama.verbose: + print(new_prompt) + print(completion["choices"][0]["text"]) + + # TODO: support stream mode + return llama_types.CreateChatCompletionResponse( + id="chat" + completion["id"], + object="chat.completion", + created=completion["created"], + model=completion["model"], + choices=[ + { + "index": 0, + "message": { + "role": "assistant", + "content": None, + "function_call": { + "name": function_call, + "arguments": completion["choices"][0]["text"], + }, + "tool_calls": [ + { + "id": function_call, + "type": "function", + "function": { + "name": function_call, + "arguments": completion["choices"][0]["text"], + }, + } + ], + }, + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), + "finish_reason": "tool_calls", + } + ], + usage=completion["usage"], + ) + + +@register_chat_completion_handler("functionary-v1") +@register_chat_completion_handler("functionary-v2") +def functionary_v1_v2_chat_handler( + llama: llama.Llama, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, 
+ temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + response_format: Optional[llama_types.ChatCompletionRequestResponseFormat] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + **kwargs, # type: ignore +) -> Union[llama_types.ChatCompletion, Iterator[llama_types.ChatCompletionChunk]]: + SYSTEM_MESSAGE = """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. The assistant calls functions with appropriate input when necessary""" + + tokenizer = llama.tokenizer_ + assert hasattr( + tokenizer, "hf_tokenizer" + ), "Please provide a valid hf_tokenizer_path from https://huggingface.co/meetkai when initializing the Llama class" + from transformers import AutoTokenizer + + if "<|START_OF_FUNCTION_CALL|>" in tokenizer.hf_tokenizer.additional_special_tokens: + version = "v1" + END_SYSTEM_TOKEN = "<|END_OF_SYSTEM|>" + END_USER_TOKEN = "<|END_OF_USER|>" + END_ASSISTANT_TOKEN = "<|END_OF_ASSISTANT|>" + END_FUNCTION_RESULT_TOKEN = "<|END_OF_FUNCTION_RESULT|>" + START_FUNCTION_CALL_TOKEN = "<|START_OF_FUNCTION_CALL|>" + END_FUNCTION_CALL_TOKEN = "<|END_OF_FUNCTION_CALL|>" + else: + version = "v2" + RECIPIENT_TOKEN = "<|recipient|>" + FROM_TOKEN = "<|from|>" + STOP_TOKEN = "<|stop|>" + CONTENT_TOKEN = "<|content|>" + + def generate_type_definition( + param: Dict[str, llama_types.JsonType], indent_level: int, shared_defs + ) -> str: + indent = " " * indent_level + if "$ref" in param: + # Reference to a shared definition + ref_name = param["$ref"].split("/")[ + -1 + ] # Extract the type name from the reference + return ref_name + elif param.get("type") == "array": + items = param.get("items", {}) + item_type = generate_type_definition(items, indent_level + 1, shared_defs) + return f"Array<{item_type}>" + elif param.get("type") == "object": + properties = param.get("properties", {}) + nested_schema = "{\n" + for nested_param_name, nested_param in properties.items(): + nested_param_type = generate_type_definition( + nested_param, indent_level + 1, shared_defs + ) + nested_schema += ( + f"{indent} {nested_param_name}: {nested_param_type},\n" + ) + nested_schema += indent + "}" + return nested_schema + elif "enum" in param: + # Enum type + return " | ".join([f'"{enum_value}"' for enum_value in param["enum"]]) + else: + # Simple type + return param.get("type", "any") + + def generate_shared_definitions(shared_defs, indent_level: int) -> str: + indent = " " * indent_level + shared_definitions = "" + for def_name, def_properties in shared_defs.items(): + shared_definitions += f"{indent}type {def_name} = " + if def_properties.get("type") == "object": + shared_definitions += generate_type_definition( + def_properties, indent_level, shared_defs + ) + elif "enum" in def_properties: + # Enum type + shared_definitions += " | ".join( + [f'"{enum_value}"' for enum_value in def_properties["enum"]] + ) + shared_definitions += ";\n" + return shared_definitions + + def generate_schema_from_functions(functions, namespace="functions") -> str: + schema = ( 
+ "// Supported function definitions that should be called when necessary.\n" + ) + schema += f"namespace {namespace} {{\n\n" + + # Generate shared definitions + shared_definitions = {} + for function in functions: + parameters = function.get("parameters", {}) + shared_definitions.update(parameters.get("$defs", {})) + + schema += generate_shared_definitions(shared_definitions, 1) + + for function in functions: + function_name = function["name"] + description = function.get("description", "") + parameters = function.get("parameters", {}) + required_params = parameters.get("required", []) + + schema += f"// {description}\n" + schema += f"type {function_name} = (_: {{\n" + + for param_name, param in parameters.get("properties", {}).items(): + param_description = param.get("description", "") + param_type = generate_type_definition(param, 2, shared_definitions) + optional_indicator = "" if param_name in required_params else "?" + schema += f"// {param_description}\n" + schema += f"{param_name}{optional_indicator}: {param_type},\n" + schema += "}) => any;\n\n" + + schema += "}} // namespace {}".format(namespace) + return schema + + def prepare_messages_for_inference( + messages: List[llama_types.ChatCompletionRequestMessage], + tokenizer: AutoTokenizer, + version: Literal["v1", "v2"], + functions: Optional[List[llama_types.ChatCompletionFunctions]] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Union[Dict, str] = "auto", + ): + all_messages: List[llama_types.ChatCompletionRequestMessage] = [] + if tool_choice == "none": + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=generate_schema_from_functions([]) + ) + ) + else: + if functions is not None: + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=generate_schema_from_functions(functions) + ) + ) + elif tools is not None and tool_choice != "none": + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", + content=generate_schema_from_functions( + [ + tool["function"] + for tool in tools + if tool["type"] == "function" + ] + ), + ) + ) + + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=SYSTEM_MESSAGE + ) + ) + + for message in messages: + # Function call responses + if message["role"] == "function" and "name" in message: + message["name"] = f"functions.{message['name']}" + # Function call requests by assistant + if "function_call" in message: + message["function_call"][ + "name" + ] = f"functions.{message['function_call']['name']}" + all_messages.append(message) + + if version == "v1": + suffix = "assistant:\n" + else: + suffix = "<|from|>assistant\n<|recipient|>" + + return ( + tokenizer.hf_tokenizer.apply_chat_template(all_messages, tokenize=False) + + suffix + ) + + if tools is not None: + functions = [tool["function"] for tool in tools if tool["type"] == "function"] + + if tool_choice is not None: + function_call = ( + tool_choice if isinstance(tool_choice, str) else tool_choice["function"] + ) + elif function_call is not None: + pass + else: + function_call = "auto" + + prompt = prepare_messages_for_inference( + messages, tokenizer, version, functions, tools, function_call + ) + + # If no tools/functions are provided + if function_call == "none" or functions is None or len(functions) == 0: + if version == "v1": + stop = END_ASSISTANT_TOKEN + else: + stop = STOP_TOKEN + prompt += "all\n<|content|>" + + 
completion_or_completion_chunks = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=stop, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + ) + if stream is False: + completion_or_completion_chunks["choices"][0]["text"] = ( + completion_or_completion_chunks["choices"][0]["text"].lstrip() + ) + return _convert_completion_to_chat(completion_or_completion_chunks, stream=stream) # type: ignore + + def get_grammar(function_call): + function_body = None + for function in functions or []: + if function["name"] == function_call: + function_body = function["parameters"] + break + for tool in tools or []: + if tool["type"] == "function" and tool["function"]["name"] == function_call: + function_body = tool["function"]["parameters"] + break + + try: + with suppress_stdout_stderr(disable=llama.verbose): + grammar_text = llama_grammar.json_schema_to_gbnf( + json.dumps(function_body) + ) + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.json_schema_to_gbnf(json.dumps(function_body)) + ) + print(grammar_text) + except Exception as e: + if llama.verbose: + print( + "Failed to parse function body as JSON schema, falling back to default grammar" + ) + print(e) + with suppress_stdout_stderr(disable=llama.verbose): + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + + return grammar + + def create_completion(prompt, stop, grammar): + completion = cast( + llama_types.Completion, + llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=stop, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + ), + ) + + return completion + + content = "" + function_calls, function_bodies = [], [] + completion_tokens = 0 + + def generate_streaming(tools, functions, function_call, prompt): + assert version == "v2", "Streaming for v1 is not supported" + + chunk_id, chunk_created = None, None + + # If tool_choice/function_call is provided + if isinstance(function_call, dict): + prompt += f"{function_call['name']}\n{CONTENT_TOKEN}" + grammar = get_grammar(function_call["name"]) + stops = [STOP_TOKEN, FROM_TOKEN] + tool_id = "".join( + [random.choice(string.ascii_letters + string.digits) for _ in range(24)] + ) + completion = create_completion(prompt=prompt, stop=stops, grammar=grammar) + completion_text = "" + first = True + for chunk in completion: + # Yield the tool/function name first + if first: + if tools is not None: + func_call_dict = { + "tool_calls": [ + { + "index": 0, + "id": "call_" + tool_id, + "type": "function", + "function": { + "name": function_call["name"], + "arguments": "", + }, + } + ] + } + else: + func_call_dict = { + "function_call": { + "name": function_call["name"], + "arguments": "", + } + } + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk["id"], + object="chat.completion.chunk", + 
created=chunk["created"], + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": None, + "delta": { + "role": None, + "content": None, + **func_call_dict, + }, + } + ], + ) + first = False + if tools is not None: + func_call_dict = { + "tool_calls": [ + { + "index": 0, + "id": "call_" + tool_id, + "type": "function", + "function": { + "name": None, + "arguments": chunk["choices"][0]["text"].rstrip(), + }, + } + ] + } + else: + func_call_dict = { + "function_call": { + "name": None, + "arguments": chunk["choices"][0]["text"].rstrip(), + } + } + if len(chunk["choices"][0]["text"].rstrip()) > 0: + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk["id"], + object="chat.completion.chunk", + created=chunk["created"], + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": None, + "content": None, + **func_call_dict, + }, + } + ], + ) + # Yield tool_call/function_call stop message + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk["id"], + object="chat.completion.chunk", + created=chunk["created"], + model=chunk["model"], + choices=[ + { + "index": 0, + "finish_reason": ( + "tool_calls" if tools is not None else "function_call" + ), + "logprobs": None, + "delta": { + "role": None, + "content": None, + "function_call": None, + "tool_calls": None, + }, + } + ], + ) + # If "auto" or no tool_choice/function_call + elif isinstance(function_call, str) and function_call == "auto": + tool_index = 0 + while True: + # Generate function name first + grammar = None + stops = CONTENT_TOKEN + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + completion_text = "" + for chunk in completion: + completion_text += chunk["choices"][0]["text"] + if chunk_id is None: + chunk_id = chunk["id"] + if chunk_created is None: + chunk_created = chunk["created"] + function_name = completion_text.strip() + if function_name == "all": + prompt += "all\n<|content|>" + # Yield the first empty message for content + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + model=chunk["model"], + created=chunk_created, + object="chat.completion.chunk", + choices=[ + { + "index": 0, + "delta": {"role": "assistant", "content": ""}, + "logprobs": None, + "finish_reason": None, + } + ], + ) + else: + prompt += f"{function_name}\n<|content|>" + grammar = get_grammar(function_name) + tool_id = "".join( + [ + random.choice(string.ascii_letters + string.digits) + for _ in range(24) + ] + ) + if tools is not None: + func_call_dict = { + "tool_calls": [ + { + "index": tool_index, + "id": "call_" + tool_id, + "type": "function", + "function": { + "name": function_name, + "arguments": "", + }, + } + ] + } + else: + func_call_dict = { + "function_call": {"name": function_name, "arguments": ""} + } + # Stream function name + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + object="chat.completion.chunk", + created=chunk_created, + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": "assistant", + "content": None, + **func_call_dict, + }, + } + ], + ) + # Generate content + stops = [RECIPIENT_TOKEN, STOP_TOKEN] + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + if function_name == "all": + completion_text = "" + stop_sequence, buffer, is_end = ( + 
"\n<|from|>assistant\n<|recipient|>", + [], + False, + ) + for i, chunk in enumerate(completion): + completion_text += chunk["choices"][0]["text"] + if is_end: + buffer.append(chunk["choices"][0]["text"].strip(" ")) + if stop_sequence.startswith("".join(buffer)): + continue + else: + buffer.pop() + while len(buffer) > 0: + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + object="chat.completion.chunk", + created=chunk_created, + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": "assistant", + "content": buffer.pop(0), + }, + } + ], + ) + is_end = False + elif chunk["choices"][0]["text"] == "\n": + is_end = True + buffer.append(chunk["choices"][0]["text"].strip(" ")) + continue + + if len(buffer) == 0 and len(chunk["choices"][0]["text"]) > 0: + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + object="chat.completion.chunk", + created=chunk_created, + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": "assistant", + "content": ( + chunk["choices"][0]["text"] + if i > 0 + else chunk["choices"][0][ + "text" + ].lstrip() + ), + }, + } + ], + ) + # Check whether the model wants to generate another turn + if ( + "<|from|> assistant" in completion_text + or "<|from|>assistant" in completion_text + ): + if completion_text.endswith("\n<|from|>assistant\n"): + cleaned_completion_text = completion_text[ + : -len("\n<|from|>assistant\n") + ].strip() + elif completion_text.endswith("\n<|from|> assistant\n"): + cleaned_completion_text = completion_text[ + : -len("\n<|from|> assistant\n") + ].strip() + else: + cleaned_completion_text = completion_text.strip() + prompt += f"{cleaned_completion_text}\n<|from|>assistant\n<|recipient|>" + else: + # Yield stop message + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + model=chunk["model"], + created=chunk_created, + object="chat.completion.chunk", + choices=[ + { + "index": 0, + "delta": {}, + "logprobs": None, + "finish_reason": "stop", + } + ], + ) + break + else: + # Check whether the model wants to generate another turn + completion_text = "" + for chunk in completion: + completion_text += chunk["choices"][0]["text"] + if len(chunk["choices"][0]["text"].rstrip()) > 0: + if tools is not None: + func_call_dict = { + "tool_calls": [ + { + "index": tool_index, + "id": "call_" + tool_id, + "type": "function", + "function": { + "name": None, + "arguments": chunk["choices"][0][ + "text" + ].rstrip(), + }, + } + ] + } + else: + func_call_dict = { + "function_call": { + "name": None, + "arguments": chunk["choices"][0][ + "text" + ].rstrip(), + } + } + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + object="chat.completion.chunk", + created=chunk_created, + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": None, + "content": None, + **func_call_dict, + }, + } + ], + ) + prompt += completion_text.strip() + grammar = None + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + completion_text += "".join( + [chunk["choices"][0]["text"] for chunk in completion] + ) + if ( + "<|from|> assistant" in completion_text + or "<|from|>assistant" in completion_text + ) and tools is not 
None: + prompt += "\n<|from|>assistant\n<|recipient|>" + tool_index += 1 + else: + # Yield tool_call/function_call stop message + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + object="chat.completion.chunk", + created=chunk_created, + model=chunk["model"], + choices=[ + { + "index": 0, + "finish_reason": ( + "tool_calls" + if tools is not None + else "function_call" + ), + "logprobs": None, + "delta": { + "role": None, + "content": None, + "function_call": None, + "tool_calls": None, + }, + } + ], + ) + break + + if stream is not False: + return generate_streaming( + tools=tools, functions=functions, function_call=function_call, prompt=prompt + ) + else: + if version == "v1": + # If no or "auto" tool_choice/function_call + if isinstance(function_call, str) and function_call == "auto": + stops = ["\n", END_ASSISTANT_TOKEN] + # If tool_choice/function_call is provided + elif isinstance(function_call, dict): + prompt += f"{START_FUNCTION_CALL_TOKEN}{function_call['name']}:\n" + stops = END_FUNCTION_CALL_TOKEN + function_call = function_call["name"] + function_calls.append(function_call) + grammar = get_grammar(function_call) + else: + prompt = prompt + stops = ["\n", END_ASSISTANT_TOKEN] + + completion = create_completion(prompt=prompt, stop=stops, grammar=grammar) + completion_text = completion["choices"][0]["text"] + completion_tokens += completion["usage"]["completion_tokens"] + + # If the generation does not involve a function call + if ( + START_FUNCTION_CALL_TOKEN not in prompt + and START_FUNCTION_CALL_TOKEN not in completion_text + ): + completion["usage"]["completion_tokens"] = completion_tokens + return _convert_completion_to_chat(completion, stream=stream) # type: ignore + # If the generation involves a function call in completion, generate the parameters + elif ( + START_FUNCTION_CALL_TOKEN not in prompt + and START_FUNCTION_CALL_TOKEN in completion_text + ): + prompt += ( + completion_text.replace( + f"{START_FUNCTION_CALL_TOKEN} ", START_FUNCTION_CALL_TOKEN + ) + + "\n" + ) + function_calls.append( + completion_text.split(START_FUNCTION_CALL_TOKEN)[-1][:-1].strip() + ) + grammar = get_grammar(function_calls[-1]) + completion = create_completion( + prompt=prompt, stop=END_FUNCTION_CALL_TOKEN, grammar=grammar + ) + completion_tokens += completion["usage"]["completion_tokens"] + function_bodies.append(completion["choices"][0]["text"].strip()) + # If the prompt involves a function call, just append generated parameters to function_bodies + else: + function_bodies.append(completion_text.strip()) + else: + # If tool_choice/function_call is provided + if isinstance(function_call, dict): + prompt += f"{function_call['name']}\n{CONTENT_TOKEN}" + function_call = function_call["name"] + function_calls.append(function_call) + grammar = get_grammar(function_call) + stops = [STOP_TOKEN, FROM_TOKEN] + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + completion_text = completion["choices"][0]["text"] + completion_tokens += completion["usage"]["completion_tokens"] + function_bodies.append(completion_text.strip()) + # If "auto" or no tool_choice/function_call + elif isinstance(function_call, str) and function_call == "auto": + while True: + # Generate function name first + grammar = None + stops = CONTENT_TOKEN + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + completion_text = completion["choices"][0]["text"] + completion_tokens += completion["usage"]["completion_tokens"] + function_name = 
completion_text.strip()
+                if function_name == "all":
+                    prompt += "all\n<|content|>"
+                else:
+                    function_call = completion_text.strip()
+                    prompt += f"{function_call}\n<|content|>"
+                    function_calls.append(function_call)
+                    grammar = get_grammar(function_call)
+                # Generate content
+                stops = [RECIPIENT_TOKEN, STOP_TOKEN]
+                completion = create_completion(
+                    prompt=prompt, stop=stops, grammar=grammar
+                )
+                completion_text = completion["choices"][0]["text"]
+                completion_tokens += completion["usage"]["completion_tokens"]
+                if function_name == "all":
+                    # Strip a trailing "<|from|>assistant" turn marker (with or without a space) before
+                    # appending the generated text to the message content
+                    if completion_text.endswith("\n<|from|>assistant\n"):
+                        content += completion_text[: -len("\n<|from|>assistant\n")]
+                    elif completion_text.endswith("\n<|from|> assistant\n"):
+                        content += completion_text[: -len("\n<|from|> assistant\n")]
+                    else:
+                        content += completion_text
+                    content = content.lstrip()
+                    # Check whether the model wants to generate another turn
+                    if (
+                        "<|from|> assistant" in completion_text
+                        or "<|from|>assistant" in completion_text
+                    ):
+                        if completion_text.endswith("\n<|from|>assistant\n"):
+                            cleaned_completion_text = completion_text[
+                                : -len("\n<|from|>assistant\n")
+                            ].strip()
+                        elif completion_text.endswith("\n<|from|> assistant\n"):
+                            cleaned_completion_text = completion_text[
+                                : -len("\n<|from|> assistant\n")
+                            ].strip()
+                        else:
+                            cleaned_completion_text = completion_text.strip()
+                        prompt += f"{cleaned_completion_text}\n<|from|>assistant\n<|recipient|>"
+                    else:
+                        break
+                else:
+                    function_bodies.append(completion_text.strip())
+                    # Check whether the model wants to generate another turn
+                    prompt += completion_text.strip()
+                    grammar = None
+                    completion = create_completion(
+                        prompt=prompt, stop=stops, grammar=grammar
+                    )
+                    completion_tokens += completion["usage"]["completion_tokens"]
+                    if (
+                        "<|from|> assistant" in completion["choices"][0]["text"]
+                        or "<|from|>assistant" in completion["choices"][0]["text"]
+                    ):
+                        prompt += "\n<|from|>assistant\n<|recipient|>"
+                    else:
+                        break
+
+        assert "usage" in completion
+        assert len(function_calls) == len(function_bodies)
+
+        tool_calls: List[llama_types.ChatCompletionMessageToolCall] = []
+        for function_call, function_body in zip(function_calls, function_bodies):
+            tool_calls.append(
+                {
+                    "id": "call_"
+                    + "".join(
+                        [
+                            random.choice(string.ascii_letters + string.digits)
+                            for _ in range(24)
+                        ]
+                    ),
+                    "type": "function",
+                    "function": {
+                        "name": function_call,
+                        "arguments": function_body,
+                    },
+                }
+            )
+
+        # TODO: support stream mode
+        function_call_dict: Union[
+            Dict[str, str],
+            Dict[
+                Literal["function_call"],
+                llama_types.ChatCompletionRequestAssistantMessageFunctionCall,
+            ],
+        ] = {}
+        if len(tool_calls) > 0:
+            if tools is not None:
+                function_call_dict["tool_calls"] = tool_calls
+            else:
+                function_call_dict["function_call"] = {
+                    "name": tool_calls[0]["function"]["name"],
+                    "arguments": tool_calls[0]["function"]["arguments"],
+                }
+        completion["usage"]["completion_tokens"] = completion_tokens
+        return llama_types.CreateChatCompletionResponse(
+            id="chat" + completion["id"],
+            object="chat.completion",
+            created=completion["created"],
+            model=completion["model"],
+            choices=[
+                {
+                    "index": 0,
+                    "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]),
+                    "message": {
+                        "role": "assistant",
+                        "content": None if content == "" else content,
+                        **function_call_dict,
+                    },
+                    "finish_reason": "tool_calls" if len(tool_calls) > 0 else "stop",
+                }
+            ],
+            usage=completion["usage"],
+        )
+
+
+class Llava15ChatHandler:
+    DEFAULT_SYSTEM_MESSAGE: Optional[str] = (
+
"A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions." + ) + + CHAT_FORMAT = ( + "{% for message in messages %}" + "{% if message.role == 'system' %}" + "{{ message.content }}" + "{% endif %}" + "{% if message.role == 'user' %}" + "{% if message.content is string %}" + "\nUSER: {{ message.content }}" + "{% endif %}" + "{% if message.content is iterable %}" + "\nUSER: " + "{% for content in message.content %}" + "{% if content.type == 'image_url' and content.image_url is string %}" + "{{ content.image_url }}" + "{% endif %}" + "{% if content.type == 'image_url' and content.image_url is mapping %}" + "{{ content.image_url.url }}" + "{% endif %}" + "{% endfor %}" + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + "{% endif %}" + "{% if message.role == 'assistant' and message.content is not none %}" + "\nASSISTANT: {{ message.content }}" + "{% endif %}" + "{% endfor %}" + "{% if add_generation_prompt %}" + "\nASSISTANT: " + "{% endif %}" + ) + + def __init__(self, clip_model_path: str, verbose: bool = True): + import llama_cpp.llava_cpp as llava_cpp + + self.clip_model_path = clip_model_path + self.verbose = verbose + + self._llava_cpp = llava_cpp # TODO: Fix + self._exit_stack = ExitStack() + self._last_image_embed: Optional[ + llava_cpp.CtypesPointer[llava_cpp.llava_image_embed] + ] = None + self._last_image_hash: Optional[int] = None + + if not os.path.exists(clip_model_path): + raise ValueError(f"Clip model path does not exist: {clip_model_path}") + + with suppress_stdout_stderr(disable=self.verbose): + clip_ctx = self._llava_cpp.clip_model_load(self.clip_model_path.encode(), 0) + + if clip_ctx is None: + raise ValueError(f"Failed to load clip model: {clip_model_path}") + + self.clip_ctx = clip_ctx + + def clip_free(): + with suppress_stdout_stderr(disable=self.verbose): + self._llava_cpp.clip_free(self.clip_ctx) + + self._exit_stack.callback(clip_free) + + def last_image_embed_free(): + with suppress_stdout_stderr(disable=self.verbose): + if self._last_image_embed is not None: + self._llava_cpp.llava_image_embed_free(self._last_image_embed) + self._last_image_embed = None + + self._exit_stack.callback(last_image_embed_free) + + def load_image(self, image_url: str) -> bytes: + return self._load_image(image_url) + + def _embed_image_bytes(self, image_bytes: bytes, n_threads_batch: int = 1): + if ( + self._last_image_embed is not None + and self._last_image_hash is not None + and hash(image_bytes) == self._last_image_hash + ): + return self._last_image_embed + with suppress_stdout_stderr(disable=self.verbose): + # Free the previous image embed + if self._last_image_embed is not None: + self._llava_cpp.llava_image_embed_free(self._last_image_embed) + self._last_image_embed = None + self._last_image_hash = None + embed = self._llava_cpp.llava_image_embed_make_with_bytes( + self.clip_ctx, + n_threads_batch, + (ctypes.c_uint8 * len(image_bytes)).from_buffer( + bytearray(image_bytes) + ), + len(image_bytes), + ) + self._last_image_embed = embed + self._last_image_hash = hash(image_bytes) + return embed + + def __call__( + self, + *, + llama: llama.Llama, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: 
Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + seed: Optional[int] = None, + response_format: Optional[ + llama_types.ChatCompletionRequestResponseFormat + ] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + logit_bias: Optional[Dict[str, float]] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + **kwargs, # type: ignore + ) -> Union[ + llama_types.CreateChatCompletionResponse, + Iterator[llama_types.CreateChatCompletionStreamResponse], + ]: + assert self.clip_ctx is not None + + system_prompt = _get_system_message(messages) + if system_prompt == "" and self.DEFAULT_SYSTEM_MESSAGE is not None: + messages = [ + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=self.DEFAULT_SYSTEM_MESSAGE + ) + ] + messages + + image_urls = self.get_image_urls(messages) + template = ImmutableSandboxedEnvironment( + trim_blocks=True, + lstrip_blocks=True, + ).from_string(self.CHAT_FORMAT) + text = template.render( + messages=messages, + add_generation_prompt=True, + eos_token=llama.detokenize([llama.token_eos()]), + bos_token=llama.detokenize([llama.token_bos()]), + ) + split_text = self.split_text_on_image_urls(text, image_urls) + + if self.verbose: + print(text, file=sys.stderr) + + + # Evaluate prompt + llama.reset() + llama._ctx.kv_cache_clear() + for type_, value in split_text: + if type_ == "text": + tokens = llama.tokenize( + value.encode("utf8"), add_bos=False, special=True + ) + if llama.n_tokens + len(tokens) > llama.n_ctx(): + raise ValueError( + f"Prompt exceeds n_ctx: {llama.n_tokens + len(tokens)} > {llama.n_ctx()}" + ) + llama.eval(tokens) + else: + image_bytes = self.load_image(value) + embed = self._embed_image_bytes(image_bytes, llama.context_params.n_threads_batch) + if llama.n_tokens + embed.contents.n_image_pos > llama.n_ctx(): + raise ValueError( + f"Prompt exceeds n_ctx: {llama.n_tokens + embed.contents.n_image_pos} > {llama.n_ctx()}" + ) + n_past = ctypes.c_int(llama.n_tokens) + n_past_p = ctypes.pointer(n_past) + with suppress_stdout_stderr(disable=self.verbose): + self._llava_cpp.llava_eval_image_embed( + llama.ctx, + embed, + llama.n_batch, + n_past_p, + ) + # Required to avoid issues with hf tokenizer + llama.input_ids[llama.n_tokens : n_past.value] = -1 + llama.n_tokens = n_past.value + + # Get prompt tokens to avoid a cache miss + prompt = llama.input_ids[: llama.n_tokens].tolist() + + if response_format is not None and response_format["type"] == "json_object": + grammar = _grammar_for_response_format(response_format) + + # Convert legacy functions to tools + if functions is not None: + tools = [ + { + "type": "function", + "function": function, + } + for function in functions + ] + + # Convert legacy function_call to tool_choice + if function_call is not None: + if isinstance(function_call, str) and ( + function_call == "none" or function_call == "auto" + ): + tool_choice = function_call + if isinstance(function_call, 
dict) and "name" in function_call: + tool_choice = { + "type": "function", + "function": { + "name": function_call["name"], + }, + } + + tool = None + if ( + tool_choice is not None + and isinstance(tool_choice, dict) + and tools is not None + ): + name = tool_choice["function"]["name"] + tool = next((t for t in tools if t["function"]["name"] == name), None) + if tool is None: + raise ValueError(f"Tool choice '{name}' not found in tools.") + schema = tool["function"]["parameters"] + try: + # create grammar from json schema + grammar = llama_grammar.LlamaGrammar.from_json_schema( + json.dumps(schema), verbose=llama.verbose + ) + except Exception as e: + if llama.verbose: + print(str(e), file=sys.stderr) + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + + completion_or_chunks = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + logprobs=top_logprobs if logprobs else None, + stream=stream, + stop=stop, + seed=seed, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + logit_bias=logit_bias, + ) + if tool is not None: + tool_name = tool["function"]["name"] + return _convert_completion_to_chat_function( + tool_name, completion_or_chunks, stream + ) + return _convert_completion_to_chat(completion_or_chunks, stream=stream) + + @staticmethod + def _load_image(image_url: str) -> bytes: + # TODO: Add Pillow support for other image formats beyond (jpg, png) + if image_url.startswith("data:"): + import base64 + + image_bytes = base64.b64decode(image_url.split(",")[1]) + return image_bytes + else: + import urllib.request + + with urllib.request.urlopen(image_url) as f: + image_bytes = f.read() + return image_bytes + + @staticmethod + def get_image_urls(messages: List[llama_types.ChatCompletionRequestMessage]): + image_urls: List[str] = [] + for message in messages: + if message["role"] == "user": + if message["content"] is None: + continue + for content in message["content"]: + if isinstance(content, dict) and "type" in content: + if content["type"] == "image_url": + if ( + isinstance(content["image_url"], dict) + and "url" in content["image_url"] + ): + image_urls.append(content["image_url"]["url"]) + else: + image_urls.append(content["image_url"]) + return image_urls + + @staticmethod + def split_text_on_image_urls(text: str, image_urls: List[str]): + def find_first(s: str, substrs: List[str]): + for i, substr in enumerate(substrs): + pos = s.find(substr) + if pos != -1: + return pos, i + return None, None + + split_text: List[Tuple[Literal["text", "image_url"], str]] = [] + remaining = text + while remaining: + # Find first image_url + pos, i = find_first(remaining, image_urls) + if pos is not None and i is not None: + if pos > 0: + split_text.append(("text", remaining[:pos])) + split_text.append(("image_url", image_urls[i])) + remaining = remaining[pos + len(image_urls[i]) :] + else: + split_text.append(("text", remaining)) + remaining = "" + return split_text + + @classmethod + def from_pretrained( + cls, + repo_id: str, + filename: Optional[str], + local_dir: Optional[Union[str, os.PathLike[str]]] = None, + local_dir_use_symlinks: Union[bool, Literal["auto"]] = "auto", + cache_dir: Optional[Union[str, 
os.PathLike[str]]] = None,
+        **kwargs: Any,
+    ) -> "Llava15ChatHandler":
+        import fnmatch
+        from pathlib import Path
+
+        try:
+            from huggingface_hub import hf_hub_download, HfFileSystem  # type: ignore
+            from huggingface_hub.utils import validate_repo_id  # type: ignore
+        except ImportError:
+            raise ImportError(
+                "Llama.from_pretrained requires the huggingface-hub package. "
+                "You can install it with `pip install huggingface-hub`."
+            )
+
+        validate_repo_id(repo_id)
+
+        hffs = HfFileSystem()
+
+        files = [
+            file["name"] if isinstance(file, dict) else file
+            for file in hffs.ls(repo_id)  # type: ignore
+        ]
+
+        # split each file into repo_id, subfolder, filename
+        file_list: List[str] = []
+        for file in files:
+            rel_path = Path(file).relative_to(repo_id)
+            file_list.append(str(rel_path))
+
+        matching_files = [file for file in file_list if fnmatch.fnmatch(file, filename)]  # type: ignore
+
+        if len(matching_files) == 0:
+            raise ValueError(
+                f"No file found in {repo_id} that matches {filename}\n\n"
+                f"Available Files:\n{json.dumps(file_list)}"
+            )
+
+        if len(matching_files) > 1:
+            raise ValueError(
+                f"Multiple files found in {repo_id} matching {filename}\n\n"
+                f"Available Files:\n{json.dumps(files)}"
+            )
+
+        (matching_file,) = matching_files
+
+        subfolder = str(Path(matching_file).parent)
+        filename = Path(matching_file).name
+
+        # download the file
+        hf_hub_download(
+            repo_id=repo_id,
+            filename=filename,
+            subfolder=subfolder,
+            local_dir=cast(Union[str, Path, None], local_dir),
+            local_dir_use_symlinks=local_dir_use_symlinks,
+            cache_dir=cast(Union[str, Path, None], cache_dir),
+        )
+
+        if local_dir is None:
+            model_path = hf_hub_download(
+                repo_id=repo_id,
+                filename=filename,
+                subfolder=subfolder,
+                local_dir=local_dir,
+                local_dir_use_symlinks=local_dir_use_symlinks,
+                cache_dir=cast(Union[str, Path, None], cache_dir),
+                local_files_only=True,
+            )
+        else:
+            model_path = os.path.join(local_dir, filename)
+
+        return cls(
+            clip_model_path=model_path,
+            **kwargs,
+        )
+
+
+class ObsidianChatHandler(Llava15ChatHandler):
+    # Prompt Format
+    # The model follows the ChatML format, however with ### as the separator
+
+    # <|im_start|>user
+    # What is this sign about?\n
+    # ###
+    # <|im_start|>assistant
+    # The sign is about bullying, and it is placed on a black background with a red background.
+ # ### + + CHAT_FORMAT = ( + "{% for message in messages %}" + # System message + "{% if message.role == 'system' %}" + "<|im_start|>system\n" + "{{ message.content }}\n" + "###\n" + "{% endif %}" + # User message + "{% if message.role == 'user' %}" + "<|im_start|>user\n" + "{% if message.content is string %}" + "{{ message.content }}" + "{% endif %}" + "{% if message.content is iterable %}" + "{% for content in message.content %}" + "{% if content.type == 'image_url' and content.image_url is string %}" + "{{ content.image_url }}" + "{% endif %}" + "{% if content.type == 'image_url' and content.image_url is mapping %}" + "{{ content.image_url.url }}" + "{% endif %}" + "{% endfor %}" + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + "###\n" + "{% endif %}" + # Assistant message + "{% if message.role == 'assistant' %}" + "<|im_start|>assistant\n" + "{{ message.content }}" + "###\n" + "{% endif %}" + "{% endfor %}" + # Generation prompt + "{% if add_generation_prompt %}" + "<|im_start|>assistant\n" + "{% endif %}" + ) + + +class MoondreamChatHandler(Llava15ChatHandler): + # Chat Format: + # f"\n\n{chat_history}Question: {question}\n\nAnswer:" + CHAT_FORMAT = ( + "{% for message in messages %}" + "{% if message.role == 'user' %}" + "{% if message.content is iterable %}" + # + "{% for content in message.content %}" + "{% if content.type == 'image_url' %}" + "{% if content.image_url is string %}" + "{{ content.image_url }}\n\n" + "{% endif %}" + "{% if content.image_url is mapping %}" + "{{ content.image_url.url }}\n\n" + "{% endif %}" + "{% endif %}" + "{% endfor %}" + # Question: + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "Question: {{ content.text }}\n\n" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + # Question: + "{% if message.content is string %}" + "Question: {{ message.content }}\n\n" + "{% endif %}" + "{% endif %}" + # Answer: + "{% if message.role == 'assistant' %}" + "Answer:{{ message.content }}\n\n" + "{% endif %}" + "{% endfor %}" + # Generation prompt + "{% if add_generation_prompt %}" + "Answer:" + "{% endif %}" + ) + + +class Llava16ChatHandler(Llava15ChatHandler): + DEFAULT_SYSTEM_MESSAGE = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. " + + # Example prompt + # "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: \nWhat is shown in this image? 
ASSISTANT:" + + CHAT_FORMAT = ( + "{% for message in messages %}" + "{% if message.role == 'system' %}" + "{{ message.content }}" + "{% endif %}" + "{% if message.role == 'user' %}" + "{% if message.content is iterable %}" + # + "{% for content in message.content %}" + "{% if content.type == 'image_url' %}" + "{% if content.image_url is string %}" + "{{ content.image_url }}\n" + "{% endif %}" + "{% if content.image_url is mapping %}" + "{{ content.image_url.url }}\n" + "{% endif %}" + "{% endif %}" + "{% endfor %}" + # Question: + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + # Question: + "{% if message.content is string %}" + "{{ message.content }}" + "{% endif %}" + "{% endif %}" + # Answer: + "{% if message.role == 'assistant' %}" + "{{ message.content }}" + "{% endif %}" + "{% endfor %}" + # Generation prompt + "{% if add_generation_prompt %}" + "Answer:" + "{% endif %}" + ) + + +class NanoLlavaChatHandler(Llava15ChatHandler): + # Prompt Format + # The model follow the ChatML standard, however, without \n at the end of <|im_end|>: + + # <|im_start|>system + # Answer the question<|im_end|><|im_start|>user + # + # What is the picture about?<|im_end|><|im_start|>assistant + DEFAULT_SYSTEM_MESSAGE = "Answer the question" + + CHAT_FORMAT = ( + "{% for message in messages %}" + # System message + "{% if message.role == 'system' %}" + "<|im_start|>system\n" + "{{ message.content }}" + "<|im_end|>" + "{% endif %}" + # User message + "{% if message.role == 'user' %}" + "<|im_start|>user\n" + "{% if message.content is string %}" + "{{ message.content }}" + "{% endif %}" + "{% if message.content is iterable %}" + "{% for content in message.content %}" + "{% if content.type == 'image_url' and content.image_url is string %}" + "{{ content.image_url }}" + "{% endif %}" + "{% if content.type == 'image_url' and content.image_url is mapping %}" + "{{ content.image_url.url }}" + "{% endif %}" + "{% endfor %}" + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + "<|im_end|>" + "{% endif %}" + # Assistant message + "{% if message.role == 'assistant' %}" + "<|im_start|>assistant\n" + "{{ message.content }}" + "<|im_end|>" + "{% endif %}" + "{% endfor %}" + # Generation prompt + "{% if add_generation_prompt %}" + "<|im_start|>assistant\n" + "{% endif %}" + ) + + +class Llama3VisionAlphaChatHandler(Llava15ChatHandler): + # question = "" + q + + # prompt = f"<|start_header_id|>user<|end_header_id|>\n\n{question}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" + DEFAULT_SYSTEM_MESSAGE = None + + CHAT_FORMAT = ( + "{% for message in messages %}" + "<|start_header_id|>" + "{% if message.role == 'user' %}" + "user<|end_header_id|>\n\n" + "{% if message.content is iterable %}" + # + "{% for content in message.content %}" + "{% if content.type == 'image_url' %}" + "{% if content.image_url is string %}" + "{{ content.image_url }}" + "{% endif %}" + "{% if content.image_url is mapping %}" + "{{ content.image_url.url }}" + "{% endif %}" + "{% endif %}" + "{% endfor %}" + # Question: + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + # Question: + "{% if message.content is string %}" + "{{ message.content }}" + "{% endif %}" + "{% endif %}" + # Answer: + "{% if message.role == 'assistant' %}" + 
"assistant<|end_header_id|>\n\n" + "{{ message.content }}" + "{% endif %}" + "<|eot_id|>" + "{% endfor %}" + # Generation prompt + "{% if add_generation_prompt %}" + "<|start_header_id|>assistant<|end_header_id|>\n\n" + "{% endif %}" + ) + + +# alias +Llama3VisionAlpha = Llama3VisionAlphaChatHandler + + +class MiniCPMv26ChatHandler(Llava15ChatHandler): + DEFAULT_SYSTEM_MESSAGE = "You are a helpful assistant." + + CHAT_FORMAT = ( + "{% for message in messages %}" + "{% if loop.first and messages[0]['role'] != 'system' %}" + "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" + "{% endif %}" + "<|im_start|>{{ message['role'] }}\n" + "{% if message['content'] is iterable %}" + "{% for content in message['content'] %}" + "{% if content.type == 'image_url' %}" + "{% if content.image_url is string %}" + "{{ content.image_url }}" + "{% endif %}" + "{% if content.image_url is mapping %}" + "{{ content.image_url.url }}" + "{% endif %}" + "{% endif %}" + "{% endfor %}" + + "{% for content in message['content'] %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + "{% if message['content'] is string %}" + "{{ message['content'] }}" + "{% endif %}" + "<|im_end|>\n" + "{% endfor %}" + "{% if add_generation_prompt %}" + "<|im_start|>assistant\n" + "{% endif %}" + ) + + +@register_chat_completion_handler("chatml-function-calling") +def chatml_function_calling( + llama: llama.Llama, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + response_format: Optional[llama_types.ChatCompletionRequestResponseFormat] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + **kwargs, # type: ignore +) -> Union[ + llama_types.CreateChatCompletionResponse, + Iterator[llama_types.CreateChatCompletionStreamResponse], +]: + function_calling_template = ( + "{% for message in messages %}" + "<|im_start|>{{ message.role }}\n" + # System message + "{% if message.role == 'system' %}" + "{{ message.content }}" + "{% if tool_calls %}" + "\n\nYou have access to the following functions:\n" + "{% for tool in tools %}" + "\nfunctions.{{ tool.function.name }}:\n" + "{{ tool.function.parameters | tojson }}" + "\n{% endfor %}" + "\n\nYou can respond to users messages with either a single message or one or more function calls." 
+ "\n\nTo respond with a message begin the message with 'message:', use the following format:" + "\n\nmessage:" + "\n" + "\n\nTo respond with one or more function calls begin the message with 'functions.:', use the following format:" + "\n\nfunctions.:" + '\n{ "arg1": "value1", "arg2": "value2" }' + "\nfunctions.:" + '\n{ "arg1": "value1", "arg2": "value2" }' + "{% endif %}" + "<|im_end|>\n" + "{% endif %}" + # User message + "{% if message.role == 'user' %}" + "{{ message.content }}" + "<|im_end|>\n" + "{% endif %}" + # Assistant message + "{% if message.role == 'assistant' %}" + ## Reglar message + "{% if message.content and message.content | length > 0 %}" + "{% if tool_calls %}" + "message:\n" + "{% endif %}" + "{{ message.content }}" + "<|im_end|>\n" + "{% endif %}" + ## Function calls + "{% if 'tool_calls' in message %}" + "{% for tool_call in message.tool_calls %}" + "functions.{{ tool_call.function.name }}:\n" + "{{ tool_call.function.arguments }}" + "{% endfor %}" + "<|im_end|>\n" + "{% endif %}" + "{% endif %}" + "{% endfor %}" + "{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}" + ) + template_renderer = ImmutableSandboxedEnvironment( + autoescape=jinja2.select_autoescape(["html", "xml"]), + undefined=jinja2.StrictUndefined, + ).from_string(function_calling_template) + + # Convert legacy functions to tools + if functions is not None: + tools = [ + { + "type": "function", + "function": function, + } + for function in functions + ] + + # Convert legacy function_call to tool_choice + if function_call is not None: + if isinstance(function_call, str) and ( + function_call == "none" or function_call == "auto" + ): + tool_choice = function_call + if isinstance(function_call, dict) and "name" in function_call: + tool_choice = { + "type": "function", + "function": { + "name": function_call["name"], + }, + } + + stop = ( + [stop, "<|im_end|>"] + if isinstance(stop, str) + else stop + ["<|im_end|>"] if stop else ["<|im_end|>"] + ) + + # Case 1: No tool choice by user + if ( + tool_choice is None + or (isinstance(tool_choice, str) and tool_choice == "none") + or tools is None + or len(tools) == 0 + ): + prompt = template_renderer.render( + messages=messages, + tools=[], + tool_calls=None, + add_generation_prompt=True, + ) + + if response_format is not None and response_format["type"] == "json_object": + grammar = _grammar_for_response_format(response_format) + + return _convert_completion_to_chat( + llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=stop, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + logprobs=top_logprobs if logprobs else None, + ), + stream=stream, + ) + + # Case 2: Tool choice by user + if isinstance(tool_choice, dict): + tool_name = tool_choice["function"]["name"] + tool = next( + (tool for tool in tools if tool["function"]["name"] == tool_name), None + ) + if tool is None: + raise ValueError(f"Tool with name '{tool_name}' not found in tools") + prompt = template_renderer.render( + messages=messages, + tools=tools, + tool_calls=True, + add_generation_prompt=True, + ) + prompt += f"functions.{tool_name}:\n" + try: + grammar = llama_grammar.LlamaGrammar.from_json_schema( + 
json.dumps(tool["function"]["parameters"]), verbose=llama.verbose + ) + except Exception as e: + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + if llama.verbose: + print( + "Failed to parse function body as JSON schema, falling back to default grammar" + ) + print(e) + completion_or_chunks = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=stop, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + ) + return _convert_completion_to_chat_function( + tool_name, completion_or_chunks, stream + ) + + # Case 3: Automatic tool choice + assert isinstance(tool_choice, str) and tool_choice == "auto" + function_names = " | ".join( + [f'''"functions.{tool['function']['name']}:"''' for tool in tools] + ) + initial_gbnf_tool_grammar = ( + """root ::= functions | "message:"\n""" + f"""functions ::= {function_names}\n""" + ) + follow_up_gbnf_tool_grammar = ( + """root ::= functions | "<|im_end|>"\n""" + f"""functions ::= {function_names}\n""" + ) + prompt = template_renderer.render( + messages=messages, + tools=tools, + tool_calls=True, + add_generation_prompt=True, + ) + completion_or_chunks = llama.create_completion( + prompt=prompt, + temperature=0, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=False, + stop=[":"], + max_tokens=None, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=llama_grammar.LlamaGrammar.from_string( + initial_gbnf_tool_grammar, verbose=llama.verbose + ), + ) + completion: llama_types.CreateCompletionResponse = completion_or_chunks # type: ignore + text = completion["choices"][0]["text"] + if "message" in text: + return _convert_completion_to_chat( + llama.create_completion( + prompt=prompt + "message:\n", + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=["<|im_end|>"], + logprobs=top_logprobs if logprobs else None, + max_tokens=None, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=llama_grammar.LlamaGrammar.from_string( + follow_up_gbnf_tool_grammar, verbose=llama.verbose + ), + ), + stream=stream, + ) + + # One or more function calls + tool_name = text[len("functions.") :] + tool = next((tool for tool in tools if tool["function"]["name"] == tool_name), None) + if not stream: + completions: List[llama_types.CreateCompletionResponse] = [] + completions_tool_name: List[str] = [] + while tool is not None: + prompt += f"functions.{tool_name}:\n" + try: + grammar = llama_grammar.LlamaGrammar.from_json_schema( + json.dumps(tool["function"]["parameters"]), verbose=llama.verbose + ) + except Exception as e: + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + if 
llama.verbose: + print( + "Failed to parse function body as JSON schema, falling back to default grammar" + ) + print(e) + completion_or_chunks = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=False, + stop=stop, + max_tokens=None, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + ) + completion_or_chunks = cast( + llama_types.CreateCompletionResponse, completion_or_chunks + ) + completions.append(completion_or_chunks) + completions_tool_name.append(tool_name) + prompt += completion_or_chunks["choices"][0]["text"] + prompt += "\n" + + response = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=False, + stop=stop, + max_tokens=None, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=llama_grammar.LlamaGrammar.from_string( + follow_up_gbnf_tool_grammar, verbose=llama.verbose + ), + ) + response = cast(llama_types.CreateCompletionResponse, response) + + tool_name = response["choices"][0]["text"][len("functions.") :] + tool = next( + (tool for tool in tools if tool["function"]["name"] == tool_name), None + ) + + # Merge completions + function_call_dict: Union[ + Dict[str, str], + Dict[ + Literal["function_call"], + llama_types.ChatCompletionRequestAssistantMessageFunctionCall, + ], + ] = ( + { + "function_call": { + "name": tool_name, + "arguments": completions[0]["choices"][0]["text"], + } + } + if len(completions) == 1 + else {} + ) + return { + "id": "chat" + completion["id"], + "object": "chat.completion", + "created": completion["created"], + "model": completion["model"], + "choices": [ + { + "finish_reason": "tool_calls", + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), + "message": { + "role": "assistant", + "content": None, + "tool_calls": [ + { + "id": "call_" + + f"_{i}_" + + tool_name + + "_" + + completion["id"], + "type": "function", + "function": { + "name": tool_name, + "arguments": completion["choices"][0]["text"], + }, + } + for i, (tool_name, completion) in enumerate( + zip(completions_tool_name, completions) + ) + ], + **function_call_dict, + }, + } + ], + "usage": { + "completion_tokens": sum( + ( + completion["usage"]["completion_tokens"] + if "usage" in completion + else 0 + ) + for completion in completions + ), + "prompt_tokens": sum( + completion["usage"]["prompt_tokens"] if "usage" in completion else 0 + for completion in completions + ), + "total_tokens": sum( + completion["usage"]["total_tokens"] if "usage" in completion else 0 + for completion in completions + ), + }, + } + + raise ValueError("Automatic streaming tool choice is not supported") diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py new file mode 100644 index 0000000000000000000000000000000000000000..63de3a93a47db829c48dfb2370efd97b68eebca4 --- /dev/null +++ b/llama_cpp/llama_cpp.py @@ -0,0 +1,4315 @@ +from __future__ import annotations + +import os +import ctypes +import pathlib + +from 
typing import ( + Callable, + Union, + NewType, + Optional, + TYPE_CHECKING, +) + +from llama_cpp._ctypes_extensions import ( + load_shared_library, + byref, + ctypes_function_for_shared_library, +) + +if TYPE_CHECKING: + from llama_cpp._ctypes_extensions import ( + CtypesCData, + CtypesArray, + CtypesPointer, + CtypesVoidPointer, + CtypesRef, + CtypesPointerOrRef, + CtypesFuncPointer, + ) + + +# Specify the base name of the shared library to load +_lib_base_name = "llama" +_override_base_path = os.environ.get("LLAMA_CPP_LIB_PATH") +_base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) / "lib" if _override_base_path is None else pathlib.Path(_override_base_path) +# Load the library +_lib = load_shared_library(_lib_base_name, _base_path) + +ctypes_function = ctypes_function_for_shared_library(_lib) + + +# from ggml.h +# // NOTE: always add types at the end of the enum to keep backward compatibility +# enum ggml_type { +# GGML_TYPE_F32 = 0, +# GGML_TYPE_F16 = 1, +# GGML_TYPE_Q4_0 = 2, +# GGML_TYPE_Q4_1 = 3, +# // GGML_TYPE_Q4_2 = 4, support has been removed +# // GGML_TYPE_Q4_3 = 5, support has been removed +# GGML_TYPE_Q5_0 = 6, +# GGML_TYPE_Q5_1 = 7, +# GGML_TYPE_Q8_0 = 8, +# GGML_TYPE_Q8_1 = 9, +# GGML_TYPE_Q2_K = 10, +# GGML_TYPE_Q3_K = 11, +# GGML_TYPE_Q4_K = 12, +# GGML_TYPE_Q5_K = 13, +# GGML_TYPE_Q6_K = 14, +# GGML_TYPE_Q8_K = 15, +# GGML_TYPE_IQ2_XXS = 16, +# GGML_TYPE_IQ2_XS = 17, +# GGML_TYPE_IQ3_XXS = 18, +# GGML_TYPE_IQ1_S = 19, +# GGML_TYPE_IQ4_NL = 20, +# GGML_TYPE_IQ3_S = 21, +# GGML_TYPE_IQ2_S = 22, +# GGML_TYPE_IQ4_XS = 23, +# GGML_TYPE_I8 = 24, +# GGML_TYPE_I16 = 25, +# GGML_TYPE_I32 = 26, +# GGML_TYPE_I64 = 27, +# GGML_TYPE_F64 = 28, +# GGML_TYPE_IQ1_M = 29, +# GGML_TYPE_COUNT, +# }; +GGML_TYPE_F32 = 0 +GGML_TYPE_F16 = 1 +GGML_TYPE_Q4_0 = 2 +GGML_TYPE_Q4_1 = 3 +GGML_TYPE_Q5_0 = 6 +GGML_TYPE_Q5_1 = 7 +GGML_TYPE_Q8_0 = 8 +GGML_TYPE_Q8_1 = 9 +GGML_TYPE_Q2_K = 10 +GGML_TYPE_Q3_K = 11 +GGML_TYPE_Q4_K = 12 +GGML_TYPE_Q5_K = 13 +GGML_TYPE_Q6_K = 14 +GGML_TYPE_Q8_K = 15 +GGML_TYPE_IQ2_XXS = 16 +GGML_TYPE_IQ2_XS = 17 +GGML_TYPE_IQ3_XXS = 18 +GGML_TYPE_IQ1_S = 19 +GGML_TYPE_IQ4_NL = 20 +GGML_TYPE_IQ3_S = 21 +GGML_TYPE_IQ2_S = 22 +GGML_TYPE_IQ4_XS = 23 +GGML_TYPE_I8 = 24 +GGML_TYPE_I16 = 25 +GGML_TYPE_I32 = 26 +GGML_TYPE_I64 = 27 +GGML_TYPE_F64 = 28 +GGML_TYPE_IQ1_M = 29 +GGML_TYPE_COUNT = 30 + +# from ggml-backend.h +# typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data); +ggml_backend_sched_eval_callback = ctypes.CFUNCTYPE( + ctypes.c_bool, ctypes.c_void_p, ctypes.c_bool, ctypes.c_void_p +) + +# // Abort callback +# // If not NULL, called before ggml computation +# // If it returns true, the computation is aborted +# typedef bool (*ggml_abort_callback)(void * data); +ggml_abort_callback = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p) + +# llama.h bindings + +_lib.llama_max_devices.argtypes = [] +_lib.llama_max_devices.restype = ctypes.c_size_t + +LLAMA_MAX_DEVICES = _lib.llama_max_devices() + +# define LLAMA_DEFAULT_SEED 0xFFFFFFFF +LLAMA_DEFAULT_SEED = 0xFFFFFFFF + +# define LLAMA_TOKEN_NULL -1 +LLAMA_TOKEN_NULL = -1 + +# define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla' +LLAMA_FILE_MAGIC_GGLA = 0x67676C61 + +# define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn' +LLAMA_FILE_MAGIC_GGSN = 0x6767736E + +# define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq' +LLAMA_FILE_MAGIC_GGSQ = 0x67677371 + +# define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN +LLAMA_SESSION_MAGIC = LLAMA_FILE_MAGIC_GGSN +# define 
LLAMA_SESSION_VERSION 9 +LLAMA_SESSION_VERSION = 9 + +# define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ +LLAMA_STATE_SEQ_MAGIC = LLAMA_FILE_MAGIC_GGSQ +# define LLAMA_STATE_SEQ_VERSION 2 +LLAMA_STATE_SEQ_VERSION = 2 + +# struct llama_vocab; +llama_vocab_p = NewType("llama_vocab_p", int) +llama_vocab_p_ctypes = ctypes.c_void_p + +# struct llama_model; +llama_model_p = NewType("llama_model_p", int) +llama_model_p_ctypes = ctypes.c_void_p + +# struct llama_context; +llama_context_p = NewType("llama_context_p", int) +llama_context_p_ctypes = ctypes.c_void_p + +# # struct llama_sampler; +# llama_sampler_p = NewType("llama_sampler_p", int) +# llama_sampler_p_ctypes = ctypes.c_void_p + +# struct llama_kv_cache; +llama_kv_cache_p = NewType("llama_kv_cache_p", int) +llama_kv_cache_p_ctypes = ctypes.c_void_p + +# typedef int32_t llama_pos; +llama_pos = ctypes.c_int32 +# typedef int32_t llama_token; +llama_token = ctypes.c_int32 +llama_token_p = ctypes.POINTER(llama_token) +# typedef int32_t llama_seq_id; +llama_seq_id = ctypes.c_int32 + + +# enum llama_vocab_type { +# LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab +# LLAMA_VOCAB_TYPE_SPM = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback +# LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE +# LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece +# LLAMA_VOCAB_TYPE_UGM = 4, // T5 tokenizer based on Unigram +# LLAMA_VOCAB_TYPE_RWKV = 5, // RWKV tokenizer based on greedy tokenization +# }; +LLAMA_VOCAB_TYPE_NONE = 0 +"""For models without vocab""" +LLAMA_VOCAB_TYPE_SPM = 1 +"""LLaMA tokenizer based on byte-level BPE with byte fallback""" +LLAMA_VOCAB_TYPE_BPE = 2 +"""GPT-2 tokenizer based on byte-level BPE""" +LLAMA_VOCAB_TYPE_WPM = 3 +"""BERT tokenizer based on WordPiece""" +LLAMA_VOCAB_TYPE_UGM = 4 +"""T5 tokenizer based on Unigram""" +LLAMA_VOCAB_TYPE_RWKV = 5 +"""RWKV tokenizer based on greedy tokenization""" + + +# // pre-tokenization types +# enum llama_vocab_pre_type { +# LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0, +# LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1, +# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2, +# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3, +# LLAMA_VOCAB_PRE_TYPE_FALCON = 4, +# LLAMA_VOCAB_PRE_TYPE_MPT = 5, +# LLAMA_VOCAB_PRE_TYPE_STARCODER = 6, +# LLAMA_VOCAB_PRE_TYPE_GPT2 = 7, +# LLAMA_VOCAB_PRE_TYPE_REFACT = 8, +# LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9, +# LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10, +# LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11, +# LLAMA_VOCAB_PRE_TYPE_OLMO = 12, +# LLAMA_VOCAB_PRE_TYPE_DBRX = 13, +# LLAMA_VOCAB_PRE_TYPE_SMAUG = 14, +# LLAMA_VOCAB_PRE_TYPE_PORO = 15, +# LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16, +# LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17, +# LLAMA_VOCAB_PRE_TYPE_VIKING = 18, +# LLAMA_VOCAB_PRE_TYPE_JAIS = 19, +# LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20, +# LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21, +# LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22, +# LLAMA_VOCAB_PRE_TYPE_BLOOM = 23, +# LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24, +# LLAMA_VOCAB_PRE_TYPE_EXAONE = 25, +# LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26, +# LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, +# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28, +# LLAMA_VOCAB_PRE_TYPE_GPT4O = 29, +# LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30, +# LLAMA_VOCAB_PRE_TYPE_TRILLION = 31, +# LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32, +# LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33, +# LLAMA_VOCAB_PRE_TYPE_PIXTRAL = 34, +# }; +LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0 +LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1 +LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2 +LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3 +LLAMA_VOCAB_PRE_TYPE_FALCON = 4 +LLAMA_VOCAB_PRE_TYPE_MPT = 5 
+LLAMA_VOCAB_PRE_TYPE_STARCODER = 6 +LLAMA_VOCAB_PRE_TYPE_GPT2 = 7 +LLAMA_VOCAB_PRE_TYPE_REFACT = 8 +LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9 +LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10 +LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11 +LLAMA_VOCAB_PRE_TYPE_OLMO = 12 +LLAMA_VOCAB_PRE_TYPE_DBRX = 13 +LLAMA_VOCAB_PRE_TYPE_SMAUG = 14 +LLAMA_VOCAB_PRE_TYPE_PORO = 15 +LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16 +LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17 +LLAMA_VOCAB_PRE_TYPE_VIKING = 18 +LLAMA_VOCAB_PRE_TYPE_JAIS = 19 +LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20 +LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21 +LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22 +LLAMA_VOCAB_PRE_TYPE_BLOOM = 23 +LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24 +LLAMA_VOCAB_PRE_TYPE_EXAONE = 25 +LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26 +LLAMA_VOCAB_PRE_TYPE_MINERVA = 27 +LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28 +LLAMA_VOCAB_PRE_TYPE_GPT4O = 29 +LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30 +LLAMA_VOCAB_PRE_TYPE_TRILLION = 31 +LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32 +LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33 +LLAMA_VOCAB_PRE_TYPE_PIXTRAL = 34 + + +# // note: these values should be synchronized with ggml_rope +# // TODO: maybe move this enum to ggml.h (ggml_rope_type) +# enum llama_rope_type { +# LLAMA_ROPE_TYPE_NONE = -1, +# LLAMA_ROPE_TYPE_NORM = 0, +# LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX, +# LLAMA_ROPE_TYPE_MROPE = GGML_ROPE_TYPE_MROPE, +# LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION, +# }; +LLAMA_ROPE_TYPE_NONE = -1 +LLAMA_ROPE_TYPE_NORM = 0 +LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX = 2 +LLAMA_ROPE_TYPE_MROPE = GGML_ROPE_TYPE_MROPE = 8 +LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION = 24 + + +# enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file +# LLAMA_TOKEN_TYPE_UNDEFINED = 0, +# LLAMA_TOKEN_TYPE_NORMAL = 1, +# LLAMA_TOKEN_TYPE_UNKNOWN = 2, +# LLAMA_TOKEN_TYPE_CONTROL = 3, +# LLAMA_TOKEN_TYPE_USER_DEFINED = 4, +# LLAMA_TOKEN_TYPE_UNUSED = 5, +# LLAMA_TOKEN_TYPE_BYTE = 6, +# }; +LLAMA_TOKEN_TYPE_UNDEFINED = 0 +LLAMA_TOKEN_TYPE_NORMAL = 1 +LLAMA_TOKEN_TYPE_UNKNOWN = 2 +LLAMA_TOKEN_TYPE_CONTROL = 3 +LLAMA_TOKEN_TYPE_USER_DEFINED = 4 +LLAMA_TOKEN_TYPE_UNUSED = 5 +LLAMA_TOKEN_TYPE_BYTE = 6 + + +# enum llama_token_attr { +# LLAMA_TOKEN_ATTR_UNDEFINED = 0, +# LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0, +# LLAMA_TOKEN_ATTR_UNUSED = 1 << 1, +# LLAMA_TOKEN_ATTR_NORMAL = 1 << 2, +# LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL? 
+# LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4, +# LLAMA_TOKEN_ATTR_BYTE = 1 << 5, +# LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6, +# LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7, +# LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8, +# LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9, +# }; +LLAMA_TOKEN_ATTR_UNDEFINED = 0 +LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0 +LLAMA_TOKEN_ATTR_UNUSED = 1 << 1 +LLAMA_TOKEN_ATTR_NORMAL = 1 << 2 +LLAMA_TOKEN_ATTR_CONTROL = 1 << 3 +LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4 +LLAMA_TOKEN_ATTR_BYTE = 1 << 5 +LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6 +LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7 +LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8 +LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9 + + +# // model file types +# enum llama_ftype { +# LLAMA_FTYPE_ALL_F32 = 0, +# LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors +# // LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 +# // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed +# // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed +# LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors +# //LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // removed from gguf files, use Q4_0 and runtime repack +# //LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // removed from gguf files, use Q4_0 and runtime repack +# //LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // removed from gguf files, use Q4_0 and runtime repack +# LLAMA_FTYPE_MOSTLY_TQ1_0 = 36, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_TQ2_0 = 37, // except 1d tensors +# +# LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file +# }; +LLAMA_FTYPE_ALL_F32 = 0 +LLAMA_FTYPE_MOSTLY_F16 = 1 +LLAMA_FTYPE_MOSTLY_Q4_0 = 2 +LLAMA_FTYPE_MOSTLY_Q4_1 = 3 +LLAMA_FTYPE_MOSTLY_Q8_0 = 7 +LLAMA_FTYPE_MOSTLY_Q5_0 = 8 +LLAMA_FTYPE_MOSTLY_Q5_1 = 9 +LLAMA_FTYPE_MOSTLY_Q2_K = 10 +LLAMA_FTYPE_MOSTLY_Q3_K_S = 11 +LLAMA_FTYPE_MOSTLY_Q3_K_M = 12 +LLAMA_FTYPE_MOSTLY_Q3_K_L = 13 +LLAMA_FTYPE_MOSTLY_Q4_K_S = 14 +LLAMA_FTYPE_MOSTLY_Q4_K_M = 15 +LLAMA_FTYPE_MOSTLY_Q5_K_S = 16 +LLAMA_FTYPE_MOSTLY_Q5_K_M = 17 +LLAMA_FTYPE_MOSTLY_Q6_K = 18 +LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19 +LLAMA_FTYPE_MOSTLY_IQ2_XS = 20 
+LLAMA_FTYPE_MOSTLY_Q2_K_S = 21 +LLAMA_FTYPE_MOSTLY_IQ3_XS = 22 +LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23 +LLAMA_FTYPE_MOSTLY_IQ1_S = 24 +LLAMA_FTYPE_MOSTLY_IQ4_NL = 25 +LLAMA_FTYPE_MOSTLY_IQ3_S = 26 +LLAMA_FTYPE_MOSTLY_IQ3_M = 27 +LLAMA_FTYPE_MOSTLY_IQ2_S = 28 +LLAMA_FTYPE_MOSTLY_IQ2_M = 29 +LLAMA_FTYPE_MOSTLY_IQ4_XS = 30 +LLAMA_FTYPE_MOSTLY_IQ1_M = 31 +LLAMA_FTYPE_MOSTLY_BF16 = 32 +# LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33 +# LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34 +# LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35 +LLAMA_FTYPE_MOSTLY_TQ1_0 = 36 +LLAMA_FTYPE_MOSTLY_TQ2_0 = 37 +LLAMA_FTYPE_GUESSED = 1024 + +# enum llama_rope_scaling_type { +# LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1, +# LLAMA_ROPE_SCALING_TYPE_NONE = 0, +# LLAMA_ROPE_SCALING_TYPE_LINEAR = 1, +# LLAMA_ROPE_SCALING_TYPE_YARN = 2, +# LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3, +# LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN, +# }; +LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1 +LLAMA_ROPE_SCALING_TYPE_NONE = 0 +LLAMA_ROPE_SCALING_TYPE_LINEAR = 1 +LLAMA_ROPE_SCALING_TYPE_YARN = 2 +LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3 +LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN + +# enum llama_pooling_type { +# LLAMA_POOLING_TYPE_UNSPECIFIED = -1, +# LLAMA_POOLING_TYPE_NONE = 0, +# LLAMA_POOLING_TYPE_MEAN = 1, +# LLAMA_POOLING_TYPE_CLS = 2, +# LLAMA_POOLING_TYPE_LAST = 3, +# LLAMA_POOLING_TYPE_RANK = 4, // used by reranking models to attach the classification head to the graph +# }; +LLAMA_POOLING_TYPE_UNSPECIFIED = -1 +LLAMA_POOLING_TYPE_NONE = 0 +LLAMA_POOLING_TYPE_MEAN = 1 +LLAMA_POOLING_TYPE_CLS = 2 +LLAMA_POOLING_TYPE_LAST = 3 +LLAMA_POOLING_TYPE_RANK = 4 + +# enum llama_attention_type { +# LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1, +# LLAMA_ATTENTION_TYPE_CAUSAL = 0, +# LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1, +# }; +LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1 +LLAMA_ATTENTION_TYPE_CAUSAL = 0 +LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1 + + +# enum llama_split_mode { +# LLAMA_SPLIT_MODE_NONE = 0, // single GPU +# LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs +# LLAMA_SPLIT_MODE_ROW = 2, // split rows across GPUs +# }; +LLAMA_SPLIT_MODE_NONE = 0 +LLAMA_SPLIT_MODE_LAYER = 1 +LLAMA_SPLIT_MODE_ROW = 2 + + +# typedef struct llama_token_data { +# llama_token id; // token id +# float logit; // log-odds of the token +# float p; // probability of the token +# } llama_token_data; +class llama_token_data(ctypes.Structure): + """Used to store token data + + Attributes: + id (llama_token): token id + logit (float): log-odds of the token + p (float): probability of the token""" + + if TYPE_CHECKING: + id: llama_token + logit: float + p: float + + _fields_ = [ + ("id", llama_token), + ("logit", ctypes.c_float), + ("p", ctypes.c_float), + ] + + +llama_token_data_p = ctypes.POINTER(llama_token_data) + + +# typedef struct llama_token_data_array { +# // TODO: consider SoA +# // NOTE: this pointer can be modified by the samplers +# llama_token_data * data; +# size_t size; +# int64_t selected; // this is the index in the data array (i.e. not the token id) +# bool sorted; +# } llama_token_data_array; +class llama_token_data_array(ctypes.Structure): + """Used to sample tokens given logits + + Attributes: + data (ctypes.Array[llama_token_data]): token data + size (int): size of the array + selected (int): index in the data array (i.e. 
not the token id) + sorted (bool): whether the array is sorted""" + + if TYPE_CHECKING: + data: CtypesArray[llama_token_data] + size: int + selected: int + sorted: bool + + _fields_ = [ + ("data", llama_token_data_p), + ("size", ctypes.c_size_t), + ("selected", ctypes.c_int64), + ("sorted", ctypes.c_bool), + ] + + +llama_token_data_array_p = ctypes.POINTER(llama_token_data_array) + +# typedef bool (*llama_progress_callback)(float progress, void * user_data); +llama_progress_callback = ctypes.CFUNCTYPE( + ctypes.c_bool, ctypes.c_float, ctypes.c_void_p +) + + +# // Input data for llama_decode +# // A llama_batch object can contain input about one or many sequences +# // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens +# // +# // - token : the token ids of the input (used when embd is NULL) +# // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL) +# // - pos : the positions of the respective token in the sequence +# // (if set to NULL, the token position will be tracked automatically by llama_decode) +# // - seq_id : the sequence to which the respective token belongs +# // (if set to NULL, the sequence ID will be assumed to be 0) +# // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output +# // (if set to NULL, only the logits for last token will be returned) +# // +# typedef struct llama_batch { +# int32_t n_tokens; + +# llama_token * token; +# float * embd; +# llama_pos * pos; +# int32_t * n_seq_id; +# llama_seq_id ** seq_id; +# int8_t * logits; // TODO: rename this to "output" +# } llama_batch; +class llama_batch(ctypes.Structure): + """Input data for llama_decode + + A llama_batch object can contain input about one or many sequences + + The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens + + Attributes: + n_tokens (int): number of tokens + token (ctypes.Array[llama_token]): the token ids of the input (used when embd is NULL) + embd (ctypes.Array[ctypes.ctypes.c_float]): token embeddings (i.e. 
float vector of size n_embd) (used when token is NULL) + pos (ctypes.Array[ctypes.Array[llama_pos]]): the positions of the respective token in the sequence + seq_id (ctypes.Array[ctypes.Array[llama_seq_id]]): the sequence to which the respective token belongs + logits (ctypes.Array[ctypes.ctypes.c_int8]): if zero, the logits for the respective token will not be output + """ + + if TYPE_CHECKING: + n_tokens: int + token: CtypesArray[llama_token] + embd: CtypesArray[ctypes.c_float] + pos: CtypesArray[CtypesArray[llama_pos]] + n_seq_id: CtypesArray[ctypes.c_int] + seq_id: CtypesArray[CtypesArray[llama_seq_id]] + logits: CtypesArray[ctypes.c_int8] + + _fields_ = [ + ("n_tokens", ctypes.c_int32), + ("token", ctypes.POINTER(llama_token)), + ("embd", ctypes.POINTER(ctypes.c_float)), + ("pos", ctypes.POINTER(llama_pos)), + ("n_seq_id", ctypes.POINTER(ctypes.c_int32)), + ("seq_id", ctypes.POINTER(ctypes.POINTER(llama_seq_id))), + ("logits", ctypes.POINTER(ctypes.c_int8)), + ] + + +# enum llama_model_kv_override_type { +# LLAMA_KV_OVERRIDE_TYPE_INT, +# LLAMA_KV_OVERRIDE_TYPE_FLOAT, +# LLAMA_KV_OVERRIDE_TYPE_BOOL, +# LLAMA_KV_OVERRIDE_TYPE_STR, +# }; +LLAMA_KV_OVERRIDE_TYPE_INT = 0 +LLAMA_KV_OVERRIDE_TYPE_FLOAT = 1 +LLAMA_KV_OVERRIDE_TYPE_BOOL = 2 +LLAMA_KV_OVERRIDE_TYPE_STR = 3 + + +# struct llama_model_kv_override { +# enum llama_model_kv_override_type tag; + +# char key[128]; + + +# union { +# int64_t val_i64; +# double val_f64; +# bool val_bool; +# char val_str[128]; +# }; +# }; +class llama_model_kv_override_value(ctypes.Union): + _fields_ = [ + ("val_i64", ctypes.c_int64), + ("val_f64", ctypes.c_double), + ("val_bool", ctypes.c_bool), + ("val_str", ctypes.c_char * 128), + ] + + if TYPE_CHECKING: + val_i64: int + val_f64: float + val_bool: bool + val_str: bytes + + +class llama_model_kv_override(ctypes.Structure): + _fields_ = [ + ("tag", ctypes.c_int), + ("key", ctypes.c_char * 128), + ("value", llama_model_kv_override_value), + ] + + if TYPE_CHECKING: + tag: int + key: bytes + value: Union[int, float, bool, bytes] + + +# struct llama_model_tensor_buft_override { +# const char * pattern; +# ggml_backend_buffer_type_t buft; +# }; + + +# struct llama_model_params { +# // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) +# ggml_backend_dev_t * devices; + +# // NULL-terminated list of buffer types to use for tensors that match a pattern +# const struct llama_model_tensor_buft_override * tensor_buft_overrides; + +# int32_t n_gpu_layers; // number of layers to store in VRAM +# enum llama_split_mode split_mode; // how to split the model across multiple GPUs + +# // main_gpu interpretation depends on split_mode: +# // LLAMA_SPLIT_MODE_NONE: the GPU that is used for the entire model +# // LLAMA_SPLIT_MODE_ROW: the GPU that is used for small tensors and intermediate results +# // LLAMA_SPLIT_MODE_LAYER: ignored +# int32_t main_gpu; + +# // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() +# const float * tensor_split; + +# // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. +# // If the provided progress_callback returns true, model loading continues. +# // If it returns false, model loading is immediately aborted. 
+# llama_progress_callback progress_callback; + +# // context pointer passed to the progress callback +# void * progress_callback_user_data; + +# // override key-value pairs of the model meta data +# const struct llama_model_kv_override * kv_overrides; + + +# // Keep the booleans together to avoid misalignment during copy-by-value. +# bool vocab_only; // only load the vocabulary, no weights +# bool use_mmap; // use mmap if possible +# bool use_mlock; // force system to keep model in RAM +# bool check_tensors; // validate model tensor data +# }; +class llama_model_params(ctypes.Structure): + """Parameters for llama_model + + Attributes: + devices (ctypes.Array[ggml_backend_dev_t]): NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) + tensor_buft_overrides (ctypes.Array[llama_model_tensor_buft_override]): NULL-terminated list of buffer types to use for tensors that match a pattern + n_gpu_layers (int): number of layers to store in VRAM + split_mode (int): how to split the model across multiple GPUs + main_gpu (int): the GPU that is used for the entire model. main_gpu interpretation depends on split_mode: LLAMA_SPLIT_NONE: the GPU that is used for the entire model LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results LLAMA_SPLIT_LAYER: ignored + tensor_split (ctypes.Array[ctypes.ctypes.c_float]): proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() + progress_callback (llama_progress_callback): called with a progress value between 0.0 and 1.0. Pass NULL to disable. If the provided progress_callback returns true, model loading continues. If it returns false, model loading is immediately aborted. + progress_callback_user_data (ctypes.ctypes.c_void_p): context pointer passed to the progress callback + kv_overrides (ctypes.Array[llama_model_kv_override]): override key-value pairs of the model meta data + vocab_only (bool): only load the vocabulary, no weights + use_mmap (bool): use mmap if possible + use_mlock (bool): force system to keep model in RAM + check_tensors (bool): validate model tensor data""" + + if TYPE_CHECKING: + devices: CtypesArray[ctypes.c_void_p] # NOTE: unused + tensor_buft_overrides: CtypesArray[llama_model_tensor_buft_override] # NOTE: unused + n_gpu_layers: int + split_mode: int + main_gpu: int + tensor_split: CtypesArray[ctypes.c_float] + progress_callback: Callable[[float, ctypes.c_void_p], bool] + progress_callback_user_data: ctypes.c_void_p + kv_overrides: CtypesArray[llama_model_kv_override] + vocab_only: bool + use_mmap: bool + use_mlock: bool + check_tensors: bool + + _fields_ = [ + ("devices", ctypes.c_void_p), # NOTE: unnused + ("tensor_buft_overrides", ctypes.c_void_p), # NOTE: unused + ("n_gpu_layers", ctypes.c_int32), + ("split_mode", ctypes.c_int), + ("main_gpu", ctypes.c_int32), + ("tensor_split", ctypes.POINTER(ctypes.c_float)), + ("progress_callback", llama_progress_callback), + ("progress_callback_user_data", ctypes.c_void_p), + ("kv_overrides", ctypes.POINTER(llama_model_kv_override)), + ("vocab_only", ctypes.c_bool), + ("use_mmap", ctypes.c_bool), + ("use_mlock", ctypes.c_bool), + ("check_tensors", ctypes.c_bool), + ] + + +# // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations +# // https://github.com/ggerganov/llama.cpp/pull/7544 +# struct llama_context_params { +# uint32_t n_ctx; // text context, 0 = from model +# uint32_t n_batch; // logical maximum batch 
size that can be submitted to llama_decode +# uint32_t n_ubatch; // physical maximum batch size +# uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models) +# int32_t n_threads; // number of threads to use for generation +# int32_t n_threads_batch; // number of threads to use for batch processing + +# enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type` +# enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id +# enum llama_attention_type attention_type; // attention type to use for embeddings + +# // ref: https://github.com/ggerganov/llama.cpp/pull/2054 +# float rope_freq_base; // RoPE base frequency, 0 = from model +# float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model +# float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model +# float yarn_attn_factor; // YaRN magnitude scaling factor +# float yarn_beta_fast; // YaRN low correction dim +# float yarn_beta_slow; // YaRN high correction dim +# uint32_t yarn_orig_ctx; // YaRN original context size +# float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default) + +# ggml_backend_sched_eval_callback cb_eval; +# void * cb_eval_user_data; + +# enum ggml_type type_k; // data type for K cache [EXPERIMENTAL] +# enum ggml_type type_v; // data type for V cache [EXPERIMENTAL] + +# // Keep the booleans together to avoid misalignment during copy-by-value. +# bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) +# bool embeddings; // if true, extract embeddings (together with logits) +# bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU +# bool flash_attn; // whether to use flash attention [EXPERIMENTAL] +# bool no_perf; // whether to measure performance timings + + +# // Abort callback +# // if it returns true, execution of llama_decode() will be aborted +# // currently works only with CPU execution +# ggml_abort_callback abort_callback; +# void * abort_callback_data; +# }; +class llama_context_params(ctypes.Structure): + """Parameters for llama_context + + Attributes: + n_ctx (int): text context, 0 = from model + n_batch (int): logical maximum batch size that can be submitted to llama_decode + n_ubatch (int): physical maximum batch size + n_seq_max (int): max number of sequences (i.e. 
distinct states for recurrent models) + n_threads (int): number of threads to use for generation + n_threads_batch (int): number of threads to use for batch processing + rope_scaling_type (int): RoPE scaling type, from `enum llama_rope_scaling_type` + pooling_type (int): whether to pool (sum) embedding results by sequence id (ignored if no pooling layer) + attention_type (int): attention type to use for embeddings + rope_freq_base (float): RoPE base frequency, 0 = from model + rope_freq_scale (float): RoPE frequency scaling factor, 0 = from model + yarn_ext_factor (float): YaRN extrapolation mix factor, negative = from model + yarn_attn_factor (float): YaRN magnitude scaling factor + yarn_beta_fast (float): YaRN low correction dim + yarn_beta_slow (float): YaRN high correction dim + yarn_orig_ctx (int): YaRN original context size + defrag_thold (float): defragment the KV cache if holes/size > thold, < 0 disabled (default) + cb_eval (ggml_backend_sched_eval_callback): callback for scheduling eval + cb_eval_user_data (ctypes.ctypes.c_void_p): user data for cb_eval + type_k (int): data type for K cache + type_v (int): data type for V cache + logits_all (bool): the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) + embeddings (bool): if true, extract embeddings (together with logits) + offload_kqv (bool): whether to offload the KQV ops (including the KV cache) to GPU + flash_attn (bool): whether to use flash attention + no_perf (bool): whether to measure performance timings + abort_callback (ggml_abort_callback): abort callback if it returns true, execution of llama_decode() will be aborted + abort_callback_data (ctypes.ctypes.c_void_p): data for abort_callback + """ + + if TYPE_CHECKING: + n_ctx: int + n_batch: int + n_ubatch: int + n_seq_max: int + n_threads: int + n_threads_batch: int + rope_scaling_type: int + pooling_type: int + attention_type: int + rope_freq_base: float + rope_freq_scale: float + yarn_ext_factor: float + yarn_attn_factor: float + yarn_beta_fast: float + yarn_beta_slow: float + yarn_orig_ctx: int + defrag_thold: float + cb_eval: Callable[[ctypes.c_void_p, bool], bool] + cb_eval_user_data: ctypes.c_void_p + type_k: int + type_v: int + logits_all: bool + embeddings: bool + offload_kqv: bool + flash_attn: bool + no_perf: bool + abort_callback: Callable[[ctypes.c_void_p], bool] + abort_callback_data: ctypes.c_void_p + + _fields_ = [ + ("n_ctx", ctypes.c_uint32), + ("n_batch", ctypes.c_uint32), + ("n_ubatch", ctypes.c_uint32), + ("n_seq_max", ctypes.c_uint32), + ("n_threads", ctypes.c_int32), + ("n_threads_batch", ctypes.c_int32), + ("rope_scaling_type", ctypes.c_int), + ("pooling_type", ctypes.c_int), + ("attention_type", ctypes.c_int), + ("rope_freq_base", ctypes.c_float), + ("rope_freq_scale", ctypes.c_float), + ("yarn_ext_factor", ctypes.c_float), + ("yarn_attn_factor", ctypes.c_float), + ("yarn_beta_fast", ctypes.c_float), + ("yarn_beta_slow", ctypes.c_float), + ("yarn_orig_ctx", ctypes.c_uint32), + ("defrag_thold", ctypes.c_float), + ("cb_eval", ggml_backend_sched_eval_callback), + ("cb_eval_user_data", ctypes.c_void_p), + ("type_k", ctypes.c_int), + ("type_v", ctypes.c_int), + ("logits_all", ctypes.c_bool), + ("embeddings", ctypes.c_bool), + ("offload_kqv", ctypes.c_bool), + ("flash_attn", ctypes.c_bool), + ("no_perf", ctypes.c_bool), + ("abort_callback", ggml_abort_callback), + ("abort_callback_data", ctypes.c_void_p), + ] + + +# // Signature for logging events +# // Note that text includes the new line 
character at the end for most events. +# // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it +# // if it exists. +# // It might not exist for progress report where '.' is output repeatedly. +# typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data); +llama_log_callback = ctypes.CFUNCTYPE( + None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p +) +"""Signature for logging events +Note that text includes the new line character at the end for most events. +If your logging mechanism cannot handle that, check if the last character is '\n' and strip it +if it exists. +It might not exist for progress report where '.' is output repeatedly.""" + + +# // model quantization parameters +# typedef struct llama_model_quantize_params { +# int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() +# enum llama_ftype ftype; // quantize to this llama_ftype +# enum ggml_type output_tensor_type; // output tensor type +# enum ggml_type token_embedding_type; // token embeddings tensor type +# bool allow_requantize; // allow quantizing non-f32/f16 tensors +# bool quantize_output_tensor; // quantize output.weight +# bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored +# bool pure; // quantize all tensors to the default type +# bool keep_split; // quantize to the same number of shards +# void * imatrix; // pointer to importance matrix data +# void * kv_overrides; // pointer to vector containing overrides +# void * tensor_types; // pointer to vector containing tensor types +# } llama_model_quantize_params; +class llama_model_quantize_params(ctypes.Structure): + """Parameters for llama_model_quantize + + Attributes: + nthread (int): number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() + ftype (int): quantize to this llama_ftype + output_tensor_type (int): output tensor type + token_embedding_type (int): token embeddings tensor type + allow_requantize (bool): allow quantizing non-f32/f16 tensors + quantize_output_tensor (bool): quantize output.weight + only_copy (bool): only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored + pure (bool): quantize all tensors to the default type + keep_split (bool): quantize to the same number of shards + imatrix (ctypes.c_void_p): pointer to importance matrix data + kv_overrides (ctypes.c_void_p): pointer to vector containing overrides + tensor_types (ctypes.c_void_p): pointer to vector containing tensor types + """ + + if TYPE_CHECKING: + nthread: int + ftype: int + output_tensor_type: int + token_embedding_type: int + allow_requantize: bool + quantize_output_tensor: bool + only_copy: bool + pure: bool + keep_split: bool + imatrix: ctypes.c_void_p + kv_overrides: ctypes.c_void_p + tensor_types: ctypes.c_void_p + + _fields_ = [ + ("nthread", ctypes.c_int32), + ("ftype", ctypes.c_int), + ("output_tensor_type", ctypes.c_int), + ("token_embedding_type", ctypes.c_int), + ("allow_requantize", ctypes.c_bool), + ("quantize_output_tensor", ctypes.c_bool), + ("only_copy", ctypes.c_bool), + ("pure", ctypes.c_bool), + ("keep_split", ctypes.c_bool), + ("imatrix", ctypes.c_void_p), + ("kv_overrides", ctypes.c_void_p), + ("tensor_types", ctypes.c_void_p), + ] + + +# typedef struct llama_logit_bias { +# llama_token token; +# float bias; +# } llama_logit_bias; +class llama_logit_bias(ctypes.Structure): + """Used to store logit bias + + 
Attributes: + token (llama_token): token id + bias (float): bias""" + + if TYPE_CHECKING: + token: llama_token + bias: float + + _fields_ = [ + ("token", llama_token), + ("bias", ctypes.c_float), + ] + + +llama_logit_bias_p = ctypes.POINTER(llama_logit_bias) + + +# typedef struct llama_sampler_chain_params { +# bool no_perf; // whether to measure performance timings +# } llama_sampler_chain_params; +class llama_sampler_chain_params(ctypes.Structure): + """Parameters for llama_sampler_chain + + Attributes: + no_perf (bool): whether to measure performance timings""" + + if TYPE_CHECKING: + no_perf: bool + + _fields_ = [ + ("no_perf", ctypes.c_bool), + ] + + +# // used in chat template +# typedef struct llama_chat_message { +# const char * role; +# const char * content; +# } llama_chat_message; +class llama_chat_message(ctypes.Structure): + _fields_ = [ + ("role", ctypes.c_char_p), + ("content", ctypes.c_char_p), + ] + + +# // lora adapter +# struct llama_adapter_lora; +llama_adapter_lora_p = ctypes.c_void_p +llama_adapter_lora_p_ctypes = ctypes.POINTER(ctypes.c_void_p) + + +# // Helpers for getting default parameters +# LLAMA_API struct llama_model_params llama_model_default_params(void); +@ctypes_function( + "llama_model_default_params", + [], + llama_model_params, +) +def llama_model_default_params() -> llama_model_params: + """Get default parameters for llama_model""" + ... + + +# LLAMA_API struct llama_context_params llama_context_default_params(void); +@ctypes_function( + "llama_context_default_params", + [], + llama_context_params, +) +def llama_context_default_params() -> llama_context_params: + """Get default parameters for llama_context""" + ... + + +# LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void); +@ctypes_function( + "llama_sampler_chain_default_params", + [], + llama_sampler_chain_params, +) +def llama_sampler_chain_default_params() -> llama_sampler_chain_params: + """Get default parameters for llama_sampler_chain""" + ... + + +# LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void); +@ctypes_function( + "llama_model_quantize_default_params", + [], + llama_model_quantize_params, +) +def llama_model_quantize_default_params() -> llama_model_quantize_params: + """Get default parameters for llama_model_quantize""" + ... + + +# // Initialize the llama + ggml backend +# // If numa is true, use NUMA optimizations +# // Call once at the start of the program +# LLAMA_API void llama_backend_init(bool numa); +# LLAMA_API void llama_backend_init(void); +@ctypes_function( + "llama_backend_init", + [], + None, +) +def llama_backend_init(): + """Initialize the llama + ggml backend + If numa is true, use NUMA optimizations + Call once at the start of the program""" + ... + + +# // numa strategies +# enum ggml_numa_strategy { +# GGML_NUMA_STRATEGY_DISABLED = 0, +# GGML_NUMA_STRATEGY_DISTRIBUTE = 1, +# GGML_NUMA_STRATEGY_ISOLATE = 2, +# GGML_NUMA_STRATEGY_NUMACTL = 3, +# GGML_NUMA_STRATEGY_MIRROR = 4, +# GGML_NUMA_STRATEGY_COUNT +# }; +GGML_NUMA_STRATEGY_DISABLED = 0 +GGML_NUMA_STRATEGY_DISTRIBUTE = 1 +GGML_NUMA_STRATEGY_ISOLATE = 2 +GGML_NUMA_STRATEGY_NUMACTL = 3 +GGML_NUMA_STRATEGY_MIRROR = 4 +GGML_NUMA_STRATEGY_COUNT = 5 + + +# // Call once at the end of the program - currently only used for MPI +# LLAMA_API void llama_backend_free(void); +@ctypes_function( + "llama_backend_free", + [], + None, +) +def llama_backend_free(): + """Call once at the end of the program - currently only used for MPI""" + ... 
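# --- Illustrative sketch (not authoritative): backend lifecycle for the bindings above.
# Assumes the module import path llama_cpp.llama_cpp from this repository layout.
# llama_backend_init() is called once at program start and llama_backend_free() once at exit.
import llama_cpp.llama_cpp as llama_cpp

def main() -> None:
    llama_cpp.llama_backend_init()       # initialize the llama + ggml backend once
    try:
        # ... load models and create contexts here ...
        pass
    finally:
        llama_cpp.llama_backend_free()   # release backend resources at shutdown

if __name__ == "__main__":
    main()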
+ + +# //optional: +# LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa); +@ctypes_function( + "llama_numa_init", + [ctypes.c_int], + None, +) +def llama_numa_init(numa: int, /): + ... + + +# // Optional: an auto threadpool gets created in ggml if not passed explicitly +# LLAMA_API void llama_attach_threadpool( +# struct llama_context * ctx, +# ggml_threadpool_t threadpool, +# ggml_threadpool_t threadpool_batch); +# TODO: Add llama_attach_threadpool + + +# LLAMA_API void llama_detach_threadpool(struct llama_context * ctx); +# TODO: Add llama_detach_threadpool + + +# DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file( +# const char * path_model, +# struct llama_model_params params), +# "use llama_model_load_from_file instead"); +@ctypes_function( + "llama_load_model_from_file", + [ctypes.c_char_p, llama_model_params], + llama_model_p_ctypes, +) +def llama_load_model_from_file( + path_model: bytes, params: llama_model_params, / +) -> Optional[llama_model_p]: + ... + + +# // Load the model from a file +# // If the file is split into multiple parts, the file name must follow this pattern: -%05d-of-%05d.gguf +# // If the split file name does not follow this pattern, use llama_model_load_from_splits +# LLAMA_API struct llama_model * llama_model_load_from_file( +# const char * path_model, +# struct llama_model_params params); +@ctypes_function( + "llama_model_load_from_file", + [ctypes.c_char_p, llama_model_params], + llama_model_p_ctypes, +) +def llama_model_load_from_file( + path_model: bytes, params: llama_model_params, / +) -> Optional[llama_model_p]: + """Load the model from a file + + If the file is split into multiple parts, the file name must follow this pattern: -%05d-of-%05d.gguf + + If the split file name does not follow this pattern, use llama_model_load_from_splits""" + ... + + +# // Load the model from multiple splits (support custom naming scheme) +# // The paths must be in the correct order +# LLAMA_API struct llama_model * llama_model_load_from_splits( +# const char ** paths, +# size_t n_paths, +# struct llama_model_params params); +@ctypes_function( + "llama_model_load_from_splits", + [ctypes.POINTER(ctypes.c_char_p), ctypes.c_size_t, llama_model_params], + llama_model_p_ctypes, +) +def llama_model_load_from_splits( + paths: List[bytes], n_paths: int, params: llama_model_params, / +) -> Optional[llama_model_p]: + """Load the model from multiple splits (support custom naming scheme) + + The paths must be in the correct order""" + ... + + +# LLAMA_API void llama_free_model(struct llama_model * model); +@ctypes_function( + "llama_free_model", + [llama_model_p_ctypes], + None, +) +def llama_free_model(model: llama_model_p, /): + ... + + +# LLAMA_API void llama_model_free(struct llama_model * model); +@ctypes_function( + "llama_model_free", + [llama_model_p_ctypes], + None, +) +def llama_model_free(model: llama_model_p, /): + ... + + +# LLAMA_API struct llama_context * llama_init_from_model( +# struct llama_model * model, +# struct llama_context_params params); +@ctypes_function( + "llama_init_from_model", + [llama_model_p_ctypes, llama_context_params], + llama_context_p_ctypes, +) +def llama_init_from_model( + model: llama_model_p, params: llama_context_params, / +) -> Optional[llama_context_p]: + ... 
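# --- Illustrative sketch (not authoritative): load a model and create a context using the
# loader bindings declared above. The path b"./model.gguf" and the parameter values are
# placeholder assumptions; paths are bytes because the underlying C API takes `const char *`.
import llama_cpp.llama_cpp as llama_cpp

llama_cpp.llama_backend_init()

model_params = llama_cpp.llama_model_default_params()
model_params.n_gpu_layers = 0                                   # CPU-only for this sketch
model = llama_cpp.llama_model_load_from_file(b"./model.gguf", model_params)
if model is None:
    raise RuntimeError("failed to load model")

ctx_params = llama_cpp.llama_context_default_params()
ctx_params.n_ctx = 512                                          # small context for illustration
ctx = llama_cpp.llama_init_from_model(model, ctx_params)
if ctx is None:
    raise RuntimeError("failed to create context")

# ... run decode / sampling here ...

llama_cpp.llama_free(ctx)                                       # free the context first
llama_cpp.llama_model_free(model)                               # then the model
llama_cpp.llama_backend_free()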
+ + +# DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model( +# struct llama_model * model, +# struct llama_context_params params), +# "use llama_init_from_model instead"); +@ctypes_function( + "llama_new_context_with_model", + [llama_model_p_ctypes, llama_context_params], + llama_context_p_ctypes, +) +def llama_new_context_with_model( + model: llama_model_p, params: llama_context_params, / +) -> Optional[llama_context_p]: + ... + + +# // Frees all allocated memory +# LLAMA_API void llama_free(struct llama_context * ctx); +@ctypes_function( + "llama_free", + [llama_context_p_ctypes], + None, +) +def llama_free(ctx: llama_context_p, /): + """Frees all allocated memory""" + ... + + +# LLAMA_API int64_t llama_time_us(void); +@ctypes_function( + "llama_time_us", + [], + ctypes.c_int64, +) +def llama_time_us() -> int: + ... + + +# LLAMA_API size_t llama_max_devices(void); +@ctypes_function("llama_max_devices", [], ctypes.c_size_t) +def llama_max_devices() -> int: + ... + + +# LLAMA_API bool llama_supports_mmap (void); +@ctypes_function("llama_supports_mmap", [], ctypes.c_bool) +def llama_supports_mmap() -> bool: + ... + + +# LLAMA_API bool llama_supports_mlock (void); +@ctypes_function("llama_supports_mlock", [], ctypes.c_bool) +def llama_supports_mlock() -> bool: + ... + + +# LLAMA_API bool llama_supports_gpu_offload(void); +@ctypes_function("llama_supports_gpu_offload", [], ctypes.c_bool) +def llama_supports_gpu_offload() -> bool: + ... + + +# LLAMA_API bool llama_supports_rpc (void); +@ctypes_function("llama_supports_rpc", [], ctypes.c_bool) +def llama_supports_rpc() -> bool: + ... + + +# LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx); +@ctypes_function("llama_n_ctx", [llama_context_p_ctypes], ctypes.c_uint32) +def llama_n_ctx(ctx: llama_context_p, /) -> int: + ... + + +# LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx); +@ctypes_function("llama_n_batch", [llama_context_p_ctypes], ctypes.c_uint32) +def llama_n_batch(ctx: llama_context_p, /) -> int: + ... + + +# LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx); +@ctypes_function("llama_n_ubatch", [llama_context_p_ctypes], ctypes.c_uint32) +def llama_n_ubatch(ctx: llama_context_p, /) -> int: + ... + + +# LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx); +@ctypes_function("llama_n_seq_max", [llama_context_p_ctypes], ctypes.c_uint32) +def llama_n_seq_max(ctx: llama_context_p, /) -> int: + ... + + + + +# DEPRECATED(LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model), "use llama_model_n_ctx_train instead"); +@ctypes_function("llama_n_ctx_train", [llama_model_p_ctypes], ctypes.c_int32) +def llama_n_ctx_train(model: llama_model_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API int32_t llama_n_embd (const struct llama_model * model), "use llama_model_n_embd instead"); +@ctypes_function("llama_n_embd", [llama_model_p_ctypes], ctypes.c_int32) +def llama_n_embd(model: llama_model_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API int32_t llama_n_layer (const struct llama_model * model), "use llama_model_n_layer instead"); +@ctypes_function("llama_n_layer", [llama_model_p_ctypes], ctypes.c_int32) +def llama_n_layer(model: llama_model_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API int32_t llama_n_head (const struct llama_model * model), "use llama_model_n_head instead"); +@ctypes_function("llama_n_head", [llama_model_p_ctypes], ctypes.c_int32) +def llama_n_head(model: llama_model_p, /) -> int: + ... 
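# --- Illustrative sketch (not authoritative): probe build/runtime capabilities through the
# query bindings above before deciding how to configure a context. All of these return plain
# Python ints/bools via the ctypes return types declared in this module.
import llama_cpp.llama_cpp as llama_cpp

print("max devices:    ", llama_cpp.llama_max_devices())
print("mmap supported: ", llama_cpp.llama_supports_mmap())
print("mlock supported:", llama_cpp.llama_supports_mlock())
print("gpu offload:    ", llama_cpp.llama_supports_gpu_offload())
print("rpc supported:  ", llama_cpp.llama_supports_rpc())

# llama_n_ctx / llama_n_batch / llama_n_ubatch / llama_n_seq_max take a llama_context
# pointer created as in the loading sketch above, e.g. n_ctx = llama_cpp.llama_n_ctx(ctx)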
+ + +# DEPRECATED(LLAMA_API int32_t llama_n_vocab (const struct llama_vocab * vocab), "use llama_vocab_n_tokens instead"); +@ctypes_function("llama_n_vocab", [llama_vocab_p_ctypes], ctypes.c_int32) +def llama_n_vocab(model: llama_vocab_p, /) -> int: + ... + + +# LLAMA_API const struct llama_model * llama_get_model (const struct llama_context * ctx); +@ctypes_function("llama_get_model", [llama_context_p_ctypes], llama_model_p_ctypes) +def llama_get_model(ctx: llama_context_p, /) -> Optional[llama_model_p]: + ... + + +# LLAMA_API struct llama_kv_cache * llama_get_kv_self ( struct llama_context * ctx); +@ctypes_function( + "llama_get_kv_self", + [llama_context_p_ctypes], + llama_kv_cache_p_ctypes, +) +def llama_get_kv_self(ctx: llama_context_p, /) -> Optional[llama_kv_cache_p]: + """Get the KV cache for self-attention""" + ... + + +# LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); +@ctypes_function("llama_pooling_type", [llama_context_p_ctypes], ctypes.c_int) +def llama_pooling_type(ctx: llama_context_p, /) -> int: + ... + + +# LLAMA_API const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model); +@ctypes_function("llama_model_get_vocab", [llama_model_p_ctypes], llama_vocab_p_ctypes) +def llama_model_get_vocab(model: llama_model_p, /) -> Optional[llama_vocab_p]: + ... + + +# LLAMA_API enum llama_rope_type llama_model_rope_type(const struct llama_model * model); +@ctypes_function("llama_model_rope_type", [llama_model_p_ctypes], ctypes.c_int) +def llama_model_rope_type(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_ctx_train(const struct llama_model * model); +@ctypes_function("llama_model_n_ctx_train", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_ctx_train(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_embd (const struct llama_model * model); +@ctypes_function("llama_model_n_embd", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_embd(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_layer (const struct llama_model * model); +@ctypes_function("llama_model_n_layer", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_layer(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_head (const struct llama_model * model); +@ctypes_function("llama_model_n_head", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_head(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model); +@ctypes_function("llama_model_n_head_kv", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_head_kv(model: llama_model_p, /) -> int: + ... + + +# // Get the model's RoPE frequency scaling factor +# LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model); +@ctypes_function("llama_model_rope_freq_scale_train", [llama_model_p_ctypes], ctypes.c_float) +def llama_model_rope_freq_scale_train(model: llama_model_p, /) -> float: + ... + + +# LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model); +@ctypes_function("llama_vocab_type", [llama_model_p_ctypes], ctypes.c_int) +def llama_vocab_type(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_vocab_n_tokens(const struct llama_vocab * vocab); +@ctypes_function("llama_vocab_n_tokens", [llama_vocab_p_ctypes], ctypes.c_int32) +def llama_vocab_n_tokens(vocab: llama_vocab_p, /) -> int: + ... 
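# --- Illustrative sketch (not authoritative): given a `model` handle from
# llama_model_load_from_file (see the loading sketch above), read basic architecture and
# vocabulary properties through the non-deprecated llama_model_* / llama_vocab_* accessors.
import llama_cpp.llama_cpp as llama_cpp

def describe_model(model: llama_cpp.llama_model_p) -> dict:
    vocab = llama_cpp.llama_model_get_vocab(model)
    return {
        "n_ctx_train": llama_cpp.llama_model_n_ctx_train(model),
        "n_embd": llama_cpp.llama_model_n_embd(model),
        "n_layer": llama_cpp.llama_model_n_layer(model),
        "n_head": llama_cpp.llama_model_n_head(model),
        "rope_freq_scale": llama_cpp.llama_model_rope_freq_scale_train(model),
        "n_vocab": llama_cpp.llama_vocab_n_tokens(vocab) if vocab is not None else -1,
    }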
+ + +# // Functions to access the model's GGUF metadata scalar values +# // - The functions return the length of the string on success, or -1 on failure +# // - The output string is always null-terminated and cleared on failure +# // - When retrieving a string, an extra byte must be allocated to account for the null terminator +# // - GGUF array values are not supported by these functions + + +# // Get metadata value as a string by key name +# LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size); +@ctypes_function( + "llama_model_meta_val_str", + [ + llama_model_p_ctypes, + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.c_size_t, + ], + ctypes.c_int32, +) +def llama_model_meta_val_str( + model: llama_model_p, + key: Union[ctypes.c_char_p, bytes], + buf: bytes, + buf_size: int, + /, +) -> int: + """Get metadata value as a string by key name""" + ... + + +# // Get the number of metadata key/value pairs +# LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model); +@ctypes_function("llama_model_meta_count", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_meta_count(model: llama_model_p, /) -> int: + """Get the number of metadata key/value pairs""" + ... + + +# // Get metadata key name by index +# LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size); +@ctypes_function( + "llama_model_meta_key_by_index", + [ + llama_model_p_ctypes, + ctypes.c_int32, + ctypes.c_char_p, + ctypes.c_size_t, + ], + ctypes.c_int32, +) +def llama_model_meta_key_by_index( + model: llama_model_p, + i: Union[ctypes.c_int, int], + buf: Union[bytes, CtypesArray[ctypes.c_char]], + buf_size: int, + /, +) -> int: + """Get metadata key name by index""" + ... + + +# // Get metadata value as a string by index +# LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size); +@ctypes_function( + "llama_model_meta_val_str_by_index", + [ + llama_model_p_ctypes, + ctypes.c_int32, + ctypes.c_char_p, + ctypes.c_size_t, + ], + ctypes.c_int32, +) +def llama_model_meta_val_str_by_index( + model: llama_model_p, + i: Union[ctypes.c_int, int], + buf: Union[bytes, CtypesArray[ctypes.c_char]], + buf_size: int, + /, +) -> int: + """Get metadata value as a string by index""" + ... + + +# // Get a string describing the model type +# LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size); +@ctypes_function( + "llama_model_desc", + [llama_model_p_ctypes, ctypes.c_char_p, ctypes.c_size_t], + ctypes.c_int32, +) +def llama_model_desc( + model: llama_model_p, + buf: Union[bytes, CtypesArray[ctypes.c_char]], + buf_size: Union[ctypes.c_size_t, int], + /, +) -> int: + """Get a string describing the model type""" + ... + + +# // Returns the total size of all the tensors in the model in bytes +# LLAMA_API uint64_t llama_model_size(const struct llama_model * model); +@ctypes_function("llama_model_size", [llama_model_p_ctypes], ctypes.c_uint64) +def llama_model_size(model: llama_model_p, /) -> int: + """Returns the total size of all the tensors in the model in bytes""" + ... + + +# // Get the default chat template. 
Returns nullptr if not available +# // If name is NULL, returns the default chat template +# LLAMA_API const char * llama_model_chat_template(const struct llama_model * model, const char * name); +@ctypes_function("llama_model_chat_template", [llama_model_p_ctypes, ctypes.c_char_p], ctypes.c_char_p) +def llama_model_chat_template(model: llama_model_p, name: Optional[bytes], /) -> Optional[bytes]: + """Get the default chat template. Returns None if not available + If name is None, returns the default chat template""" + ... + + +# // Returns the total number of parameters in the model +# LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model); +@ctypes_function("llama_model_n_params", [llama_model_p_ctypes], ctypes.c_uint64) +def llama_model_n_params(model: llama_model_p, /) -> int: + """Returns the total number of parameters in the model""" + ... + + +# // Returns true if the model contains an encoder that requires llama_encode() call +# LLAMA_API bool llama_model_has_encoder(const struct llama_model * model); +@ctypes_function("llama_model_has_encoder", [llama_model_p_ctypes], ctypes.c_bool) +def llama_model_has_encoder(model: llama_model_p, /) -> bool: + """Returns true if the model contains an encoder that requires llama_encode() call""" + ... + + +# // Returns true if the model contains a decoder that requires llama_decode() call +# LLAMA_API bool llama_model_has_decoder(const struct llama_model * model); +@ctypes_function("llama_model_has_decoder", [llama_model_p_ctypes], ctypes.c_bool) +def llama_model_has_decoder(model: llama_model_p, /) -> bool: + """Returns true if the model contains a decoder that requires llama_decode() call""" + ... + + +# // For encoder-decoder models, this function returns id of the token that must be provided +# // to the decoder to start generating output sequence. For other models, it returns -1. +# LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model); +@ctypes_function( + "llama_model_decoder_start_token", [llama_model_p_ctypes], ctypes.c_int32 +) +def llama_model_decoder_start_token(model: llama_model_p, /) -> int: + """For encoder-decoder models, this function returns id of the token that must be provided + to the decoder to start generating output sequence. For other models, it returns -1. + """ + ... + + +# // Returns true if the model is recurrent (like Mamba, RWKV, etc.) +# LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model); +@ctypes_function("llama_model_is_recurrent", [llama_model_p_ctypes], ctypes.c_bool) +def llama_model_is_recurrent(model: llama_model_p, /) -> bool: + """Returns true if the model is recurrent (like Mamba, RWKV, etc.)""" + ... + + +# // Returns 0 on success +# LLAMA_API uint32_t llama_model_quantize( +# const char * fname_inp, +# const char * fname_out, +# const llama_model_quantize_params * params); +@ctypes_function( + "llama_model_quantize", + [ + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.POINTER(llama_model_quantize_params), + ], + ctypes.c_uint32, +) +def llama_model_quantize( + fname_inp: bytes, + fname_out: bytes, + params: CtypesPointerOrRef[llama_model_quantize_params], + /, +) -> int: + """Returns 0 on success""" + ... 
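# --- Illustrative sketch (not authoritative): the metadata accessors above write into
# caller-provided char buffers and return the string length, or -1 on failure. A fixed
# 1024-byte buffer (an arbitrary assumption) is enough to dump the GGUF metadata
# key/value pairs and the model description for a loaded `model` handle.
import ctypes
import llama_cpp.llama_cpp as llama_cpp

def dump_metadata(model: llama_cpp.llama_model_p) -> None:
    buf = ctypes.create_string_buffer(1024)

    if llama_cpp.llama_model_desc(model, buf, ctypes.sizeof(buf)) >= 0:
        print("desc:", buf.value.decode("utf-8", errors="replace"))

    print("params:", llama_cpp.llama_model_n_params(model))
    print("size (bytes):", llama_cpp.llama_model_size(model))

    for i in range(llama_cpp.llama_model_meta_count(model)):
        if llama_cpp.llama_model_meta_key_by_index(model, i, buf, ctypes.sizeof(buf)) < 0:
            continue
        key = buf.value.decode("utf-8", errors="replace")
        if llama_cpp.llama_model_meta_val_str_by_index(model, i, buf, ctypes.sizeof(buf)) < 0:
            continue
        print(f"{key} = {buf.value.decode('utf-8', errors='replace')}")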
+ + +# // Load a LoRA adapter from file +# LLAMA_API struct llama_adapter_lora * llama_adapter_lora_init( +# struct llama_model * model, +# const char * path_lora); +@ctypes_function( + "llama_adapter_lora_init", + [llama_model_p_ctypes, ctypes.c_char_p], + llama_adapter_lora_p_ctypes, +) +def llama_adapter_lora_init( + model: llama_model_p, path_lora: bytes, / +) -> Optional[llama_adapter_lora_p]: + ... + + +# // Manually free a LoRA adapter +# // Note: loaded adapters will be free when the associated model is deleted +# LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter); +@ctypes_function( + "llama_adapter_lora_free", + [llama_adapter_lora_p_ctypes], + None, +) +def llama_adapter_lora_free(adapter: llama_adapter_lora_p, /): + ... + + +# // The following functions operate on a llama_context, hence the naming: llama_verb_... + + +# // Add a loaded LoRA adapter to given context +# // This will not modify model's weight +# LLAMA_API int32_t llama_set_adapter_lora( +# struct llama_context * ctx, +# struct llama_adapter_lora * adapter, +# float scale); +@ctypes_function( + "llama_set_adapter_lora", + [llama_context_p_ctypes, llama_adapter_lora_p_ctypes, ctypes.c_float], + ctypes.c_int32, +) +def llama_set_adapter_lora( + ctx: llama_context_p, adapter: llama_adapter_lora_p, scale: float, / +) -> int: + """Add a loaded LoRA adapter to given context + This will not modify model's weight""" + ... + + +# // Remove a specific LoRA adapter from given context +# // Return -1 if the adapter is not present in the context +# LLAMA_API int32_t llama_rm_adapter_lora( +# struct llama_context * ctx, +# struct llama_adapter_lora * adapter); +@ctypes_function( + "llama_rm_adapter_lora", + [llama_context_p_ctypes, llama_adapter_lora_p_ctypes], + ctypes.c_int32, +) +def llama_rm_adapter_lora( + ctx: llama_context_p, adapter: llama_adapter_lora_p, / +) -> int: + """Remove a specific LoRA adapter from given context + Return -1 if the adapter is not present in the context""" + ... + + +# // Remove all LoRA adapters from given context +# LLAMA_API void llama_clear_adapter_lora(struct llama_context * ctx); +@ctypes_function( + "llama_clear_adapter_lora", + [llama_context_p_ctypes], + None, +) +def llama_clear_adapter_lora(ctx: llama_context_p, /): + """Remove all LoRA adapters from given context""" + ... + + +# // Apply a loaded control vector to a llama_context, or if data is NULL, clear +# // the currently loaded vector. +# // n_embd should be the size of a single layer's control, and data should point +# // to an n_embd x n_layers buffer starting from layer 1. +# // il_start and il_end are the layer range the vector should apply to (both inclusive) +# // See llama_control_vector_load in common to load a control vector. +# LLAMA_API int32_t llama_apply_adapter_cvec( +# struct llama_context * ctx, +# const float * data, +# size_t len, +# int32_t n_embd, +# int32_t il_start, +# int32_t il_end); +@ctypes_function( + "llama_apply_adapter_cvec", + [ + llama_context_p_ctypes, + ctypes.POINTER(ctypes.c_float), + ctypes.c_size_t, + ctypes.c_int32, + ctypes.c_int32, + ctypes.c_int32, + ], + ctypes.c_int32, +) +def llama_apply_adapter_cvec( + ctx: llama_context_p, + data: CtypesPointerOrRef[ctypes.c_float], + len: int, + n_embd: int, + il_start: int, + il_end: int, + /, +) -> int: + """Apply a loaded control vector to a llama_context, or if data is NULL, clear + the currently loaded vector. 
+ n_embd should be the size of a single layer's control, and data should point + to an n_embd x n_layers buffer starting from layer 1. + il_start and il_end are the layer range the vector should apply to (both inclusive) + See llama_control_vector_load in common to load a control vector.""" + ... + + +# // +# // KV cache +# // + + +# // Information associated with an individual cell in the KV cache view. +# struct llama_kv_cache_view_cell { +# // The position for this cell. Takes KV cache shifts into account. +# // May be negative if the cell is not populated. +# llama_pos pos; +# }; +class llama_kv_cache_view_cell(ctypes.Structure): + """Information associated with an individual cell in the KV cache view. + + Attributes: + pos (llama_pos): The position for this cell. Takes KV cache shifts into account. + May be negative if the cell is not populated.""" + + if TYPE_CHECKING: + pos: llama_pos + + _fields_ = [("pos", llama_pos)] + + +# // An updateable view of the KV cache. +# struct llama_kv_cache_view { +# // Number of KV cache cells. This will be the same as the context size. +# int32_t n_cells; + +# // Maximum number of sequences that can exist in a cell. It's not an error +# // if there are more sequences in a cell than this value, however they will +# // not be visible in the view cells_sequences. +# int32_t n_seq_max; + +# // Number of tokens in the cache. For example, if there are two populated +# // cells, the first with 1 sequence id in it and the second with 2 sequence +# // ids then you'll have 3 tokens. +# int32_t token_count; + +# // Number of populated cache cells. +# int32_t used_cells; + +# // Maximum contiguous empty slots in the cache. +# int32_t max_contiguous; + +# // Index to the start of the max_contiguous slot range. Can be negative +# // when cache is full. +# int32_t max_contiguous_idx; + +# // Information for an individual cell. +# struct llama_kv_cache_view_cell * cells; + + +# // The sequences for each cell. There will be n_seq_max items per cell. +# llama_seq_id * cells_sequences; +# }; +class llama_kv_cache_view(ctypes.Structure): + if TYPE_CHECKING: + n_cells: int + n_max_seq: int + token_count: int + used_cells: int + max_contiguous: int + max_contiguous_idx: int + cells: CtypesArray[llama_kv_cache_view_cell] + cells_sequences: CtypesArray[llama_seq_id] + + _fields_ = [ + ("n_cells", ctypes.c_int32), + ("n_max_seq", ctypes.c_int32), + ("token_count", ctypes.c_int32), + ("used_cells", ctypes.c_int32), + ("max_contiguous", ctypes.c_int32), + ("max_contiguous_idx", ctypes.c_int32), + ("cells", ctypes.POINTER(llama_kv_cache_view_cell)), + ("cells_sequences", ctypes.POINTER(llama_seq_id)), + ] + + +llama_kv_cache_view_p = ctypes.POINTER(llama_kv_cache_view) + + +# // Create an empty KV cache view. (use only for debugging purposes) +# LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max); +@ctypes_function( + "llama_kv_cache_view_init", + [llama_context_p_ctypes, ctypes.c_int32], + llama_kv_cache_view, +) +def llama_kv_cache_view_init( + ctx: llama_context_p, n_seq_max: Union[ctypes.c_int32, int], / +) -> llama_kv_cache_view: + """Create an empty KV cache view. (use only for debugging purposes)""" + ... + + +# // Free a KV cache view. 
(use only for debugging purposes) +# LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view); +@ctypes_function("llama_kv_cache_view_free", [llama_kv_cache_view_p], None) +def llama_kv_cache_view_free(view: "ctypes.pointer[llama_kv_cache_view]", /): # type: ignore + """Free a KV cache view. (use only for debugging purposes)""" + ... + + +# // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes) +# LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view); +@ctypes_function( + "llama_kv_cache_view_update", [llama_context_p_ctypes, llama_kv_cache_view_p], None +) +def llama_kv_cache_view_update(ctx: llama_context_p, view: CtypesPointerOrRef[llama_kv_cache_view], /): # type: ignore + """Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)""" + ... + + +# // Returns the number of tokens in the KV cache (slow, use only for debug) +# // If a KV cell has multiple sequences assigned to it, it will be counted multiple times +# LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx); +@ctypes_function( + "llama_kv_self_n_tokens", [llama_context_p_ctypes], ctypes.c_int32 +) +def llama_kv_self_n_tokens(ctx: llama_context_p, /) -> int: + """Returns the number of tokens in the KV cache (slow, use only for debug) + If a KV cell has multiple sequences assigned to it, it will be counted multiple times + """ + ... + + +# DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx), +# "use llama_kv_self_n_tokens instead"); +@ctypes_function( + "llama_get_kv_cache_token_count", [llama_context_p_ctypes], ctypes.c_int32 +) +def llama_get_kv_cache_token_count(ctx: llama_context_p, /) -> int: + """Returns the number of tokens in the KV cache (slow, use only for debug) + If a KV cell has multiple sequences assigned to it, it will be counted multiple times + """ + ... + + +# // Returns the number of used KV cells (i.e. have at least one sequence assigned to them) +# LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx); +@ctypes_function( + "llama_kv_self_used_cells", [llama_context_p_ctypes], ctypes.c_int32 +) +def llama_kv_self_used_cells(ctx: llama_context_p, /) -> int: + """Returns the number of used KV cells (i.e. have at least one sequence assigned to them)""" + ... + + +# DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx), +# "use llama_kv_self_used_cells instead"); +@ctypes_function( + "llama_get_kv_cache_used_cells", [llama_context_p_ctypes], ctypes.c_int32 +) +def llama_get_kv_cache_used_cells(ctx: llama_context_p, /) -> int: + """Returns the number of used KV cells (i.e. have at least one sequence assigned to them)""" + ... + + +# // Clear the KV cache - both cell info is erased and KV data is zeroed +# LLAMA_API void llama_kv_self_clear( +# struct llama_context * ctx); +@ctypes_function( + "llama_kv_self_clear", [llama_context_p_ctypes], None +) +def llama_kv_self_clear(ctx: llama_context_p, /): + """Clear the KV cache - both cell info is erased and KV data is zeroed""" + ... + +# NOTE: Deprecated +@ctypes_function("llama_kv_self_clear", [llama_context_p_ctypes], None) +def llama_kv_cache_clear(ctx: llama_context_p, /): + """Clear the KV cache""" + ... 
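+
+
+# NOTE (editor's sketch): illustrative, debug-only helper built on the KV cache
+# view API above; not part of the upstream bindings. `ctx` is assumed to be a
+# valid llama_context pointer and n_seq_max=1 is an arbitrary example value.
+def _sketch_dump_kv_cache(ctx: llama_context_p) -> None:
+    view = llama_kv_cache_view_init(ctx, 1)
+    try:
+        llama_kv_cache_view_update(ctx, ctypes.byref(view))
+        print(f"cells={view.n_cells} used={view.used_cells} tokens={view.token_count}")
+        for i in range(view.n_cells):
+            # A negative position means the cell is not populated.
+            if view.cells[i].pos >= 0:
+                print(f"  cell {i}: pos={view.cells[i].pos}")
+    finally:
+        llama_kv_cache_view_free(ctypes.byref(view))
+    # The (slow, debug-only) counters above report the same totals directly:
+    print(llama_kv_self_n_tokens(ctx), llama_kv_self_used_cells(ctx))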
+ + +# // Removes all tokens that belong to the specified sequence and have positions in [p0, p1) +# // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails +# // seq_id < 0 : match any sequence +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API bool llama_kv_cache_seq_rm( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1); +@ctypes_function( + "llama_kv_cache_seq_rm", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + ], + ctypes.c_bool, +) +def llama_kv_cache_seq_rm( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + /, +) -> bool: + """Removes all tokens that belong to the specified sequence and have positions in [p0, p1) + + Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails + + seq_id < 0 : match any sequence + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... + + +# // Copy all tokens that belong to the specified sequence to another sequence +# // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_self_seq_cp( +# struct llama_context * ctx, +# llama_seq_id seq_id_src, +# llama_seq_id seq_id_dst, +# llama_pos p0, +# llama_pos p1); +@ctypes_function( + "llama_kv_self_seq_cp", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_seq_id, + llama_pos, + llama_pos, + ], + None, +) +def llama_kv_self_seq_cp( + ctx: llama_context_p, + seq_id_src: Union[llama_seq_id, int], + seq_id_dst: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + /, +): + """Copy all tokens that belong to the specified sequence to another sequence + Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... + + +# NOTE: Deprecated +@ctypes_function( + "llama_kv_self_seq_cp", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_seq_id, + llama_pos, + llama_pos, + ], + None, +) +def llama_kv_cache_seq_cp( + ctx: llama_context_p, + seq_id_src: Union[llama_seq_id, int], + seq_id_dst: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + /, +): + """Copy all tokens that belong to the specified sequence to another sequence + Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... + + +# // Removes all tokens that do not belong to the specified sequence +# LLAMA_API void llama_kv_self_seq_keep( +# struct llama_context * ctx, +# llama_seq_id seq_id); +@ctypes_function( + "llama_kv_self_seq_keep", [llama_context_p_ctypes, llama_seq_id], None +) +def llama_kv_self_seq_keep(ctx: llama_context_p, seq_id: Union[llama_seq_id, int], /): + """Removes all tokens that do not belong to the specified sequence""" + ... + + +# NOTE: Deprecated +@ctypes_function( + "llama_kv_self_seq_keep", [llama_context_p_ctypes, llama_seq_id], None +) +def llama_kv_cache_seq_keep(ctx: llama_context_p, seq_id: Union[llama_seq_id, int], /): + """Removes all tokens that do not belong to the specified sequence""" + ... 
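+
+
+# NOTE (editor's sketch): illustrative use of the sequence-level cache calls
+# above; not part of the upstream API. The scenario (a prompt of `n_prompt`
+# tokens in sequence 0, forked into sequence 1) is hypothetical.
+def _sketch_fork_and_prune(ctx: llama_context_p, n_prompt: int) -> None:
+    # Share the already-evaluated prompt with sequence 1 (no extra KV memory
+    # is allocated; the existing cells are simply tagged with the new seq id).
+    llama_kv_self_seq_cp(ctx, 0, 1, 0, n_prompt)
+    # Drop everything sequence 0 holds after the prompt, i.e. [n_prompt, inf).
+    llama_kv_cache_seq_rm(ctx, 0, n_prompt, -1)
+    # Later, keep only sequence 1 and discard all other sequences.
+    llama_kv_self_seq_keep(ctx, 1)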
+ + + +# // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) +# // If the KV cache is RoPEd, the KV data is updated accordingly: +# // - lazily on next llama_decode() +# // - explicitly with llama_kv_cache_update() +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_cache_seq_add( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1, +# llama_pos delta); +@ctypes_function( + "llama_kv_self_seq_add", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + llama_pos, + ], + None, +) +def llama_kv_self_seq_add( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + delta: Union[llama_pos, int], + /, +): + """Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) + If the KV cache is RoPEd, the KV data is updated accordingly: + - lazily on next llama_decode() + - explicitly with llama_kv_cache_update() + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... + + +# // NOTE: Deprecated +# // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) +# // If the KV cache is RoPEd, the KV data is updated accordingly: +# // - lazily on next llama_decode() +# // - explicitly with llama_kv_cache_update() +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_cache_seq_add( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1, +# llama_pos delta); +@ctypes_function( + "llama_kv_self_seq_add", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + llama_pos, + ], + None, +) +def llama_kv_cache_seq_add( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + delta: Union[llama_pos, int], + /, +): + """Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) + If the KV cache is RoPEd, the KV data is updated accordingly: + - lazily on next llama_decode() + - explicitly with llama_kv_cache_update() + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... + + +# // Integer division of the positions by factor of `d > 1` +# // If the KV cache is RoPEd, the KV data is updated accordingly +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_cache_seq_div( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1, +# int d); +@ctypes_function( + "llama_kv_self_seq_div", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + ctypes.c_int, + ], + None, +) +def llama_kv_self_seq_div( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + d: Union[ctypes.c_int, int], + /, +): + """Integer division of the positions by factor of `d > 1` + If the KV cache is RoPEd, the KV data is updated accordingly + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... 
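+
+
+# NOTE (editor's sketch): the usual "context shift" pattern assembled from
+# llama_kv_cache_seq_rm() and llama_kv_self_seq_add() above; the helper name
+# and the n_keep/n_past/n_discard bookkeeping are illustrative assumptions.
+def _sketch_context_shift(ctx: llama_context_p, n_keep: int, n_past: int, n_discard: int) -> int:
+    # Remove the oldest generated tokens, positions [n_keep, n_keep + n_discard).
+    llama_kv_cache_seq_rm(ctx, 0, n_keep, n_keep + n_discard)
+    # Shift the surviving tokens [n_keep + n_discard, n_past) left by n_discard
+    # so positions stay contiguous; a RoPE'd cache is re-rotated lazily on the
+    # next llama_decode().
+    llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_past, -n_discard)
+    return n_past - n_discard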
+ + +# // NOTE: Deprecated +# // Integer division of the positions by factor of `d > 1` +# // If the KV cache is RoPEd, the KV data is updated accordingly +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_cache_seq_div( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1, +# int d); +@ctypes_function( + "llama_kv_self_seq_div", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + ctypes.c_int, + ], + None, +) +def llama_kv_cache_seq_div( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + d: Union[ctypes.c_int, int], + /, +): + """Integer division of the positions by factor of `d > 1` + If the KV cache is RoPEd, the KV data is updated accordingly + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... + + +# // Returns the largest position present in the KV cache for the specified sequence +# LLAMA_API llama_pos llama_kv_self_seq_pos_max( +# struct llama_context * ctx, +# llama_seq_id seq_id); +@ctypes_function( + "llama_kv_self_seq_pos_max", [llama_context_p_ctypes, llama_seq_id], llama_pos +) +def llama_kv_self_seq_pos_max( + ctx: llama_context_p, seq_id: Union[llama_seq_id, int], / +) -> int: + """Returns the largest position present in the KV cache for the specified sequence""" + ... + + +# // Defragment the KV cache +# // This will be applied: +# // - lazily on next llama_decode() +# // - explicitly with llama_kv_self_update() +# LLAMA_API void llama_kv_self_defrag(struct llama_context * ctx); +@ctypes_function("llama_kv_self_defrag", [llama_context_p_ctypes], None) +def llama_kv_self_defrag(ctx: llama_context_p, /): + """Defragment the KV cache + This will be applied: + - lazily on next llama_decode() + - explicitly with llama_kv_cache_update()""" + ... + + +# NOTE: Deprecated +# // Defragment the KV cache +# // This will be applied: +# // - lazily on next llama_decode() +# // - explicitly with llama_kv_self_update() +# LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx); +@ctypes_function("llama_kv_cache_defrag", [llama_context_p_ctypes], None) +def llama_kv_cache_defrag(ctx: llama_context_p, /): + """Defragment the KV cache + This will be applied: + - lazily on next llama_decode() + - explicitly with llama_kv_cache_update()""" + ... + + +# // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) +# LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); +@ctypes_function("llama_kv_self_update", [llama_context_p_ctypes], None) +def llama_kv_self_update(ctx: llama_context_p, /): + """Apply the KV cache updates (such as K-shifts, defragmentation, etc.)""" + ... + +# // NOTE: Deprecated +# // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) +# LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); +@ctypes_function("llama_kv_self_update", [llama_context_p_ctypes], None) +def llama_kv_cache_update(ctx: llama_context_p, /): + """Apply the KV cache updates (such as K-shifts, defragmentation, etc.)""" + ... + + +# // Check if the context supports KV cache shifting +# LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); +@ctypes_function("llama_kv_self_can_shift", [llama_context_p_ctypes], ctypes.c_bool) +def llama_kv_self_can_shift(ctx: llama_context_p, /) -> bool: + """Check if the context supports KV cache shifting""" + ... 
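+
+
+# NOTE (editor's sketch): forcing pending KV cache maintenance to run now
+# instead of lazily on the next llama_decode(); illustrative only.
+def _sketch_apply_kv_maintenance(ctx: llama_context_p) -> None:
+    llama_kv_self_defrag(ctx)   # queue a defragmentation pass
+    llama_kv_self_update(ctx)   # apply pending updates (K-shifts, defrag, ...) immediately
+    # llama_kv_self_can_shift(ctx) reports whether the context supports the
+    # K-shifts scheduled by llama_kv_self_seq_add()/llama_kv_self_seq_div().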
+ + +# // NOTE: Deprecated +# // Check if the context supports KV cache shifting +# LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); +@ctypes_function("llama_kv_self_can_shift", [llama_context_p_ctypes], ctypes.c_bool) +def llama_kv_cache_can_shift(ctx: llama_context_p, /) -> bool: + """Check if the context supports KV cache shifting""" + ... + + +# // +# // State / sessions +# // + + +# // Returns the *actual* size in bytes of the state +# // (logits, embedding and kv_cache) +# // Only use when saving the state, not when restoring it, otherwise the size may be too small. +# LLAMA_API size_t llama_state_get_size(struct llama_context * ctx); +@ctypes_function("llama_state_get_size", [llama_context_p_ctypes], ctypes.c_size_t) +def llama_state_get_size(ctx: llama_context_p, /) -> int: + """Returns the *actual* size in bytes of the state (rng, logits, embedding and kv_cache) - will often be smaller after compacting tokens""" + ... + + +# LLAMA_API DEPRECATED(size_t llama_get_state_size(struct llama_context * ctx), +# "use llama_state_get_size instead"); +@ctypes_function("llama_get_state_size", [llama_context_p_ctypes], ctypes.c_size_t) +def llama_get_state_size(ctx: llama_context_p, /) -> int: + """Returns the maximum size in bytes of the state (rng, logits, embedding + and kv_cache) - will often be smaller after compacting tokens""" + ... + + +# // Copies the state to the specified destination address. +# // Destination needs to have allocated enough memory. +# // Returns the number of bytes copied +# LLAMA_API size_t llama_state_get_data( +# struct llama_context * ctx, +# uint8_t * dst, +# size_t size); +@ctypes_function( + "llama_state_get_data", + [ + llama_context_p_ctypes, + ctypes.POINTER(ctypes.c_uint8), + ctypes.c_size_t, + ], + ctypes.c_size_t, +) +def llama_state_get_data( + ctx: llama_context_p, + dst: CtypesArray[ctypes.c_uint8], + size: Union[ctypes.c_size_t, int], + /, +) -> int: + """Copies the state to the specified destination address. + Destination needs to have allocated enough memory. + Returns the number of bytes copied""" + ... + + +# LLAMA_API DEPRECATED(size_t llama_copy_state_data( +# struct llama_context * ctx, +# uint8_t * dst), +# "use llama_state_get_data instead"); +@ctypes_function( + "llama_copy_state_data", + [ + llama_context_p_ctypes, + ctypes.POINTER(ctypes.c_uint8), + ], + ctypes.c_size_t, +) +def llama_copy_state_data( + ctx: llama_context_p, dst: CtypesArray[ctypes.c_uint8], / +) -> int: + """Copies the state to the specified destination address. + Destination needs to have allocated enough memory. + Returns the number of bytes copied""" + ... + + +# // Set the state reading from the specified address +# // Returns the number of bytes read +# LLAMA_API size_t llama_state_set_data( +# struct llama_context * ctx, +# const uint8_t * src, +# size_t size); +@ctypes_function( + "llama_state_set_data", + [llama_context_p_ctypes, ctypes.POINTER(ctypes.c_uint8), ctypes.c_size_t], + ctypes.c_size_t, +) +def llama_state_set_data( + ctx: llama_context_p, + src: CtypesArray[ctypes.c_uint8], + size: Union[ctypes.c_size_t, int], + /, +) -> int: + """Set the state reading from the specified address + Returns the number of bytes read""" + ... 
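+
+
+# NOTE (editor's sketch): in-memory snapshot/restore of the full context state
+# using the three calls above; illustrative only. The round-trip through a
+# Python bytes object is an arbitrary choice for the example.
+def _sketch_snapshot_state(ctx: llama_context_p) -> bytes:
+    n = llama_state_get_size(ctx)                  # only valid at save time
+    buf = (ctypes.c_uint8 * n)()
+    n_written = llama_state_get_data(ctx, buf, n)
+    return bytes(buf[:n_written])
+
+
+def _sketch_restore_state(ctx: llama_context_p, blob: bytes) -> int:
+    buf = (ctypes.c_uint8 * len(blob)).from_buffer_copy(blob)
+    return llama_state_set_data(ctx, buf, len(blob))   # returns bytes read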
+ + +# LLAMA_API DEPRECATED(size_t llama_set_state_data( +# struct llama_context * ctx, +# const uint8_t * src), +# "use llama_state_set_data instead"); +@ctypes_function( + "llama_set_state_data", + [llama_context_p_ctypes, ctypes.POINTER(ctypes.c_uint8)], + ctypes.c_size_t, +) +def llama_set_state_data( + ctx: llama_context_p, src: CtypesArray[ctypes.c_uint8], / +) -> int: + """Set the state reading from the specified address""" + ... + + +# Save/load session file +# LLAMA_API bool llama_state_load_file( +# struct llama_context * ctx, +# const char * path_session, +# llama_token * tokens_out, +# size_t n_token_capacity, +# size_t * n_token_count_out); +@ctypes_function( + "llama_state_load_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_token_p, + ctypes.c_size_t, + ctypes.POINTER(ctypes.c_size_t), + ], + ctypes.c_bool, +) +def llama_state_load_file( + ctx: llama_context_p, + path_session: bytes, + tokens_out: CtypesArray[llama_token], + n_token_capacity: Union[ctypes.c_size_t, int], + n_token_count_out: CtypesPointerOrRef[ctypes.c_size_t], + /, +) -> bool: + ... + + +# LLAMA_API DEPRECATED(bool llama_load_session_file( +# struct llama_context * ctx, +# const char * path_session, +# llama_token * tokens_out, +# size_t n_token_capacity, +# size_t * n_token_count_out), +# "use llama_state_load_file instead"); +@ctypes_function( + "llama_load_session_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_token_p, + ctypes.c_size_t, + ctypes.POINTER(ctypes.c_size_t), + ], + ctypes.c_size_t, +) +def llama_load_session_file( + ctx: llama_context_p, + path_session: bytes, + tokens_out: CtypesArray[llama_token], + n_token_capacity: Union[ctypes.c_size_t, int], + n_token_count_out: CtypesPointerOrRef[ctypes.c_size_t], + /, +) -> int: + ... + + +# LLAMA_API bool llama_state_save_file( +# struct llama_context * ctx, +# const char * path_session, +# const llama_token * tokens, +# size_t n_token_count); +@ctypes_function( + "llama_state_save_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_token_p, + ctypes.c_size_t, + ], + ctypes.c_bool, +) +def llama_state_save_file( + ctx: llama_context_p, + path_session: bytes, + tokens: CtypesArray[llama_token], + n_token_count: Union[ctypes.c_size_t, int], + /, +) -> bool: + ... + + +# LLAMA_API DEPRECATED(bool llama_save_session_file( +# struct llama_context * ctx, +# const char * path_session, +# const llama_token * tokens, +# size_t n_token_count), +# "use llama_state_save_file instead"); +@ctypes_function( + "llama_save_session_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_token_p, + ctypes.c_size_t, + ], + ctypes.c_size_t, +) +def llama_save_session_file( + ctx: llama_context_p, + path_session: bytes, + tokens: CtypesArray[llama_token], + n_token_count: Union[ctypes.c_size_t, int], + /, +) -> int: + ... + + +# // Get the exact size needed to copy the KV cache of a single sequence +# LLAMA_API size_t llama_state_seq_get_size( +# struct llama_context * ctx, +# llama_seq_id seq_id); +@ctypes_function( + "llama_state_seq_get_size", + [llama_context_p_ctypes, llama_seq_id], + ctypes.c_size_t, +) +def llama_state_seq_get_size(ctx: llama_context_p, seq_id: llama_seq_id, /) -> int: + """Get the exact size needed to copy the KV cache of a single sequence""" + ... 
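+
+
+# NOTE (editor's sketch): persisting a session to disk and loading it back with
+# the save/load pair above; illustrative only. The file name, the capacity and
+# the `prompt_tokens` argument are assumptions made for the example.
+def _sketch_save_session(ctx: llama_context_p, prompt_tokens: list) -> bool:
+    toks = (llama_token * len(prompt_tokens))(*prompt_tokens)
+    return llama_state_save_file(ctx, b"session.bin", toks, len(prompt_tokens))
+
+
+def _sketch_load_session(ctx: llama_context_p, capacity: int = 4096):
+    toks = (llama_token * capacity)()
+    n_out = ctypes.c_size_t(0)
+    if not llama_state_load_file(ctx, b"session.bin", toks, capacity, ctypes.byref(n_out)):
+        return None
+    return list(toks[: n_out.value])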
+ + +# // Copy the KV cache of a single sequence into the specified buffer +# LLAMA_API size_t llama_state_seq_get_data( +# struct llama_context * ctx, +# uint8_t * dst, +# size_t size, +# llama_seq_id seq_id); +@ctypes_function( + "llama_state_seq_get_data", + [ + llama_context_p_ctypes, + ctypes.POINTER(ctypes.c_uint8), + ctypes.c_size_t, + llama_seq_id, + ], + ctypes.c_size_t, +) +def llama_state_seq_get_data( + ctx: llama_context_p, + dst: CtypesArray[ctypes.c_uint8], + size: Union[ctypes.c_size_t, int], + seq_id: llama_seq_id, + /, +) -> int: + """Copy the KV cache of a single sequence into the specified buffer""" + ... + + +# // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence +# // Returns: +# // - Positive: Ok +# // - Zero: Failed to load +# LLAMA_API size_t llama_state_seq_set_data( +# struct llama_context * ctx, +# const uint8_t * src, +# size_t size, +# llama_seq_id dest_seq_id); +@ctypes_function( + "llama_state_seq_set_data", + [ + llama_context_p_ctypes, + ctypes.POINTER(ctypes.c_uint8), + ctypes.c_size_t, + llama_seq_id, + ], + ctypes.c_size_t, +) +def llama_state_seq_set_data( + ctx: llama_context_p, + src: CtypesArray[ctypes.c_uint8], + size: Union[ctypes.c_size_t, int], + dest_seq_id: llama_seq_id, + /, +) -> int: + """Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence""" + ... + + +# LLAMA_API size_t llama_state_seq_save_file( +# struct llama_context * ctx, +# const char * filepath, +# llama_seq_id seq_id, +# const llama_token * tokens, +# size_t n_token_count); +@ctypes_function( + "llama_state_seq_save_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_seq_id, + llama_token_p, + ctypes.c_size_t, + ], + ctypes.c_size_t, +) +def llama_state_seq_save_file( + ctx: llama_context_p, + filepath: bytes, + seq_id: llama_seq_id, + tokens: CtypesArray[llama_token], + n_token_count: Union[ctypes.c_size_t, int], + /, +) -> int: + ... + + +# LLAMA_API size_t llama_state_seq_load_file( +# struct llama_context * ctx, +# const char * filepath, +# llama_seq_id dest_seq_id, +# llama_token * tokens_out, +# size_t n_token_capacity, +# size_t * n_token_count_out); +@ctypes_function( + "llama_state_seq_load_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_seq_id, + llama_token_p, + ctypes.c_size_t, + ctypes.POINTER(ctypes.c_size_t), + ], + ctypes.c_size_t, +) +def llama_state_seq_load_file( + ctx: llama_context_p, + filepath: bytes, + dest_seq_id: llama_seq_id, + tokens_out: CtypesArray[llama_token], + n_token_capacity: Union[ctypes.c_size_t, int], + n_token_count_out: CtypesPointerOrRef[ctypes.c_size_t], + /, +) -> int: + ... + + +# // +# // Decoding +# // + + +# // Return batch for single sequence of tokens +# // The sequence ID will be fixed to 0 +# // The position of the tokens will be tracked automatically by llama_decode +# // +# // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it +# // +# LLAMA_API struct llama_batch llama_batch_get_one( +# llama_token * tokens, +# int32_t n_tokens); +@ctypes_function( + "llama_batch_get_one", + [ + llama_token_p, + ctypes.c_int32, + ], + llama_batch, +) +def llama_batch_get_one( + tokens: CtypesArray[llama_token], + n_tokens: Union[ctypes.c_int, int], + /, +) -> llama_batch: + """Return batch for single sequence of tokens starting at pos_0 + + NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it + """ + ... 
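+
+
+# NOTE (editor's sketch): copying the KV cache of one sequence into another
+# sequence (possibly in a different context) with the per-sequence state calls
+# above; illustrative only, sequence ids 0 and 1 are example values.
+def _sketch_copy_sequence_state(src_ctx: llama_context_p, dst_ctx: llama_context_p) -> int:
+    n = llama_state_seq_get_size(src_ctx, 0)
+    buf = (ctypes.c_uint8 * n)()
+    n = llama_state_seq_get_data(src_ctx, buf, n, 0)
+    # Returns 0 if the destination could not load the data, else bytes consumed.
+    return llama_state_seq_set_data(dst_ctx, buf, n, 1)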
+
+
+# // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
+# // Each token can be assigned up to n_seq_max sequence ids
+# // The batch has to be freed with llama_batch_free()
+# // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
+# // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
+# // The rest of the llama_batch members are allocated with size n_tokens
+# // All members are left uninitialized
+# LLAMA_API struct llama_batch llama_batch_init(
+#         int32_t n_tokens,
+#         int32_t embd,
+#         int32_t n_seq_max);
+@ctypes_function(
+    "llama_batch_init", [ctypes.c_int32, ctypes.c_int32, ctypes.c_int32], llama_batch
+)
+def llama_batch_init(
+    n_tokens: Union[ctypes.c_int32, int],
+    embd: Union[ctypes.c_int32, int],
+    n_seq_max: Union[ctypes.c_int32, int],
+    /,
+) -> llama_batch:
+    """Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
+    Each token can be assigned up to n_seq_max sequence ids
+    The batch has to be freed with llama_batch_free()
+    If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
+    Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
+    The rest of the llama_batch members are allocated with size n_tokens
+    All members are left uninitialized"""
+    ...
+
+
+# // Frees a batch of tokens allocated with llama_batch_init()
+# LLAMA_API void llama_batch_free(struct llama_batch batch);
+@ctypes_function("llama_batch_free", [llama_batch], None)
+def llama_batch_free(batch: llama_batch, /):
+    """Frees a batch of tokens allocated with llama_batch_init()"""
+    ...
+
+
+# // Processes a batch of tokens with the encoder part of the encoder-decoder model.
+# // Stores the encoder output internally for later use by the decoder cross-attention layers.
+# //   0 - success
+# // < 0 - error
+# LLAMA_API int32_t llama_encode(
+#         struct llama_context * ctx,
+#         struct llama_batch batch);
+@ctypes_function("llama_encode", [llama_context_p_ctypes, llama_batch], ctypes.c_int32)
+def llama_encode(ctx: llama_context_p, batch: llama_batch, /) -> int:
+    """Processes a batch of tokens with the encoder part of the encoder-decoder model.
+    Stores the encoder output internally for later use by the decoder cross-attention layers.
+    0 - success
+    < 0 - error"""
+    ...
+
+
+# // Positive return values do not mean a fatal error, but rather a warning.
+# //   0 - success
+# //   1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
+# // < 0 - error
+# LLAMA_API int32_t llama_decode(
+#         struct llama_context * ctx,
+#         struct llama_batch batch);
+@ctypes_function("llama_decode", [llama_context_p_ctypes, llama_batch], ctypes.c_int32)
+def llama_decode(ctx: llama_context_p, batch: llama_batch, /) -> int:
+    """Positive return values do not mean a fatal error, but rather a warning.
+    0 - success
+    1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
+    < 0 - error"""
+    ...
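+
+
+# NOTE (editor's sketch): evaluating an already-tokenized prompt through the
+# transitional llama_batch_get_one() helper and checking llama_decode()'s
+# return code; illustrative only, with simplified error handling.
+def _sketch_eval_prompt(ctx: llama_context_p, tokens: list) -> None:
+    arr = (llama_token * len(tokens))(*tokens)   # must stay alive while the batch is used
+    batch = llama_batch_get_one(arr, len(tokens))
+    rc = llama_decode(ctx, batch)
+    if rc == 1:
+        raise RuntimeError("no KV slot for the batch - reduce the batch or grow the context")
+    if rc < 0:
+        raise RuntimeError(f"llama_decode failed with status {rc}")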
+
+
+# // Set the number of threads used for decoding
+# // n_threads is the number of threads used for generation (single token)
+# // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
+# LLAMA_API void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch);
+@ctypes_function(
+    "llama_set_n_threads",
+    [
+        llama_context_p_ctypes,
+        ctypes.c_int32,
+        ctypes.c_int32,
+    ],
+    None,
+)
+def llama_set_n_threads(
+    ctx: llama_context_p,
+    n_threads: Union[ctypes.c_int32, int],
+    n_threads_batch: Union[ctypes.c_int32, int],
+    /,
+):
+    """Set the number of threads used for decoding
+    n_threads is the number of threads used for generation (single token)
+    n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
+    """
+    ...
+
+
+# // Get the number of threads used for generation of a single token.
+# LLAMA_API int32_t llama_n_threads(struct llama_context * ctx);
+@ctypes_function("llama_n_threads", [llama_context_p_ctypes], ctypes.c_int32)
+def llama_n_threads(ctx: llama_context_p, /) -> int:
+    """Get the number of threads used for generation of a single token"""
+    ...
+
+
+# // Get the number of threads used for prompt and batch processing (multiple tokens).
+# LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx);
+@ctypes_function("llama_n_threads_batch", [llama_context_p_ctypes], ctypes.c_int32)
+def llama_n_threads_batch(ctx: llama_context_p, /) -> int:
+    """Get the number of threads used for prompt and batch processing (multiple tokens)"""
+    ...
+
+
+# // Set whether the model is in embeddings mode or not
+# // If true, embeddings will be returned but logits will not
+# LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings);
+@ctypes_function("llama_set_embeddings", [llama_context_p_ctypes, ctypes.c_bool], None)
+def llama_set_embeddings(ctx: llama_context_p, embeddings: bool, /):
+    """Set whether the model is in embeddings mode or not
+    If true, embeddings will be returned but logits will not"""
+    ...
+
+
+# // Set whether to use causal attention or not
+# // If set to true, the model will only attend to the past tokens
+# LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn);
+@ctypes_function("llama_set_causal_attn", [llama_context_p_ctypes, ctypes.c_bool], None)
+def llama_set_causal_attn(ctx: llama_context_p, causal_attn: bool, /):
+    """Set whether to use causal attention or not
+    If set to true, the model will only attend to the past tokens"""
+    ...
+
+
+# // Set whether the model is in warmup mode or not
+# // If true, all model tensors are activated during llama_decode() to load and cache their weights.
+# LLAMA_API void llama_set_warmup(struct llama_context * ctx, bool warmup);
+@ctypes_function("llama_set_warmup", [llama_context_p_ctypes, ctypes.c_bool], None)
+def llama_set_warmup(ctx: llama_context_p, warmup: bool, /):
+    """Set whether the model is in warmup mode or not
+    If true, all model tensors are activated during llama_decode() to load and cache their weights."""
+    ...
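+
+
+# NOTE (editor's sketch): typical context configuration via the setters above;
+# the thread counts are arbitrary example values.
+def _sketch_configure_context(ctx: llama_context_p) -> None:
+    llama_set_n_threads(ctx, 4, 8)     # 4 threads per token, 8 for prompt/batch processing
+    assert llama_n_threads(ctx) == 4
+    assert llama_n_threads_batch(ctx) == 8
+    llama_set_embeddings(ctx, False)   # return logits rather than embeddings
+    llama_set_causal_attn(ctx, True)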
+ + +# // Set abort callback +# LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data); +@ctypes_function( + "llama_set_abort_callback", + [llama_context_p_ctypes, ggml_abort_callback, ctypes.c_void_p], + None, +) +def llama_set_abort_callback( + ctx: llama_context_p, + abort_callback: Callable[[ctypes.c_void_p], None], + abort_callback_data: ctypes.c_void_p, + /, +): + """Set abort callback""" + ... + + +# // Wait until all computations are finished +# // This is automatically done when using one of the functions below to obtain the computation results +# // and is not necessary to call it explicitly in most cases +# LLAMA_API void llama_synchronize(struct llama_context * ctx); +@ctypes_function("llama_synchronize", [llama_context_p_ctypes], None) +def llama_synchronize(ctx: llama_context_p, /): + """Wait until all computations are finished + This is automatically done when using one of the functions below to obtain the computation results + and is not necessary to call it explicitly in most cases""" + ... + + +# // Token logits obtained from the last call to llama_decode() +# // The logits for which llama_batch.logits[i] != 0 are stored contiguously +# // in the order they have appeared in the batch. +# // Rows: number of tokens for which llama_batch.logits[i] != 0 +# // Cols: n_vocab +# LLAMA_API float * llama_get_logits(struct llama_context * ctx); +@ctypes_function( + "llama_get_logits", [llama_context_p_ctypes], ctypes.POINTER(ctypes.c_float) +) +def llama_get_logits(ctx: llama_context_p, /) -> CtypesArray[ctypes.c_float]: + """Token logits obtained from the last call to llama_decode() + The logits for which llama_batch.logits[i] != 0 are stored contiguously + in the order they have appeared in the batch. + Rows: number of tokens for which llama_batch.logits[i] != 0 + Cols: n_vocab + + Returns: + Pointer to the logits buffer of shape (n_tokens, n_vocab)""" + ... + + +# // Logits for the ith token. For positive indices, Equivalent to: +# // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab +# // Negative indicies can be used to access logits in reverse order, -1 is the last logit. +# // returns NULL for invalid ids. +# LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i); +@ctypes_function( + "llama_get_logits_ith", + [llama_context_p_ctypes, ctypes.c_int32], + ctypes.POINTER(ctypes.c_float), +) +def llama_get_logits_ith( + ctx: llama_context_p, i: Union[ctypes.c_int32, int], / +) -> CtypesArray[ctypes.c_float]: + """Logits for the ith token. Equivalent to: + llama_get_logits(ctx) + i*n_vocab""" + ... + + +# // Get all output token embeddings. +# // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model, +# // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously +# // in the order they have appeared in the batch. +# // shape: [n_outputs*n_embd] +# // Otherwise, returns NULL. +# LLAMA_API float * llama_get_embeddings(struct llama_context * ctx); +@ctypes_function( + "llama_get_embeddings", [llama_context_p_ctypes], ctypes.POINTER(ctypes.c_float) +) +def llama_get_embeddings(ctx: llama_context_p, /) -> CtypesArray[ctypes.c_float]: + """Get the embeddings for the input + shape: [n_embd] (1-dimensional)""" + ... + + +# // Get the embeddings for the ith token. 
For positive indices, Equivalent to: +# // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd +# // Negative indicies can be used to access embeddings in reverse order, -1 is the last embedding. +# // shape: [n_embd] (1-dimensional) +# // returns NULL for invalid ids. +# LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i); +@ctypes_function( + "llama_get_embeddings_ith", + [llama_context_p_ctypes, ctypes.c_int32], + ctypes.POINTER(ctypes.c_float), +) +def llama_get_embeddings_ith( + ctx: llama_context_p, i: Union[ctypes.c_int32, int], / +) -> CtypesArray[ctypes.c_float]: + """Get the embeddings for the ith sequence + llama_get_embeddings(ctx) + i*n_embd""" + ... + + +# // Get the embeddings for a sequence id +# // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE +# // when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[1] with the rank of the sequence +# // otherwise: float[n_embd] (1-dimensional) +# LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id); +@ctypes_function( + "llama_get_embeddings_seq", + [llama_context_p_ctypes, llama_seq_id], + ctypes.POINTER(ctypes.c_float), +) +def llama_get_embeddings_seq( + ctx: llama_context_p, seq_id: Union[llama_seq_id, int], / +) -> CtypesArray[ctypes.c_float]: + """Get the embeddings for a sequence id + Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE + shape: [n_embd] (1-dimensional)""" + ... + + +# // +# // Vocab +# // + + +# LLAMA_API const char * llama_vocab_get_text(const struct llama_vocab * vocab, llama_token token); +@ctypes_function( + "llama_vocab_get_text", [llama_vocab_p_ctypes, llama_token], ctypes.c_char_p +) +def llama_vocab_get_text( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bytes: + ... + + +# LLAMA_API float llama_vocab_get_score(const struct llama_vocab * vocab, llama_token token); +@ctypes_function( + "llama_vocab_get_score", [llama_vocab_p_ctypes, llama_token], ctypes.c_float +) +def llama_vocab_get_score( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> float: + ... + + +# LLAMA_API enum llama_token_attr llama_vocab_get_attr(const struct llama_vocab * vocab, llama_token token); +@ctypes_function( + "llama_vocab_get_attr", [llama_vocab_p_ctypes, llama_token], ctypes.c_int +) +def llama_vocab_get_attr( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> int: + ... + + +# // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.) +# LLAMA_API bool llama_vocab_is_eog(const struct llama_vocab * vocab, llama_token token); +@ctypes_function( + "llama_vocab_is_eog", [llama_vocab_p_ctypes, llama_token], ctypes.c_bool +) +def llama_vocab_is_eog(vocab: llama_vocab_p, token: Union[llama_token, int], /) -> bool: + """Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.)""" + ... + + +# // Identify if Token Id is a control token or a render-able token +# LLAMA_API bool llama_vocab_is_control(const struct llama_vocab * vocab, llama_token token); +@ctypes_function( + "llama_vocab_is_control", [llama_vocab_p_ctypes, llama_token], ctypes.c_bool +) +def llama_vocab_is_control( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bool: + """Identify if Token Id is a control token or a render-able token""" + ... 
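+
+
+# NOTE (editor's sketch): greedy selection from the logits of the last token in
+# the batch; illustrative only. `n_vocab` is passed in rather than queried here
+# so the example stays limited to the functions defined above.
+def _sketch_greedy_token(ctx: llama_context_p, n_vocab: int) -> int:
+    logits = llama_get_logits_ith(ctx, -1)   # float* of length n_vocab, NULL if invalid
+    if not logits:
+        raise RuntimeError("no logits available for index -1")
+    best = 0
+    for tok in range(1, n_vocab):
+        if logits[tok] > logits[best]:
+            best = tok
+    return best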
+ + +# // Special tokens + + +# LLAMA_API llama_token llama_vocab_bos(const struct llama_vocab * vocab); // beginning-of-sentence +@ctypes_function("llama_vocab_bos", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_bos(vocab: llama_vocab_p, /) -> llama_token: + """beginning-of-sentence""" + ... + + +# LLAMA_API llama_token llama_vocab_eos(const struct llama_vocab * vocab); // end-of-sentence +@ctypes_function("llama_vocab_eos", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_eos(vocab: llama_vocab_p, /) -> llama_token: + """end-of-sentence""" + ... + + +# LLAMA_API llama_token llama_vocab_eot(const struct llama_vocab * vocab); // end-of-turn +@ctypes_function("llama_vocab_eot", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_eot(vocab: llama_vocab_p, /) -> llama_token: + """end-of-turn""" + ... + + +# LLAMA_API llama_token llama_vocab_sep(const struct llama_vocab * vocab); // sentence separator +@ctypes_function("llama_vocab_sep", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_sep(vocab: llama_vocab_p, /) -> llama_token: + """sentence separator""" + ... + + +# LLAMA_API llama_token llama_vocab_nl (const struct llama_vocab * vocab); // next-line +@ctypes_function("llama_vocab_nl", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_nl(vocab: llama_vocab_p, /) -> llama_token: + """next-line""" + ... + + +# LLAMA_API llama_token llama_vocab_pad(const struct llama_vocab * vocab); // padding +@ctypes_function("llama_vocab_pad", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_pad(vocab: llama_vocab_p, /) -> llama_token: + """padding""" + ... + +# LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_get_add_bos", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_vocab_get_add_bos(vocab: llama_vocab_p, /) -> bool: + ... + + +# LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_get_add_eos", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_vocab_get_add_eos(vocab: llama_vocab_p, /) -> bool: + ... + + +# LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_pre", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_pre(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_suf", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_suf(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# LLAMA_API llama_token llama_vocab_fim_mid(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_mid", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_mid(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# LLAMA_API llama_token llama_vocab_fim_pad(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_pad", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_pad(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# LLAMA_API llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_rep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_rep(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# LLAMA_API llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_sep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_sep(vocab: llama_vocab_p, /) -> llama_token: + ... 
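+
+
+# NOTE (editor's sketch): a stop-condition helper built on the vocab queries
+# above; illustrative only. llama_vocab_is_eog() already covers EOS/EOT, so the
+# explicit comparisons are shown purely for illustration.
+def _sketch_should_stop(vocab: llama_vocab_p, token: int) -> bool:
+    if llama_vocab_is_eog(vocab, token):
+        return True
+    return token in (llama_vocab_eos(vocab), llama_vocab_eot(vocab))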
+ + + +# DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_text instead"); +@ctypes_function( + "llama_token_get_text", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_char_p, +) +def llama_token_get_text( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bytes: + ... + + +# DEPRECATED(LLAMA_API float llama_token_get_score(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_score instead"); +@ctypes_function( + "llama_token_get_score", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_float, +) +def llama_token_get_score( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> float: + ... + +# DEPRECATED(LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_attr instead"); +@ctypes_function( + "llama_token_get_attr", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_int, +) +def llama_token_get_attr( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> int: + ... + +# DEPRECATED(LLAMA_API bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_eog instead"); +@ctypes_function( + "llama_token_is_eog", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_bool, +) +def llama_token_is_eog( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bool: + ... + +# DEPRECATED(LLAMA_API bool llama_token_is_control(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_control instead"); +@ctypes_function( + "llama_token_is_control", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_bool, +) +def llama_token_is_control( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bool: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_bos(const struct llama_vocab * vocab), "use llama_vocab_bos instead"); +@ctypes_function( + "llama_token_bos", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_bos(vocab: llama_vocab_p, /) -> int: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_eos(const struct llama_vocab * vocab), "use llama_vocab_eos instead"); +@ctypes_function( + "llama_token_eos", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_eos(vocab: llama_vocab_p, /) -> int: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_eot(const struct llama_vocab * vocab), "use llama_vocab_eot instead"); +@ctypes_function( + "llama_token_eot", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_eot(vocab: llama_vocab_p, /) -> int: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_cls(const struct llama_vocab * vocab), "use llama_vocab_cls instead"); +@ctypes_function( + "llama_token_cls", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_cls(vocab: llama_vocab_p, /) -> int: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_sep(const struct llama_vocab * vocab), "use llama_vocab_sep instead"); +@ctypes_function( + "llama_token_sep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_sep(vocab: llama_vocab_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API llama_token llama_token_nl (const struct llama_vocab * vocab), "use llama_vocab_nl instead"); +@ctypes_function( + "llama_token_nl", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_nl(vocab: llama_vocab_p, /) -> int: + ... 
+ + +# DEPRECATED(LLAMA_API llama_token llama_token_pad(const struct llama_vocab * vocab), "use llama_vocab_pad instead"); +@ctypes_function( + "llama_token_pad", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_pad(vocab: llama_vocab_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API bool llama_add_bos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_bos instead"); +@ctypes_function( + "llama_add_bos_token", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_add_bos_token(vocab: llama_vocab_p, /) -> bool: + ... + +# DEPRECATED(LLAMA_API bool llama_add_eos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_eos instead"); +@ctypes_function( + "llama_add_eos_token", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_add_eos_token(vocab: llama_vocab_p, /) -> bool: + ... + + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_pre(const struct llama_vocab * vocab), "use llama_vocab_fim_pre instead"); +@ctypes_function( + "llama_token_fim_pre", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_pre(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_suf(const struct llama_vocab * vocab), "use llama_vocab_fim_suf instead"); +@ctypes_function( + "llama_token_fim_suf", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_suf(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_mid(const struct llama_vocab * vocab), "use llama_vocab_fim_mid instead"); +@ctypes_function( + "llama_token_fim_mid", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_mid(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_pad(const struct llama_vocab * vocab), "use llama_vocab_fim_pad instead"); +@ctypes_function( + "llama_token_fim_pad", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_pad(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_rep(const struct llama_vocab * vocab), "use llama_vocab_fim_rep instead"); +@ctypes_function( + "llama_token_fim_rep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_rep(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_sep(const struct llama_vocab * vocab), "use llama_vocab_fim_sep instead"); +@ctypes_function( + "llama_token_fim_sep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_sep(vocab: llama_vocab_p, /) -> llama_token: + ... + +# // CLS is equivalent to BOS +# DEPRECATED(LLAMA_API llama_token llama_vocab_cls(const struct llama_vocab * vocab), // classification +# "use llama_vocab_bos instead"); +@ctypes_function( + "llama_vocab_cls", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_cls(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# // +# // Tokenization +# // +# // The API is thread-safe. +# // + + +# /// @details Convert the provided text into tokens. +# /// @param tokens The tokens pointer must be large enough to hold the resulting tokens. +# /// @return Returns the number of tokens on success, no more than n_tokens_max +# /// @return Returns a negative number on failure - the number of tokens that would have been returned +# /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so. +# /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated +# /// as plaintext. 
Does not insert a leading space. +# LLAMA_API int32_t llama_tokenize( +# const struct llama_vocab * vocab, +# const char * text, +# int32_t text_len, +# llama_token * tokens, +# int32_t n_tokens_max, +# bool add_special, +# bool parse_special); +@ctypes_function( + "llama_tokenize", + [ + llama_vocab_p_ctypes, + ctypes.c_char_p, + ctypes.c_int32, + llama_token_p, + ctypes.c_int32, + ctypes.c_bool, + ctypes.c_bool, + ], + ctypes.c_int32, +) +def llama_tokenize( + vocab: llama_vocab_p, + text: bytes, + text_len: Union[ctypes.c_int, int], + tokens: CtypesArray[llama_token], + n_tokens_max: Union[ctypes.c_int, int], + add_special: Union[ctypes.c_bool, bool], + parse_special: Union[ctypes.c_bool, bool], + /, +) -> int: + """Convert the provided text into tokens. + + Args: + vocab: The vocabulary to use for tokenization. + text: The text to tokenize. + text_len: The length of the text. + tokens: The tokens pointer must be large enough to hold the resulting tokens. + n_max_tokens: The maximum number of tokens to return. + add_special: Allow adding special tokenns if the model is configured to do so. + parse_special: Allow parsing special tokens. + + Returns: + Returns the number of tokens on success, no more than n_tokens_max + Returns a negative number on failure - the number of tokens that would have been returned + """ + ... + + +# // Token Id -> Piece. +# // Uses the vocabulary in the provided context. +# // Does not write null terminator to the buffer. +# // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix') +# // @param special If true, special tokens are rendered in the output. +# LLAMA_API int32_t llama_token_to_piece( +# const struct llama_vocab * vocab, +# llama_token token, +# char * buf, +# int32_t length, +# int32_t lstrip, +# bool special); +@ctypes_function( + "llama_token_to_piece", + [ + llama_vocab_p_ctypes, + llama_token, + ctypes.c_char_p, + ctypes.c_int32, + ctypes.c_int32, + ctypes.c_bool, + ], + ctypes.c_int32, +) +def llama_token_to_piece( + vocab: llama_vocab_p, + token: Union[llama_token, int], + buf: Union[ctypes.c_char_p, bytes, CtypesArray[ctypes.c_char]], + length: Union[ctypes.c_int, int], + lstrip: Union[ctypes.c_int, int], + special: Union[ctypes.c_bool, bool], + /, +) -> int: + """Token Id -> Piece. + Uses the vocabulary in the provided context. + Does not write null terminator to the buffer. + User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens. + + Args: + vocab: The vocabulary to use for tokenization. + token: The token to convert. + buf: The buffer to write the token to. + length: The length of the buffer. + lstrip: The number of leading spaces to skip. + special: If true, special tokens are rendered in the output.""" + ... + + +# # // check if token0 is contained as a prefix in token1 +# # LLAMA_API bool llama_token_is_prefix( +# # const struct llama_model * model, +# # llama_token token0, +# # llama_token token1); +# @ctypes_function( +# "llama_token_is_prefix", +# [llama_model_p_ctypes, llama_token, llama_token], +# ctypes.c_bool, +# ) +# def llama_token_is_prefix( +# model: llama_model_p, token0: Union[llama_token, int], token1: Union[llama_token, int], / +# ) -> bool: +# """Check if token0 is contained as a prefix in token1""" +# ... + + +# /// @details Convert the provided tokens into text (inverse of llama_tokenize()). +# /// @param text The char pointer must be large enough to hold the resulting text. 
+# /// @return Returns the number of chars/bytes on success, no more than text_len_max. +# /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned. +# /// @param remove_special Allow to remove BOS and EOS tokens if model is configured to do so. +# /// @param unparse_special If true, special tokens are rendered in the output. +# LLAMA_API int32_t llama_detokenize( +# const struct llama_model * model, +# const llama_token * tokens, +# int32_t n_tokens, +# char * text, +# int32_t text_len_max, +# bool remove_special, +# bool unparse_special); +@ctypes_function( + "llama_detokenize", + [ + llama_model_p_ctypes, + ctypes.POINTER(llama_token), + ctypes.c_int32, + ctypes.c_char_p, + ctypes.c_int32, + ctypes.c_bool, + ctypes.c_bool, + ], + ctypes.c_int32, +) +def llama_detokenize( + model: llama_model_p, + tokens: CtypesArray[llama_token], + n_tokens: Union[ctypes.c_int, int], + text: bytes, + text_len_max: Union[ctypes.c_int, int], + remove_special: Union[ctypes.c_bool, bool], + unparse_special: Union[ctypes.c_bool, bool], + /, +) -> int: + """Convert the provided tokens into text (inverse of llama_tokenize()). + + Args: + model: The model to use for tokenization. + tokens: The tokens to convert. + n_tokens: The number of tokens. + text: The buffer to write the text to. + text_len_max: The length of the buffer. + remove_special: Allow to remove BOS and EOS tokens if model is configured to do so. + unparse_special: If true, special tokens are rendered in the output.""" + ... + + +# // +# // Chat templates +# // + + +# /// Apply chat template. Inspired by hf apply_chat_template() on python. +# /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model" +# /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template +# /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead. +# /// @param chat Pointer to a list of multiple llama_chat_message +# /// @param n_msg Number of llama_chat_message in this chat +# /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message. +# /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages) +# /// @param length The size of the allocated buffer +# /// @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template. +# LLAMA_API int32_t llama_chat_apply_template( +# const char * tmpl, +# const struct llama_chat_message * chat, +# size_t n_msg, +# bool add_ass, +# char * buf, +# int32_t length); +@ctypes_function( + "llama_chat_apply_template", + [ + ctypes.c_char_p, # tmpl + ctypes.POINTER(llama_chat_message), # chat + ctypes.c_size_t, # n_msg + ctypes.c_bool, # add_ass (added) + ctypes.c_char_p, # buf + ctypes.c_int32, # length + ], + ctypes.c_int32, +) +def llama_chat_apply_template( + tmpl: bytes, + chat: CtypesArray[llama_chat_message], + n_msg: int, + add_ass: bool, # Added parameter + buf: bytes, + length: int, + /, +) -> int: + """Apply chat template. + + Args: + tmpl: Template to use. 
If None, uses model's default + chat: Array of chat messages + n_msg: Number of messages + add_ass: Whether to end prompt with assistant token + buf: Output buffer + length: Buffer length + + Returns: + Number of bytes written, or needed if buffer too small + """ + ... + + +# // Get list of built-in chat templates +# LLAMA_API int32_t llama_chat_builtin_templates(const char ** output, size_t len); +@ctypes_function( + "llama_chat_builtin_templates", + [ + ctypes.POINTER(ctypes.c_char_p), + ctypes.c_size_t, + ], + ctypes.c_int32, +) +def llama_chat_builtin_templates( + output: CtypesArray[bytes], + len: Union[ctypes.c_size_t, int], + /, +) -> int: + """Get list of built-in chat templates. + + Args: + output: Output buffer to store template names. + len: Length of the output buffer. + + Returns: + Number of templates available. + Returns a negative number on error. + """ + ... + + +# // +# // Sampling API +# // +# // Sample usage: +# // +# // // prepare the sampling chain at the start +# // auto sparams = llama_sampler_chain_default_params(); +# // +# // llama_sampler * smpl = llama_sampler_chain_init(sparams); +# // +# // llama_sampler_chain_add(smpl, llama_sampler_init_top_k(50)); +# // llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1)); +# // llama_sampler_chain_add(smpl, llama_sampler_init_temp (0.8)); +# // +# // // typically, the chain should end with a sampler such as "greedy", "dist" or "mirostat" +# // // this sampler will be responsible to select the actual token +# // llama_sampler_chain_add(smpl, llama_sampler_init_dist(seed)); +# // +# // ... +# // +# // // decoding loop: +# // while (...) { +# // ... +# // +# // llama_decode(ctx, batch); +# // +# // // sample from the logits of the last token in the batch +# // const llama_token id = llama_sampler_sample(smpl, ctx, -1); +# // +# // // accepting the token updates the internal state of certain samplers (e.g. grammar, repetition, etc.) +# // llama_sampler_accept(smpl, id); +# // ... +# // } +# // +# // llama_sampler_free(smpl); +# // +# // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU). +# // + +# typedef void * llama_sampler_context_t; +llama_sampler_context_t = ctypes.c_void_p + + +# // user code can implement the interface below in order to create custom llama_sampler +# struct llama_sampler_i { +# const char * (*name) (const struct llama_sampler * smpl); // can be NULL +# void (*accept)( struct llama_sampler * smpl, llama_token token); // can be NULL +# void (*apply) ( struct llama_sampler * smpl, llama_token_data_array * cur_p); // required +# void (*reset) ( struct llama_sampler * smpl); // can be NULL +# struct llama_sampler * (*clone) (const struct llama_sampler * smpl); // can be NULL if ctx is NULL +# void (*free) ( struct llama_sampler * smpl); // can be NULL if ctx is NULL +# +# // TODO: API for internal libllama usage for appending the sampling to an existing ggml_cgraph +# //void (*apply_ggml) (struct llama_sampler * smpl, ...); +# }; +class llama_sampler_i(ctypes.Structure): + ... 
+ + +# struct llama_sampler { +# const struct llama_sampler_i * iface; +# llama_sampler_context_t ctx; +# }; +class llama_sampler(ctypes.Structure): + _fields_ = [ + ("iface", ctypes.POINTER(llama_sampler_i)), + ("ctx", llama_sampler_context_t), + ] + + +if TYPE_CHECKING: + llama_sampler_p = CtypesPointer[llama_sampler] + +llama_sampler_p_ctypes = ctypes.POINTER(llama_sampler) + +llama_sampler_i_name = ctypes.CFUNCTYPE(ctypes.c_char_p, llama_sampler_p_ctypes) +llama_sampler_i_accept = ctypes.CFUNCTYPE(None, llama_sampler_p_ctypes, llama_token) +llama_sampler_i_apply = ctypes.CFUNCTYPE( + None, llama_sampler_p_ctypes, llama_token_data_array_p +) +llama_sampler_i_reset = ctypes.CFUNCTYPE(None, llama_sampler_p_ctypes) +llama_sampler_i_clone = ctypes.CFUNCTYPE(llama_sampler_p_ctypes, llama_sampler_p_ctypes) +llama_sampler_i_free = ctypes.CFUNCTYPE(None, llama_sampler_p_ctypes) + +llama_sampler_i._fields_ = [ + ("name", llama_sampler_i_name), + ("accept", llama_sampler_i_accept), + ("apply", llama_sampler_i_apply), + ("reset", llama_sampler_i_reset), + ("clone", llama_sampler_i_clone), + ("free", llama_sampler_i_free), +] + + +# // mirror of llama_sampler_i: +# LLAMA_API struct llama_sampler * llama_sampler_init (const struct llama_sampler_i * iface, llama_sampler_context_t ctx); +@ctypes_function( + "llama_sampler_init", + [ctypes.POINTER(llama_sampler_i), llama_sampler_context_t], + llama_sampler_p_ctypes, +) +def llama_sampler_init( + iface: ctypes.POINTER(llama_sampler_i), ctx: llama_sampler_context_t, / +) -> llama_sampler_p: + ... + + +# LLAMA_API const char * llama_sampler_name (const struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_name", + [llama_sampler_p_ctypes], + ctypes.c_char_p, +) +def llama_sampler_name(smpl: llama_sampler_p, /) -> bytes: + ... + + +# LLAMA_API void llama_sampler_accept( struct llama_sampler * smpl, llama_token token); +@ctypes_function( + "llama_sampler_accept", + [llama_sampler_p_ctypes, llama_token], + None, +) +def llama_sampler_accept(smpl: llama_sampler_p, token: Union[llama_token, int], /): + ... + + +# LLAMA_API void llama_sampler_apply ( struct llama_sampler * smpl, llama_token_data_array * cur_p); +@ctypes_function( + "llama_sampler_apply", + [llama_sampler_p_ctypes, llama_token_data_array_p], + None, +) +def llama_sampler_apply( + smpl: llama_sampler_p, cur_p: CtypesArray[llama_token_data_array], / +): + ... + + +# LLAMA_API void llama_sampler_reset ( struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_reset", + [llama_sampler_p_ctypes], + None, +) +def llama_sampler_reset(smpl: llama_sampler_p, /): + ... + + +# LLAMA_API struct llama_sampler * llama_sampler_clone (const struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_clone", + [llama_sampler_p_ctypes], + llama_sampler_p_ctypes, +) +def llama_sampler_clone(smpl: llama_sampler_p, /) -> llama_sampler_p: + ... + + +# // important: do not free if the sampler has been added to a llama_sampler_chain (via llama_sampler_chain_add) +# LLAMA_API void llama_sampler_free ( struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_free", + [llama_sampler_p_ctypes], + None, +) +def llama_sampler_free(smpl: llama_sampler_p, /): + ... 
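
# --- Editor's illustrative sketch, not part of the upstream bindings ---
# The llama_sampler_i / llama_sampler_init pair above is how user code plugs a
# pure-Python sampler into the API. The hypothetical helper below builds a do-nothing
# sampler: "apply" is the only required callback, and accept/reset/clone/free may stay
# NULL because ctx is NULL. The iface struct (which keeps the callback thunks alive via
# ctypes) must outlive the sampler, so it is returned alongside the handle.
def _example_custom_noop_sampler():
    """Hypothetical helper showing the custom-sampler interface; illustration only."""
    name_bytes = b"noop"

    def _name(smpl):
        return name_bytes  # bytes held by the closure so the returned char* stays valid

    def _apply(smpl, cur_p):
        pass  # leave the candidate token array untouched

    iface = llama_sampler_i()
    iface.name = llama_sampler_i_name(_name)
    iface.apply = llama_sampler_i_apply(_apply)
    smpl = llama_sampler_init(ctypes.byref(iface), llama_sampler_context_t(None))
    return smpl, iface  # caller must keep iface alive for as long as smpl is used
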
+ + +# // llama_sampler_chain +# // a type of llama_sampler that can chain multiple samplers one after another +# +# LLAMA_API struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params); +@ctypes_function( + "llama_sampler_chain_init", + [llama_sampler_chain_params], + llama_sampler_p_ctypes, +) +def llama_sampler_chain_init(params: llama_sampler_chain_params, /) -> llama_sampler_p: + ... + + +# // important: takes ownership of the sampler object and will free it when llama_sampler_free is called +# LLAMA_API void llama_sampler_chain_add( struct llama_sampler * chain, struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_chain_add", + [llama_sampler_p_ctypes, llama_sampler_p_ctypes], + None, +) +def llama_sampler_chain_add(chain: llama_sampler_p, smpl: llama_sampler_p, /): + ... + + +# LLAMA_API struct llama_sampler * llama_sampler_chain_get(const struct llama_sampler * chain, int32_t i); +@ctypes_function( + "llama_sampler_chain_get", + [llama_sampler_p_ctypes, ctypes.c_int32], + llama_sampler_p_ctypes, +) +def llama_sampler_chain_get( + chain: llama_sampler_p, i: Union[ctypes.c_int32, int], / +) -> llama_sampler_p: + ... + + +# LLAMA_API int llama_sampler_chain_n (const struct llama_sampler * chain); +@ctypes_function( + "llama_sampler_chain_n", + [llama_sampler_p_ctypes], + ctypes.c_int, +) +def llama_sampler_chain_n(chain: llama_sampler_p, /) -> int: + ... + + +# // after removing a sampler, the chain will no longer own it, and it will not be freed when the chain is freed +# LLAMA_API struct llama_sampler * llama_sampler_chain_remove( struct llama_sampler * chain, int32_t i); +@ctypes_function( + "llama_sampler_chain_remove", + [llama_sampler_p_ctypes, ctypes.c_int32], + llama_sampler_p_ctypes, +) +def llama_sampler_chain_remove( + chain: llama_sampler_p, i: Union[ctypes.c_int32, int], / +) -> llama_sampler_p: + ... + + +# // available samplers: +# +# LLAMA_API struct llama_sampler * llama_sampler_init_greedy(void); +@ctypes_function("llama_sampler_init_greedy", [], llama_sampler_p_ctypes) +def llama_sampler_init_greedy() -> llama_sampler_p: + ... + + +# LLAMA_API struct llama_sampler * llama_sampler_init_dist (uint32_t seed); +@ctypes_function("llama_sampler_init_dist", [ctypes.c_uint32], llama_sampler_p_ctypes) +def llama_sampler_init_dist(seed: int) -> llama_sampler_p: + ... + + +# /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits. +# /// NOTE: Avoid using on the full vocabulary as the sorting can become slow. For example, apply top-k or top-p sampling first. +# DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_softmax (void), +# "will be removed in the future (see https://github.com/ggerganov/llama.cpp/pull/9896#discussion_r1800920915)"); +@ctypes_function("llama_sampler_init_softmax", [], llama_sampler_p_ctypes) +def llama_sampler_init_softmax() -> llama_sampler_p: + ... + + +# /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 +# /// Setting k <= 0 makes this a noop +# LLAMA_API struct llama_sampler * llama_sampler_init_top_k (int32_t k); +@ctypes_function("llama_sampler_init_top_k", [ctypes.c_int32], llama_sampler_p_ctypes) +def llama_sampler_init_top_k(k: int) -> llama_sampler_p: + ... 
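
# --- Editor's illustrative sketch, not part of the upstream bindings ---
# The chain helpers above mirror the C usage comment in the Sampling API section.
# This hypothetical helper assumes llama_sampler_chain_default_params() is bound
# earlier in this module, as in the C header. Samplers added to a chain are owned
# by it, so only the chain itself needs to be freed.
def _example_build_sampler_chain(seed: int = 1234):
    """Hypothetical helper: top-k filtering followed by seeded token selection."""
    sparams = llama_sampler_chain_default_params()
    chain = llama_sampler_chain_init(sparams)
    llama_sampler_chain_add(chain, llama_sampler_init_top_k(40))
    llama_sampler_chain_add(chain, llama_sampler_init_dist(seed))  # picks the actual token
    assert llama_sampler_chain_n(chain) == 2
    return chain  # release with llama_sampler_free(chain) when done
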
+ + +# /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 +# LLAMA_API struct llama_sampler * llama_sampler_init_top_p (float p, size_t min_keep); +@ctypes_function( + "llama_sampler_init_top_p", + [ctypes.c_float, ctypes.c_size_t], + llama_sampler_p_ctypes, +) +def llama_sampler_init_top_p(p: float, min_keep: int) -> llama_sampler_p: + ... + + +# /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 +# LLAMA_API struct llama_sampler * llama_sampler_init_min_p (float p, size_t min_keep); +@ctypes_function( + "llama_sampler_init_min_p", + [ctypes.c_float, ctypes.c_size_t], + llama_sampler_p_ctypes, +) +def llama_sampler_init_min_p(p: float, min_keep: int) -> llama_sampler_p: + ... + + +# /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. +# LLAMA_API struct llama_sampler * llama_sampler_init_typical (float p, size_t min_keep); +@ctypes_function( + "llama_sampler_init_typical", + [ctypes.c_float, ctypes.c_size_t], + llama_sampler_p_ctypes, +) +def llama_sampler_init_typical(p: float, min_keep: int) -> llama_sampler_p: + ... + + +# LLAMA_API struct llama_sampler * llama_sampler_init_temp (float t); +@ctypes_function("llama_sampler_init_temp", [ctypes.c_float], llama_sampler_p_ctypes) +def llama_sampler_init_temp(t: float) -> llama_sampler_p: + ... + + +# /// @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772. +# LLAMA_API struct llama_sampler * llama_sampler_init_temp_ext (float t, float delta, float exponent); +@ctypes_function( + "llama_sampler_init_temp_ext", + [ctypes.c_float, ctypes.c_float, ctypes.c_float], + llama_sampler_p_ctypes, +) +def llama_sampler_init_temp_ext( + t: float, delta: float, exponent: float +) -> llama_sampler_p: + ... + + +# /// @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335 +# LLAMA_API struct llama_sampler * llama_sampler_init_xtc (float p, float t, size_t min_keep, uint32_t seed); +@ctypes_function( + "llama_sampler_init_xtc", + [ctypes.c_float, ctypes.c_float, ctypes.c_size_t, ctypes.c_uint32], + llama_sampler_p_ctypes, +) +def llama_sampler_init_xtc( + p: float, t: float, min_keep: int, seed: int, / +) -> llama_sampler_p: + ... + + +# /// @details Top n sigma sampling as described in academic paper "Top-nσ: Not All Logits Are You Need" https://arxiv.org/pdf/2411.07641 +# LLAMA_API struct llama_sampler * llama_sampler_init_top_n_sigma(float n); +@ctypes_function( + "llama_sampler_init_top_n_sigma", + [ctypes.c_float], + llama_sampler_p_ctypes, +) +def llama_sampler_init_top_n_sigma(n: float, /) -> llama_sampler_p: + ... + + +# /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. +# /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. +# /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. +# /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. 
A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. +# /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm. +# /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. +# LLAMA_API struct llama_sampler * llama_sampler_init_mirostat( +# int32_t n_vocab, +# uint32_t seed, +# float tau, +# float eta, +# int32_t m); +@ctypes_function( + "llama_sampler_init_mirostat", + [ctypes.c_int32, ctypes.c_uint32, ctypes.c_float, ctypes.c_float, ctypes.c_int32], + llama_sampler_p_ctypes, +) +def llama_sampler_init_mirostat( + n_vocab: int, seed: int, tau: float, eta: float, m: int, / +) -> llama_sampler_p: + ... + + +# /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. +# /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. +# /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. +# /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. +# /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. +# LLAMA_API struct llama_sampler * llama_sampler_init_mirostat_v2( +# uint32_t seed, +# float tau, +# float eta); +@ctypes_function( + "llama_sampler_init_mirostat_v2", + [ctypes.c_uint32, ctypes.c_float, ctypes.c_float], + llama_sampler_p_ctypes, +) +def llama_sampler_init_mirostat_v2( + seed: int, tau: float, eta: float, / +) -> llama_sampler_p: + ... + + +# /// @details Intializes a GBNF grammar, see grammars/README.md for details. +# /// @param vocab The vocabulary that this grammar will be used with. +# /// @param grammar_str The production rules for the grammar, encoded as a string. Returns an empty grammar if empty. Returns NULL if parsing of grammar_str fails. +# /// @param grammar_root The name of the start symbol for the grammar. +# LLAMA_API struct llama_sampler * llama_sampler_init_grammar( +# const struct llama_vocab * vocab, +# const char * grammar_str, +# const char * grammar_root); +@ctypes_function( + "llama_sampler_init_grammar", + [llama_vocab_p_ctypes, ctypes.c_char_p, ctypes.c_char_p], + llama_sampler_p_ctypes, +) +def llama_sampler_init_grammar( + vocab: llama_vocab_p, grammar_str: bytes, grammar_root: bytes, / +) -> llama_sampler_p: + ... + + +# /// @details Lazy grammar sampler, introduced in https://github.com/ggml-org/llama.cpp/pull/9639 +# /// @param trigger_patterns A list of patterns that will trigger the grammar sampler. 
Pattern will be matched from the start of the generation output, and grammar sampler will be fed content starting from its first match group. +# /// @param trigger_tokens A list of tokens that will trigger the grammar sampler. Grammar sampler will be fed content starting from the trigger token included. +# LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy_patterns( +# const struct llama_vocab * vocab, +# const char * grammar_str, +# const char * grammar_root, +# const char ** trigger_patterns, +# size_t num_trigger_patterns, +# const llama_token * trigger_tokens, +# size_t num_trigger_tokens); +@ctypes_function( + "llama_sampler_init_grammar_lazy_patterns", + [ + llama_vocab_p_ctypes, + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.POINTER(ctypes.c_char_p), + ctypes.c_size_t, + ctypes.POINTER(llama_token), + ctypes.c_size_t, + ], + llama_sampler_p_ctypes, +) +def llama_sampler_init_grammar_lazy_patterns( + vocab: llama_vocab_p, + grammar_str: bytes, + grammar_root: bytes, + trigger_patterns: CtypesArray[bytes], + num_trigger_patterns: int, + trigger_tokens: CtypesArray[llama_token], + num_trigger_tokens: int, + /, +) -> llama_sampler_p: + ... + + +# /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first. +# LLAMA_API struct llama_sampler * llama_sampler_init_penalties( +# int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) +# float penalty_repeat, // 1.0 = disabled +# float penalty_freq, // 0.0 = disabled +# float penalty_present); // 0.0 = disabled +@ctypes_function( + "llama_sampler_init_penalties", + [ctypes.c_int32, ctypes.c_float, ctypes.c_float, ctypes.c_float], + llama_sampler_p_ctypes, +) +def llama_sampler_init_penalties( + penalty_last_n: int, + penalty_repeat: float, + penalty_freq: float, + penalty_present: float, + /, +) -> llama_sampler_p: + ... + + +# /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982 +# LLAMA_API struct llama_sampler * llama_sampler_init_dry( +# const struct llama_vocab * vocab, +# int32_t n_ctx_train, +# float dry_multiplier, +# float dry_base, +# int32_t dry_allowed_length, +# int32_t dry_penalty_last_n, +# const char ** seq_breakers, +# size_t num_breakers); +@ctypes_function( + "llama_sampler_init_dry", + [ + llama_vocab_p_ctypes, + ctypes.c_int32, + ctypes.c_float, + ctypes.c_float, + ctypes.c_int32, + ctypes.c_int32, + ctypes.POINTER(ctypes.c_char_p), + ctypes.c_size_t, + ], + llama_sampler_p_ctypes, +) +def llama_sampler_init_dry( + vocab: llama_vocab_p, + n_ctx_train: int, + dry_multiplier: float, + dry_base: float, + dry_allowed_length: int, + dry_penalty_last_n: int, + seq_breakers, + num_breakers: int, + /, +) -> llama_sampler_p: + ... + + +# LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias( +# int32_t n_vocab, +# int32_t n_logit_bias, +# const llama_logit_bias * logit_bias); +@ctypes_function( + "llama_sampler_init_logit_bias", + [ctypes.c_int32, ctypes.c_int32, llama_logit_bias_p], + llama_sampler_p_ctypes, +) +def llama_sampler_init_logit_bias( + n_vocab: int, n_logit_bias: int, logit_bias: CtypesArray[llama_logit_bias], / +) -> llama_sampler_p: + ... + + +# // this sampler is meant to be used for fill-in-the-middle infilling +# // it's supposed to be used after top_k + top_p sampling +# // +# // 1. 
if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG +# // 2. combine probs of tokens that have the same prefix +# // +# // example: +# // +# // - before: +# // "hel": 0.5 +# // "hell": 0.2 +# // "hello": 0.1 +# // "dummy": 0.1 +# // +# // - after: +# // "hel": 0.8 +# // "dummy": 0.1 +# // +# // 3. discard non-EOG tokens with low prob +# // 4. if no tokens are left -> pick EOT +# // +# LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab); +@ctypes_function( + "llama_sampler_init_infill", + [llama_vocab_p_ctypes], + llama_sampler_p_ctypes, +) +def llama_sampler_init_infill(vocab: llama_vocab_p, /) -> llama_sampler_p: + ... + + +# // Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise +# LLAMA_API uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_get_seed", + [llama_sampler_p_ctypes], + ctypes.c_uint32, +) +def llama_sampler_get_seed(smpl: llama_sampler_p, /) -> int: + ... + + +# /// @details Sample and accept a token from the idx-th output of the last evaluation +# // +# // Shorthand for: +# // const auto * logits = llama_get_logits_ith(ctx, idx); +# // llama_token_data_array cur_p = { ... init from logits ... }; +# // llama_sampler_apply(smpl, &cur_p); +# // auto token = cur_p.data[cur_p.selected].id; +# // llama_sampler_accept(smpl, token); +# // return token; +# // Returns the sampled token +# LLAMA_API llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx); +@ctypes_function( + "llama_sampler_sample", + [llama_sampler_p_ctypes, llama_context_p_ctypes, ctypes.c_int32], + llama_token, +) +def llama_sampler_sample( + smpl: llama_sampler_p, ctx: llama_context_p, idx: int, / +) -> int: + ... + + +# // +# // Model split +# // + + +# /// @details Build a split GGUF final path for this chunk. +# /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf" +# // Returns the split_path length. +# LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count); +@ctypes_function( + "llama_split_path", + [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_char_p, ctypes.c_int, ctypes.c_int], + ctypes.c_int, +) +def llama_split_path( + split_path: bytes, + maxlen: Union[ctypes.c_size_t, int], + path_prefix: bytes, + split_no: Union[ctypes.c_int, int], + split_count: Union[ctypes.c_int, int], + /, +) -> int: + """Build a split GGUF final path for this chunk.""" + ... + + +# /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match. +# /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0" +# // Returns the split_prefix length. +# LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count); +@ctypes_function( + "llama_split_prefix", + [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_char_p, ctypes.c_int, ctypes.c_int], + ctypes.c_int, +) +def llama_split_prefix( + split_prefix: bytes, + maxlen: Union[ctypes.c_size_t, int], + split_path: bytes, + split_no: Union[ctypes.c_int, int], + split_count: Union[ctypes.c_int, int], + /, +) -> int: + """Extract the path prefix from the split_path if and only if the split_no and split_count match.""" + ... 
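
# --- Editor's illustrative sketch, not part of the upstream bindings ---
# How the two split helpers above round-trip, reproducing the example given in their
# comments. Both write into a caller-supplied buffer and return the number of bytes
# written.
def _example_split_paths():
    """Hypothetical helper mirroring the documented llama_split_path example."""
    buf = ctypes.create_string_buffer(256)
    llama_split_path(buf, ctypes.sizeof(buf), b"/models/ggml-model-q4_0", 2, 4)
    split_path = buf.value  # b"/models/ggml-model-q4_0-00002-of-00004.gguf"

    prefix_buf = ctypes.create_string_buffer(256)
    llama_split_prefix(prefix_buf, ctypes.sizeof(prefix_buf), split_path, 2, 4)
    return split_path, prefix_buf.value  # (..., b"/models/ggml-model-q4_0")
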
+ + +# // Print system information +# LLAMA_API const char * llama_print_system_info(void); +@ctypes_function("llama_print_system_info", [], ctypes.c_char_p) +def llama_print_system_info() -> bytes: + ... + + +# // Set callback for all future logging events. +# // If this is not called, or NULL is supplied, everything is output on stderr. +# LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data); +@ctypes_function( + "llama_log_set", + [ctypes.c_void_p, ctypes.c_void_p], + None, +) +def llama_log_set( + log_callback: Optional[CtypesFuncPointer], + user_data: ctypes.c_void_p, + /, +): + """Set callback for all future logging events. + + If this is not called, or NULL is supplied, everything is output on stderr.""" + ... + + +# // +# // Performance utils +# // +# // NOTE: Used by llama.cpp examples, avoid using in third-party apps. Instead, do your own performance measurements. +# // + + +# struct llama_perf_context_data { +# double t_start_ms; +# double t_load_ms; +# double t_p_eval_ms; +# double t_eval_ms; +# +# int32_t n_p_eval; +# int32_t n_eval; +# }; +class llama_perf_context_data(ctypes.Structure): + _fields_ = [ + ("t_start_ms", ctypes.c_double), + ("t_load_ms", ctypes.c_double), + ("t_p_eval_ms", ctypes.c_double), + ("t_eval_ms", ctypes.c_double), + ("n_p_eval", ctypes.c_int32), + ("n_eval", ctypes.c_int32), + ] + + +# struct llama_perf_sampler_data { +# double t_sample_ms; +# +# int32_t n_sample; +# }; +class llama_perf_sampler_data(ctypes.Structure): + _fields_ = [ + ("t_sample_ms", ctypes.c_double), + ("n_sample", ctypes.c_int32), + ] + + +# LLAMA_API struct llama_perf_context_data llama_perf_context (const struct llama_context * ctx); +@ctypes_function( + "llama_perf_context", + [llama_context_p_ctypes], + llama_perf_context_data, +) +def llama_perf_context(ctx: llama_context_p, /) -> llama_perf_context_data: + ... + + +# LLAMA_API void llama_perf_context_print(const struct llama_context * ctx); +@ctypes_function( + "llama_perf_context_print", + [llama_context_p_ctypes], + None, +) +def llama_perf_context_print(ctx: llama_context_p, /): + ... + + +# LLAMA_API void llama_perf_context_reset( struct llama_context * ctx); +@ctypes_function( + "llama_perf_context_reset", + [llama_context_p_ctypes], + None, +) +def llama_perf_context_reset(ctx: llama_context_p, /): + ... + + +# // NOTE: the following work only with samplers constructed via llama_sampler_chain_init +# LLAMA_API struct llama_perf_sampler_data llama_perf_sampler (const struct llama_sampler * chain); +@ctypes_function( + "llama_perf_sampler", + [llama_sampler_p_ctypes], + llama_perf_sampler_data, +) +def llama_perf_sampler(chain: llama_sampler_p, /) -> llama_perf_sampler_data: + ... + + +# LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain); +@ctypes_function( + "llama_perf_sampler_print", + [llama_sampler_p_ctypes], + None, +) +def llama_perf_sampler_print(chain: llama_sampler_p, /): + ... + + +# LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain); +@ctypes_function( + "llama_perf_sampler_reset", + [llama_sampler_p_ctypes], + None, +) +def llama_perf_sampler_reset(chain: llama_sampler_p, /): + ... 
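
# --- Editor's illustrative sketch, not part of the upstream bindings ---
# The perf structs above are returned by value, so their fields can be read directly to
# derive throughput. Upstream notes these helpers are intended for the llama.cpp
# examples rather than third-party apps, so treat this hypothetical helper as a
# debugging aid only.
def _example_decode_throughput(ctx: llama_context_p) -> float:
    """Hypothetical helper: decode throughput in tokens per second for a context."""
    data = llama_perf_context(ctx)
    if data.t_eval_ms <= 0.0:
        return 0.0
    return 1e3 * data.n_eval / data.t_eval_ms  # n_eval tokens over t_eval_ms milliseconds
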
+ + diff --git a/llama_cpp/llama_grammar.py b/llama_cpp/llama_grammar.py new file mode 100644 index 0000000000000000000000000000000000000000..b95c77ab5ec081feebb6fbf506af84ae07088f7b --- /dev/null +++ b/llama_cpp/llama_grammar.py @@ -0,0 +1,953 @@ +"""Python implementation of llama grammar parser directly translated from C++ source file in vendor/llama.cpp/common/grammar-parser.cpp.""" + +# flake8: noqa +from pathlib import Path + +from itertools import groupby +from typing import ( + Any, + Set, + List, + Optional, + Tuple, + Union, +) + +LLAMA_GRAMMAR_DEFAULT_ROOT = "root" + + +class LlamaGrammar: + def __init__(self, *args, _grammar: str, **kwargs): + self._grammar = _grammar + self._root = LLAMA_GRAMMAR_DEFAULT_ROOT + + @classmethod + def from_string(cls, grammar: str, verbose: bool = True) -> "LlamaGrammar": + return cls(_grammar=grammar) + + @classmethod + def from_file(cls, file: Union[str, Path], verbose: bool = True) -> "LlamaGrammar": + try: + with open(file) as f: + grammar = f.read() + except Exception as err: + raise Exception( + f"{cls.from_file.__name__}: error reading grammar file: {err}" + ) + + if grammar: + return cls.from_string(grammar, verbose=verbose) + + raise ValueError( + f"{cls.from_file.__name__}: error parsing grammar file: params_grammer is empty" + ) + + @classmethod + def from_json_schema(cls, json_schema: str, verbose: bool = True) -> "LlamaGrammar": + return cls.from_string(json_schema_to_gbnf(json_schema), verbose=verbose) + + +"""llama.cpp gbnf rules from vendor/llama.cpp/grammars""" + +ARITHMETIC_GBNF = r""" +root ::= (expr "=" ws term "\n")+ +expr ::= term ([-+*/] term)* +term ::= ident | num | "(" ws expr ")" ws +ident ::= [a-z] [a-z0-9_]* ws +num ::= [0-9]+ ws +ws ::= [ \t\n]* +""" + +C_GBNF = r""" +root ::= (declaration)* + +declaration ::= dataType identifier "(" parameter? ")" "{" statement* "}" + +dataType ::= "int" ws | "float" ws | "char" ws +identifier ::= [a-zA-Z_] [a-zA-Z_0-9]* + +parameter ::= dataType identifier + +statement ::= + ( dataType identifier ws "=" ws expression ";" ) | + ( identifier ws "=" ws expression ";" ) | + ( identifier ws "(" argList? ")" ";" ) | + ( "return" ws expression ";" ) | + ( "while" "(" condition ")" "{" statement* "}" ) | + ( "for" "(" forInit ";" ws condition ";" ws forUpdate ")" "{" statement* "}" ) | + ( "if" "(" condition ")" "{" statement* "}" ("else" "{" statement* "}")? ) | + ( singleLineComment ) | + ( multiLineComment ) + +forInit ::= dataType identifier ws "=" ws expression | identifier ws "=" ws expression +forUpdate ::= identifier ws "=" ws expression + +condition ::= expression relationOperator expression +relationOperator ::= ("<=" | "<" | "==" | "!=" | ">=" | ">") + +expression ::= term (("+" | "-") term)* +term ::= factor(("*" | "/") factor)* + +factor ::= identifier | number | unaryTerm | funcCall | parenExpression +unaryTerm ::= "-" factor +funcCall ::= identifier "(" argList? ")" +parenExpression ::= "(" ws expression ws ")" + +argList ::= expression ("," ws expression)* + +number ::= [0-9]+ + +singleLineComment ::= "//" [^\n]* "\n" +multiLineComment ::= "/*" ( [^*] | ("*" [^/]) )* "*/" + +ws ::= ([ \t\n]+) +""" + +CHESS_GBNF = r""" +root ::= object +value ::= object | array | string | number | ("true" | "false" | "null") ws + +object ::= + "{" ws ( + string ":" ws value + ("," ws string ":" ws value)* + )? "}" ws + +array ::= + "[" ws ( + value + ("," ws value)* + )? 
"]" ws + +string ::= + "\"" ( + [^"\\] | + "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes + )* "\"" ws + +number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws + +# Optional space: by convention, applied in this grammar after literal chars when allowed +ws ::= ([ \t\n] ws)? +""" + +JAPANESE_GBNF = r""" +root ::= object +value ::= object | array | string | number | ("true" | "false" | "null") ws + +object ::= + "{" ws ( + string ":" ws value + ("," ws string ":" ws value)* + )? "}" ws + +array ::= + "[" ws ( + value + ("," ws value)* + )? "]" ws + +string ::= + "\"" ( + [^"\\] | + "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes + )* "\"" ws + +number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws + +# Optional space: by convention, applied in this grammar after literal chars when allowed +ws ::= ([ \t\n] ws)? +""" + +JSON_ARR_GBNF = r""" +# This is the same as json.gbnf but we restrict whitespaces at the end of the root array +# Useful for generating JSON arrays + +root ::= arr +value ::= object | array | string | number | ("true" | "false" | "null") ws + +arr ::= + "[\n" ws ( + value + (",\n" ws value)* + )? "]" + +object ::= + "{" ws ( + string ":" ws value + ("," ws string ":" ws value)* + )? "}" ws + +array ::= + "[" ws ( + value + ("," ws value)* + )? "]" ws + +string ::= + "\"" ( + [^"\\\x7F\x00-\x1F] | + "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes + )* "\"" ws + +number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws + +# Optional space: by convention, applied in this grammar after literal chars when allowed +ws ::= ([ \t\n] ws)? +""" + + +JSON_GBNF = r""" +root ::= object +value ::= object | array | string | number | ("true" | "false" | "null") ws + +object ::= + "{" ws ( + string ":" ws value + ("," ws string ":" ws value)* + )? "}" ws + +array ::= + "[" ws ( + value + ("," ws value)* + )? "]" ws + +string ::= + "\"" ( + [^"\\\x7F\x00-\x1F] | + "\\" (["\\bfnrt] | "u" [0-9a-fA-F]{4}) # escapes + )* "\"" ws + +number ::= ("-"? ([0-9] | [1-9] [0-9]{0,15})) ("." [0-9]+)? ([eE] [-+]? [0-9] [1-9]{0,15})? ws + +# Optional space: by convention, applied in this grammar after literal chars when allowed +ws ::= | " " | "\n" [ \t]{0,20} +""" + +LIST_GBNF = r""" +root ::= item+ + +# Excludes various line break characters +item ::= "- " [^\r\n\x0b\x0c\x85\u2028\u2029]+ "\n" +""" + +"""llama.cpp json-schema to grammar converter from vendor/llama.cpp/examples/json-schema-to-grammar.py""" +import json +import re +from typing import List, Optional + +# whitespace is constrained to a single space char to prevent model "running away" in +# whitespace. Also maybe improves generation quality? +SPACE_RULE = '" "?' + + +INVALID_RULE_CHARS_RE = re.compile(r"[^a-zA-Z0-9-]+") +GRAMMAR_LITERAL_ESCAPE_RE = re.compile(r'[\r\n"]') +GRAMMAR_LITERAL_ESCAPES = {"\r": "\\r", "\n": "\\n", '"': '\\"'} + +# whitespace is constrained to a single space char to prevent model "running away" in +# whitespace. Also maybe improves generation quality? +SPACE_RULE = '" "?' + + +def _build_repetition( + item_rule, min_items, max_items, separator_rule=None, item_rule_is_literal=False +): + if not separator_rule: + if min_items == 0 and max_items == 1: + return f"{item_rule}?" 
+ elif min_items == 1 and max_items is None: + return f"{item_rule}+" + + result = "" + + if min_items > 0: + if item_rule_is_literal and separator_rule is None: + result = '"' + (item_rule[1:-1] * min_items) + '"' + else: + result = (f" {separator_rule} " if separator_rule else " ").join( + [item_rule] * min_items + ) + + def opt_repetitions(up_to_n, prefix_with_sep=False): + """ + - n=4, no sep: '(a (a (a (a)?)?)?)?' + - n=4, sep=',', prefix: '("," a ("," a ("," a ("," a)?)?)?)?' + - n=4, sep=',', no prefix: '(a ("," a ("," a ("," a)?)?)?)?' + """ + + content = ( + f"{separator_rule} {item_rule}" + if prefix_with_sep and separator_rule + else item_rule + ) + if up_to_n == 0: + return "" + elif up_to_n == 1: + return f"({content})?" + elif separator_rule and not prefix_with_sep: + return f"({content} {opt_repetitions(up_to_n - 1, prefix_with_sep=True)})?" + else: + return (f"({content} " * up_to_n).rstrip() + (")?" * up_to_n) + + if min_items > 0 and max_items != min_items: + result += " " + + if max_items is not None: + result += opt_repetitions(max_items - min_items, prefix_with_sep=min_items > 0) + else: + item_operator = f'({separator_rule + " " if separator_rule else ""}{item_rule})' + + if min_items == 0 and separator_rule: + result = f"({item_rule} {item_operator}*)?" + else: + result += f"{item_operator}*" + + return result + + +class BuiltinRule: + def __init__(self, content: str, deps: list = None): + self.content = content + self.deps = deps or [] + + +_up_to_15_digits = _build_repetition("[0-9]", 0, 15) + +PRIMITIVE_RULES = { + "boolean": BuiltinRule('("true" | "false") space', []), + "decimal-part": BuiltinRule("[0-9] " + _up_to_15_digits, []), + "integral-part": BuiltinRule("[0-9] | [1-9] " + _up_to_15_digits, []), + "number": BuiltinRule( + '("-"? integral-part) ("." decimal-part)? ([eE] [-+]? integral-part)? space', + ["integral-part", "decimal-part"], + ), + "integer": BuiltinRule('("-"? integral-part) space', ["integral-part"]), + "value": BuiltinRule( + "object | array | string | number | boolean | null", + ["object", "array", "string", "number", "boolean", "null"], + ), + "object": BuiltinRule( + '"{" space ( string ":" space value ("," space string ":" space value)* )? "}" space', + ["string", "value"], + ), + "array": BuiltinRule( + '"[" space ( value ("," space value)* )? "]" space', ["value"] + ), + "uuid": BuiltinRule( + r'"\"" ' + + ' "-" '.join("[0-9a-fA-F]" * n for n in [8, 4, 4, 4, 12]) + + r' "\"" space', + [], + ), + "char": BuiltinRule( + r'[^"\\] | "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F])', + [], + ), + "string": BuiltinRule(r'"\"" char* "\"" space', ["char"]), + "null": BuiltinRule('"null" space', []), +} + +# TODO: support "uri", "email" string formats +STRING_FORMAT_RULES = { + "date": BuiltinRule( + '[0-9] [0-9] [0-9] [0-9] "-" ( "0" [1-9] | "1" [0-2] ) "-" ( "0" [1-9] | [1-2] [0-9] | "3" [0-1] )', + [], + ), + "time": BuiltinRule( + '([01] [0-9] | "2" [0-3]) ":" [0-5] [0-9] ":" [0-5] [0-9] ( "." [0-9] [0-9] [0-9] )? 
( "Z" | ( "+" | "-" ) ( [01] [0-9] | "2" [0-3] ) ":" [0-5] [0-9] )', + [], + ), + "date-time": BuiltinRule('date "T" time', ["date", "time"]), + "date-string": BuiltinRule('"\\"" date "\\"" space', ["date"]), + "time-string": BuiltinRule('"\\"" time "\\"" space', ["time"]), + "date-time-string": BuiltinRule('"\\"" date-time "\\"" space', ["date-time"]), +} + +DOTALL = "[\\U00000000-\\U0010FFFF]" +DOT = "[^\\x0A\\x0D]" + +RESERVED_NAMES = set( + ["root", "dot", *PRIMITIVE_RULES.keys(), *STRING_FORMAT_RULES.keys()] +) + + +NON_LITERAL_SET = set("|.()[]{}*+?") +ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = set("[]()|{}*+?") + + +class SchemaConverter: + def __init__(self, *, prop_order, allow_fetch, dotall, raw_pattern): + self._prop_order = prop_order + self._allow_fetch = allow_fetch + self._dotall = dotall + self._raw_pattern = raw_pattern + self._rules = { + "space": SPACE_RULE, + } + self._refs = {} + self._refs_being_resolved = set() + + def _format_literal(self, literal): + escaped = GRAMMAR_LITERAL_ESCAPE_RE.sub( + lambda m: GRAMMAR_LITERAL_ESCAPES.get(m.group(0)), literal + ) + return f'"{escaped}"' + + def not_literal( + self, literal: str, dotall: bool = True, maybe_escaped_underscores=False + ) -> str: + """ + not_literal('a') -> '[^a]' + not_literal('abc') -> '([^a] | "a" ([^b] | "b" ([^c])?)?)?' + """ + assert len(literal) > 0, "Empty literal not supported" + + def recurse(i: int): + c = literal[i] + if maybe_escaped_underscores and c == "_": + yield f"[^{c}\\\\]" + yield " | " + yield f'"\\\\"? "{c}"' + else: + yield f"[^{c}]" + if i < len(literal) - 1: + yield " | " + yield self._format_literal(c) + yield " (" + yield from recurse(i + 1) + yield ")?" + + return "".join(("(", *recurse(0), ")")) + + def _add_rule(self, name, rule): + esc_name = INVALID_RULE_CHARS_RE.sub("-", name) + if esc_name not in self._rules or self._rules[esc_name] == rule: + key = esc_name + else: + i = 0 + while ( + f"{esc_name}{i}" in self._rules + and self._rules[f"{esc_name}{i}"] != rule + ): + i += 1 + key = f"{esc_name}{i}" + self._rules[key] = rule + return key + + def resolve_refs(self, schema: dict, url: str): + """ + Resolves all $ref fields in the given schema, fetching any remote schemas, + replacing $ref with absolute reference URL and populating self._refs with the + respective referenced (sub)schema dictionaries. 
+ """ + + def visit(n: dict): + if isinstance(n, list): + return [visit(x) for x in n] + elif isinstance(n, dict): + ref = n.get("$ref") + if ref is not None and ref not in self._refs: + if ref.startswith("https://"): + assert ( + self._allow_fetch + ), "Fetching remote schemas is not allowed (use --allow-fetch for force)" + import requests + + frag_split = ref.split("#") + base_url = frag_split[0] + + target = self._refs.get(base_url) + if target is None: + target = self.resolve_refs( + requests.get(ref).json(), base_url + ) + self._refs[base_url] = target + + if len(frag_split) == 1 or frag_split[-1] == "": + return target + elif ref.startswith("#/"): + target = schema + ref = f"{url}{ref}" + n["$ref"] = ref + else: + raise ValueError(f"Unsupported ref {ref}") + + for sel in ref.split("#")[-1].split("/")[1:]: + assert ( + target is not None and sel in target + ), f"Error resolving ref {ref}: {sel} not in {target}" + target = target[sel] + + self._refs[ref] = target + else: + for v in n.values(): + visit(v) + + return n + + return visit(schema) + + def _generate_union_rule(self, name, alt_schemas): + return " | ".join( + ( + self.visit(alt_schema, f'{name}{"-" if name else "alternative-"}{i}') + for i, alt_schema in enumerate(alt_schemas) + ) + ) + + def _visit_pattern(self, pattern, name): + """ + Transforms a regular expression pattern into a GBNF rule. + + Input: https://json-schema.org/understanding-json-schema/reference/regular_expressions + Output: https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md + + Unsupported features: negative/positive lookaheads, greedy/non-greedy modifiers. + + Mostly a 1:1 translation, except for {x} / {x,} / {x,y} quantifiers for which + we define sub-rules to keep the output lean. + """ + + assert pattern.startswith("^") and pattern.endswith( + "$" + ), 'Pattern must start with "^" and end with "$"' + pattern = pattern[1:-1] + sub_rule_ids = {} + + i = 0 + length = len(pattern) + + def to_rule(s: Tuple[str, bool]) -> str: + (txt, is_literal) = s + return '"' + txt + '"' if is_literal else txt + + def transform() -> Tuple[str, bool]: + """ + Parse a unit at index i (advancing it), and return its string representation + whether it's a literal. + """ + nonlocal i + nonlocal pattern + nonlocal sub_rule_ids + + start = i + # For each component of this sequence, store its string representation and whether it's a literal. + # We only need a flat structure here to apply repetition operators to the last item, and + # to merge literals at the and (we're parsing grouped ( sequences ) recursively and don't treat '|' specially + # (GBNF's syntax is luckily very close to regular expressions!) + seq: list[Tuple[str, bool]] = [] + + def get_dot(): + if self._dotall: + rule = DOTALL + else: + # Accept any character... except \n and \r line break chars (\x0A and \xOD) + rule = DOT + return self._add_rule(f"dot", rule) + + def join_seq(): + nonlocal seq + ret = [] + for is_literal, g in groupby(seq, lambda x: x[1]): + if is_literal: + ret.append(("".join(x[0] for x in g), True)) + else: + ret.extend(g) + if len(ret) == 1: + return ret[0] + return (" ".join(to_rule(x) for x in seq), False) + + while i < length: + c = pattern[i] + if c == ".": + seq.append((get_dot(), False)) + i += 1 + elif c == "(": + i += 1 + if i < length: + assert ( + pattern[i] != "?" 
+ ), f'Unsupported pattern syntax "{pattern[i]}" at index {i} of /{pattern}/' + seq.append((f"({to_rule(transform())})", False)) + elif c == ")": + i += 1 + assert ( + start > 0 and pattern[start - 1] == "(" + ), f"Unbalanced parentheses; start = {start}, i = {i}, pattern = {pattern}" + return join_seq() + elif c == "[": + square_brackets = c + i += 1 + while i < length and pattern[i] != "]": + if pattern[i] == "\\": + square_brackets += pattern[i : i + 2] + i += 2 + else: + square_brackets += pattern[i] + i += 1 + assert ( + i < length + ), f"Unbalanced square brackets; start = {start}, i = {i}, pattern = {pattern}" + square_brackets += "]" + i += 1 + seq.append((square_brackets, False)) + elif c == "|": + seq.append(("|", False)) + i += 1 + elif c in ("*", "+", "?"): + seq[-1] = (to_rule(seq[-1]) + c, False) + i += 1 + elif c == "{": + curly_brackets = c + i += 1 + while i < length and pattern[i] != "}": + curly_brackets += pattern[i] + i += 1 + assert ( + i < length + ), f"Unbalanced curly brackets; start = {start}, i = {i}, pattern = {pattern}" + curly_brackets += "}" + i += 1 + nums = [s.strip() for s in curly_brackets[1:-1].split(",")] + min_times = 0 + max_times = None + try: + if len(nums) == 1: + min_times = int(nums[0]) + max_times = min_times + else: + assert len(nums) == 2 + min_times = int(nums[0]) if nums[0] else 0 + max_times = int(nums[1]) if nums[1] else None + except ValueError: + raise ValueError( + f"Invalid quantifier {curly_brackets} in /{pattern}/" + ) + + (sub, sub_is_literal) = seq[-1] + + if not sub_is_literal: + id = sub_rule_ids.get(sub) + if id is None: + id = self._add_rule(f"{name}-{len(sub_rule_ids) + 1}", sub) + sub_rule_ids[sub] = id + sub = id + + seq[-1] = ( + _build_repetition( + f'"{sub}"' if sub_is_literal else sub, + min_times, + max_times, + item_rule_is_literal=sub_is_literal, + ), + False, + ) + else: + literal = "" + while i < length: + if pattern[i] == "\\" and i < length - 1: + next = pattern[i + 1] + if next in ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS: + i += 1 + literal += pattern[i] + i += 1 + else: + literal += pattern[i : i + 2] + i += 2 + elif pattern[i] == '"' and not self._raw_pattern: + literal += '\\"' + i += 1 + elif pattern[i] not in NON_LITERAL_SET and ( + i == length - 1 + or literal == "" + or pattern[i + 1] == "." 
+ or pattern[i + 1] not in NON_LITERAL_SET + ): + literal += pattern[i] + i += 1 + else: + break + if literal: + seq.append((literal, True)) + + return join_seq() + + return self._add_rule( + name, + ( + to_rule(transform()) + if self._raw_pattern + else '"\\"" ' + to_rule(transform()) + ' "\\"" space' + ), + ) + + def _resolve_ref(self, ref): + ref_name = ref.split("/")[-1] + if ref_name not in self._rules and ref not in self._refs_being_resolved: + self._refs_being_resolved.add(ref) + resolved = self._refs[ref] + ref_name = self.visit(resolved, ref_name) + self._refs_being_resolved.remove(ref) + return ref_name + + def _generate_constant_rule(self, value): + return self._format_literal(json.dumps(value)) + + def visit(self, schema, name): + schema_type = schema.get("type") + schema_format = schema.get("format") + rule_name = name + "-" if name in RESERVED_NAMES else name or "root" + + if (ref := schema.get("$ref")) is not None: + return self._add_rule(rule_name, self._resolve_ref(ref)) + + elif "oneOf" in schema or "anyOf" in schema: + return self._add_rule( + rule_name, + self._generate_union_rule(name, schema.get("oneOf") or schema["anyOf"]), + ) + + elif isinstance(schema_type, list): + return self._add_rule( + rule_name, + self._generate_union_rule(name, [{"type": t} for t in schema_type]), + ) + + elif "const" in schema: + return self._add_rule( + rule_name, self._generate_constant_rule(schema["const"]) + ) + + elif "enum" in schema: + rule = " | ".join((self._generate_constant_rule(v) for v in schema["enum"])) + return self._add_rule(rule_name, rule) + + elif schema_type in (None, "object") and ( + "properties" in schema + or ( + "additionalProperties" in schema + and schema["additionalProperties"] is not True + ) + ): + required = set(schema.get("required", [])) + properties = list(schema.get("properties", {}).items()) + return self._add_rule( + rule_name, + self._build_object_rule( + properties, required, name, schema.get("additionalProperties") + ), + ) + + elif schema_type in (None, "object") and "allOf" in schema: + required = set() + properties = [] + hybrid_name = name + + def add_component(comp_schema, is_required): + if (ref := comp_schema.get("$ref")) is not None: + comp_schema = self._refs[ref] + + if "properties" in comp_schema: + for prop_name, prop_schema in comp_schema["properties"].items(): + properties.append((prop_name, prop_schema)) + if is_required: + required.add(prop_name) + + for t in schema["allOf"]: + if "anyOf" in t: + for tt in t["anyOf"]: + add_component(tt, is_required=False) + else: + add_component(t, is_required=True) + + return self._add_rule( + rule_name, + self._build_object_rule( + properties, required, hybrid_name, additional_properties=[] + ), + ) + + elif schema_type in (None, "array") and ( + "items" in schema or "prefixItems" in schema + ): + items = schema.get("items") or schema["prefixItems"] + if isinstance(items, list): + return self._add_rule( + rule_name, + '"[" space ' + + ' "," space '.join( + self.visit(item, f'{name}{"-" if name else ""}tuple-{i}') + for i, item in enumerate(items) + ) + + ' "]" space', + ) + else: + item_rule_name = self.visit(items, f'{name}{"-" if name else ""}item') + min_items = schema.get("minItems", 0) + max_items = schema.get("maxItems") + return self._add_rule( + rule_name, + '"[" space ' + + _build_repetition( + item_rule_name, min_items, max_items, separator_rule='"," space' + ) + + ' "]" space', + ) + + elif schema_type in (None, "string") and "pattern" in schema: + return 
self._visit_pattern(schema["pattern"], rule_name) + + elif schema_type in (None, "string") and re.match( + r"^uuid[1-5]?$", schema_format or "" + ): + return self._add_primitive( + "root" if rule_name == "root" else schema_format, + PRIMITIVE_RULES["uuid"], + ) + + elif ( + schema_type in (None, "string") + and f"{schema_format}-string" in STRING_FORMAT_RULES + ): + prim_name = f"{schema_format}-string" + return self._add_rule( + rule_name, + self._add_primitive(prim_name, STRING_FORMAT_RULES[prim_name]), + ) + + elif schema_type == "string" and ( + "minLength" in schema or "maxLength" in schema + ): + char_rule = self._add_primitive("char", PRIMITIVE_RULES["char"]) + min_len = schema.get("minLength", 0) + max_len = schema.get("maxLength") + + return self._add_rule( + rule_name, + r'"\"" ' + + _build_repetition(char_rule, min_len, max_len) + + r' "\"" space', + ) + + elif (schema_type == "object") or (len(schema) == 0): + return self._add_rule( + rule_name, self._add_primitive("object", PRIMITIVE_RULES["object"]) + ) + + else: + assert schema_type in PRIMITIVE_RULES, f"Unrecognized schema: {schema}" + # TODO: support minimum, maximum, exclusiveMinimum, exclusiveMaximum at least for zero + return self._add_primitive( + "root" if rule_name == "root" else schema_type, + PRIMITIVE_RULES[schema_type], + ) + + def _add_primitive(self, name: str, rule: BuiltinRule): + n = self._add_rule(name, rule.content) + + for dep in rule.deps: + dep_rule = PRIMITIVE_RULES.get(dep) or STRING_FORMAT_RULES.get(dep) + assert dep_rule, f"Rule {dep} not known" + if dep not in self._rules: + self._add_primitive(dep, dep_rule) + return n + + def _build_object_rule( + self, + properties: List[Tuple[str, Any]], + required: Set[str], + name: str, + additional_properties: Union[bool, Any], + ): + prop_order = self._prop_order + # sort by position in prop_order (if specified) then by original order + sorted_props = [ + kv[0] + for _, kv in sorted( + enumerate(properties), + key=lambda ikv: (prop_order.get(ikv[1][0], len(prop_order)), ikv[0]), + ) + ] + + prop_kv_rule_names = {} + for prop_name, prop_schema in properties: + prop_rule_name = self.visit( + prop_schema, f'{name}{"-" if name else ""}{prop_name}' + ) + prop_kv_rule_names[prop_name] = self._add_rule( + f'{name}{"-" if name else ""}{prop_name}-kv', + rf'{self._format_literal(json.dumps(prop_name))} space ":" space {prop_rule_name}', + ) + required_props = [k for k in sorted_props if k in required] + optional_props = [k for k in sorted_props if k not in required] + + if additional_properties == True or isinstance(additional_properties, dict): + sub_name = f'{name}{"-" if name else ""}additional' + value_rule = self.visit( + {} if additional_properties == True else additional_properties, + f"{sub_name}-value", + ) + prop_kv_rule_names["*"] = self._add_rule( + f"{sub_name}-kv", + self._add_primitive("string", PRIMITIVE_RULES["string"]) + + f' ":" space {value_rule}', + ) + optional_props.append("*") + + rule = '"{" space ' + rule += ' "," space '.join(prop_kv_rule_names[k] for k in required_props) + + if optional_props: + rule += " (" + if required_props: + rule += ' "," space ( ' + + def get_recursive_refs(ks, first_is_optional): + [k, *rest] = ks + kv_rule_name = prop_kv_rule_names[k] + if k == "*": + res = self._add_rule( + f'{name}{"-" if name else ""}additional-kvs', + f'{kv_rule_name} ( "," space ' + kv_rule_name + " )*", + ) + elif first_is_optional: + res = f'( "," space {kv_rule_name} )?' 
+ else: + res = kv_rule_name + if len(rest) > 0: + res += " " + self._add_rule( + f'{name}{"-" if name else ""}{k}-rest', + get_recursive_refs(rest, first_is_optional=True), + ) + return res + + rule += " | ".join( + get_recursive_refs(optional_props[i:], first_is_optional=False) + for i in range(len(optional_props)) + ) + if required_props: + rule += " )" + rule += " )?" + + rule += ' "}" space' + + return rule + + def format_grammar(self): + return "\n".join( + f"{name} ::= {rule}" + for name, rule in sorted(self._rules.items(), key=lambda kv: kv[0]) + ) + + +def json_schema_to_gbnf(schema: str, prop_order: Optional[List[str]] = None): + prop_order = prop_order or [] + schema = json.loads(schema) + prop_order = {name: idx for idx, name in enumerate(prop_order)} + converter = SchemaConverter( + prop_order=prop_order, allow_fetch=False, dotall=False, raw_pattern=False + ) + schema = converter.resolve_refs(schema, "stdin") + converter.visit(schema, "") + return converter.format_grammar() diff --git a/llama_cpp/llama_speculative.py b/llama_cpp/llama_speculative.py new file mode 100644 index 0000000000000000000000000000000000000000..39dfb903ba43d89c83c7b5b2f7d93502716bb16a --- /dev/null +++ b/llama_cpp/llama_speculative.py @@ -0,0 +1,64 @@ +import abc + +from typing import Any + +import numpy as np +import numpy.typing as npt + + +class LlamaDraftModel(abc.ABC): + @abc.abstractmethod + def __call__( + self, input_ids: npt.NDArray[np.intc], /, **kwargs: Any + ) -> npt.NDArray[np.intc]: + raise NotImplementedError() + + +class LlamaPromptLookupDecoding(LlamaDraftModel): + """Based on https://github.com/apoorvumang/prompt-lookup-decoding""" + + def __init__(self, max_ngram_size: int = 2, num_pred_tokens: int = 10): + self.max_ngram_size = max_ngram_size + self.num_pred_tokens = num_pred_tokens + + @staticmethod + def find_candidate_pred_tokens( + input_ids: npt.NDArray[np.intc], + max_ngram_size: int, + num_pred_tokens: int, + ): + input_length = input_ids.shape[0] + + for ngram_size in range(min(max_ngram_size, input_length - 1), 0, -1): + # Create sliding windows of size ngram_size + windows = np.lib.stride_tricks.sliding_window_view(input_ids, (ngram_size,)) + + # Convert ngram to an array for comparison + ngram_array = input_ids[-ngram_size:] + + # Find where the windows match the ngram + matches = np.all(windows == ngram_array, axis=1) + + # Get the indices of matches + match_indices = np.nonzero(matches)[0] + + # Iterate through match indices to find a valid continuation + for idx in match_indices: + start_idx = idx + ngram_size + end_idx = start_idx + num_pred_tokens + end_idx = min(end_idx, input_length) + + if start_idx < end_idx: + return input_ids[start_idx:end_idx] + + # If no match is found, return an empty array + return np.array([], dtype=np.intc) + + def __call__( + self, input_ids: npt.NDArray[np.intc], /, **kwargs: Any + ) -> npt.NDArray[np.intc]: + return self.find_candidate_pred_tokens( + input_ids=input_ids, + max_ngram_size=self.max_ngram_size, + num_pred_tokens=self.num_pred_tokens, + ) diff --git a/llama_cpp/llama_tokenizer.py b/llama_cpp/llama_tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..1375e1392d2b1752b9c1d3416717b99a11f2b07a --- /dev/null +++ b/llama_cpp/llama_tokenizer.py @@ -0,0 +1,120 @@ +from __future__ import annotations + +import abc +from typing import ( + List, + Optional, + Any, +) + +import llama_cpp +from llama_cpp.llama_types import List + + +class BaseLlamaTokenizer(abc.ABC): + @abc.abstractmethod + def tokenize( + 
self, text: bytes, add_bos: bool = True, special: bool = True + ) -> List[int]: + """Tokenize the text into tokens. + + Args: + text: The utf-8 encoded string to tokenize. + add_bos: Whether to add a beginning of sequence token. + special: Whether to tokenize special tokens. + """ + raise NotImplementedError + + @abc.abstractmethod + def detokenize( + self, + tokens: List[int], + prev_tokens: Optional[List[int]] = None, + special: bool = False, + ) -> bytes: + """Detokenize the tokens into text. + + Args: + tokens: The list of tokens to detokenize. + prev_tokens: The list of previous tokens. Offset mapping will be performed if provided. + special: Whether to detokenize special tokens. + """ + raise NotImplementedError + + +class LlamaTokenizer(BaseLlamaTokenizer): + def __init__(self, llama: llama_cpp.Llama): + self._model = llama._model # type: ignore + + def tokenize( + self, text: bytes, add_bos: bool = True, special: bool = True + ) -> List[int]: + return self._model.tokenize(text, add_bos=add_bos, special=special) + + def detokenize( + self, + tokens: List[int], + prev_tokens: Optional[List[int]] = None, + special: bool = False, + ) -> bytes: + return self._model.detokenize(tokens, special=special) + + def encode( + self, text: str, add_bos: bool = True, special: bool = True + ) -> List[int]: + return self.tokenize( + text.encode("utf-8", errors="ignore"), add_bos=add_bos, special=special + ) + + def decode(self, tokens: List[int]) -> str: + return self.detokenize(tokens).decode("utf-8", errors="ignore") + + @classmethod + def from_ggml_file(cls, path: str) -> "LlamaTokenizer": + return cls(llama_cpp.Llama(model_path=path, vocab_only=True)) + + +class LlamaHFTokenizer(BaseLlamaTokenizer): + def __init__(self, hf_tokenizer: Any): + self.hf_tokenizer = hf_tokenizer + + def tokenize( + self, text: bytes, add_bos: bool = True, special: bool = True + ) -> List[int]: + return self.hf_tokenizer.encode( + text.decode("utf-8", errors="ignore"), add_special_tokens=special + ) + + def detokenize( + self, + tokens: List[int], + prev_tokens: Optional[List[int]] = None, + special: bool = False, + ) -> bytes: + skip_special_tokens = not special + if prev_tokens is not None: + text = self.hf_tokenizer.decode( + prev_tokens + tokens, skip_special_tokens=skip_special_tokens + ).encode("utf-8", errors="ignore") + prev_text = self.hf_tokenizer.decode( + prev_tokens, skip_special_tokens=skip_special_tokens + ).encode("utf-8", errors="ignore") + return text[len(prev_text) :] + else: + return self.hf_tokenizer.decode( + tokens, skip_special_tokens=skip_special_tokens + ).encode("utf-8", errors="ignore") + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: str) -> "LlamaHFTokenizer": + try: + from transformers import AutoTokenizer + except ImportError: + raise ImportError( + "The `transformers` library is required to use the `HFTokenizer`." + "You can install it with `pip install transformers`." + ) + hf_tokenizer = AutoTokenizer.from_pretrained( + pretrained_model_name_or_path=pretrained_model_name_or_path + ) + return cls(hf_tokenizer) diff --git a/llama_cpp/llama_types.py b/llama_cpp/llama_types.py new file mode 100644 index 0000000000000000000000000000000000000000..f647822ff55a53965ef3c8a1fa51fbcb84ee3278 --- /dev/null +++ b/llama_cpp/llama_types.py @@ -0,0 +1,316 @@ +"""Types and request signatures for OpenAI compatibility + +NOTE: These types may change to match the OpenAI OpenAPI specification. 
+ +Based on the OpenAI OpenAPI specification: +https://github.com/openai/openai-openapi/blob/master/openapi.yaml + +""" + +from typing import Any, List, Optional, Dict, Union +from typing_extensions import TypedDict, NotRequired, Literal + + +# NOTE: Defining this correctly using annotations seems to break pydantic validation. +# This is a workaround until we can figure out how to do this correctly +# JsonType = Union[None, int, str, bool, List["JsonType"], Dict[str, "JsonType"]] +JsonType = Union[None, int, str, bool, List[Any], Dict[str, Any]] + + +class EmbeddingUsage(TypedDict): + prompt_tokens: int + total_tokens: int + + +class Embedding(TypedDict): + index: int + object: str + embedding: Union[List[float], List[List[float]]] + + +class CreateEmbeddingResponse(TypedDict): + object: Literal["list"] + model: str + data: List[Embedding] + usage: EmbeddingUsage + + +class CompletionLogprobs(TypedDict): + text_offset: List[int] + token_logprobs: List[Optional[float]] + tokens: List[str] + top_logprobs: List[Optional[Dict[str, float]]] + + +class CompletionChoice(TypedDict): + text: str + index: int + logprobs: Optional[CompletionLogprobs] + finish_reason: Optional[Literal["stop", "length"]] + + +class CompletionUsage(TypedDict): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class CreateCompletionResponse(TypedDict): + id: str + object: Literal["text_completion"] + created: int + model: str + choices: List[CompletionChoice] + usage: NotRequired[CompletionUsage] + + +class ChatCompletionResponseFunctionCall(TypedDict): + name: str + arguments: str + + +class ChatCompletionResponseMessage(TypedDict): + content: Optional[str] + tool_calls: NotRequired["ChatCompletionMessageToolCalls"] + role: Literal["assistant", "function"] # NOTE: "function" may be incorrect here + function_call: NotRequired[ChatCompletionResponseFunctionCall] # DEPRECATED + + +class ChatCompletionFunction(TypedDict): + name: str + description: NotRequired[str] + parameters: Dict[str, JsonType] # TODO: make this more specific + + +class ChatCompletionTopLogprobToken(TypedDict): + token: str + logprob: float + bytes: Optional[List[int]] + + +class ChatCompletionLogprobToken(ChatCompletionTopLogprobToken): + token: str + logprob: float + bytes: Optional[List[int]] + top_logprobs: List[ChatCompletionTopLogprobToken] + + +class ChatCompletionLogprobs(TypedDict): + content: Optional[List[ChatCompletionLogprobToken]] + refusal: Optional[List[ChatCompletionLogprobToken]] + + +class ChatCompletionResponseChoice(TypedDict): + index: int + message: "ChatCompletionResponseMessage" + logprobs: Optional[ChatCompletionLogprobs] + finish_reason: Optional[str] + + +class CreateChatCompletionResponse(TypedDict): + id: str + object: Literal["chat.completion"] + created: int + model: str + choices: List["ChatCompletionResponseChoice"] + usage: CompletionUsage + + +class ChatCompletionMessageToolCallChunkFunction(TypedDict): + name: Optional[str] + arguments: str + + +class ChatCompletionMessageToolCallChunk(TypedDict): + index: int + id: NotRequired[str] + type: Literal["function"] + function: ChatCompletionMessageToolCallChunkFunction + + +class ChatCompletionStreamResponseDeltaEmpty(TypedDict): + pass + + +class ChatCompletionStreamResponseDeltaFunctionCall(TypedDict): + name: str + arguments: str + + +class ChatCompletionStreamResponseDelta(TypedDict): + content: NotRequired[Optional[str]] + function_call: NotRequired[ + Optional[ChatCompletionStreamResponseDeltaFunctionCall] + ] # DEPRECATED + tool_calls: 
NotRequired[Optional[List[ChatCompletionMessageToolCallChunk]]] + role: NotRequired[Optional[Literal["system", "user", "assistant", "tool"]]] + + +class ChatCompletionStreamResponseChoice(TypedDict): + index: int + delta: Union[ + ChatCompletionStreamResponseDelta, ChatCompletionStreamResponseDeltaEmpty + ] + finish_reason: Optional[Literal["stop", "length", "tool_calls", "function_call"]] + logprobs: NotRequired[Optional[ChatCompletionLogprobs]] + + +class CreateChatCompletionStreamResponse(TypedDict): + id: str + model: str + object: Literal["chat.completion.chunk"] + created: int + choices: List[ChatCompletionStreamResponseChoice] + + +class ChatCompletionFunctions(TypedDict): + name: str + description: NotRequired[str] + parameters: Dict[str, JsonType] # TODO: make this more specific + + +class ChatCompletionFunctionCallOption(TypedDict): + name: str + + +class ChatCompletionRequestResponseFormat(TypedDict): + type: Literal["text", "json_object"] + schema: NotRequired[ + JsonType + ] # https://docs.endpoints.anyscale.com/guides/json_mode/ + + +class ChatCompletionRequestMessageContentPartText(TypedDict): + type: Literal["text"] + text: str + + +class ChatCompletionRequestMessageContentPartImageImageUrl(TypedDict): + url: str + detail: NotRequired[Literal["auto", "low", "high"]] + + +class ChatCompletionRequestMessageContentPartImage(TypedDict): + type: Literal["image_url"] + image_url: Union[str, ChatCompletionRequestMessageContentPartImageImageUrl] + + +ChatCompletionRequestMessageContentPart = Union[ + ChatCompletionRequestMessageContentPartText, + ChatCompletionRequestMessageContentPartImage, +] + + +class ChatCompletionRequestSystemMessage(TypedDict): + role: Literal["system"] + content: Optional[str] + + +class ChatCompletionRequestUserMessage(TypedDict): + role: Literal["user"] + content: Optional[Union[str, List[ChatCompletionRequestMessageContentPart]]] + + +class ChatCompletionMessageToolCallFunction(TypedDict): + name: str + arguments: str + + +class ChatCompletionMessageToolCall(TypedDict): + id: str + type: Literal["function"] + function: ChatCompletionMessageToolCallFunction + + +ChatCompletionMessageToolCalls = List[ChatCompletionMessageToolCall] + + +class ChatCompletionRequestAssistantMessageFunctionCall(TypedDict): + name: str + arguments: str + + +class ChatCompletionRequestAssistantMessage(TypedDict): + role: Literal["assistant"] + content: NotRequired[str] + tool_calls: NotRequired[ChatCompletionMessageToolCalls] + function_call: NotRequired[ + ChatCompletionRequestAssistantMessageFunctionCall + ] # DEPRECATED + + +class ChatCompletionRequestToolMessage(TypedDict): + role: Literal["tool"] + content: Optional[str] + tool_call_id: str + + +class ChatCompletionRequestFunctionMessage(TypedDict): + role: Literal["function"] + content: Optional[str] + name: str + + +ChatCompletionRequestMessage = Union[ + ChatCompletionRequestSystemMessage, + ChatCompletionRequestUserMessage, + ChatCompletionRequestAssistantMessage, + ChatCompletionRequestUserMessage, + ChatCompletionRequestToolMessage, + ChatCompletionRequestFunctionMessage, +] + + +class ChatCompletionRequestFunctionCallOption(TypedDict): + name: str + + +ChatCompletionRequestFunctionCall = Union[ + Literal["none", "auto"], ChatCompletionRequestFunctionCallOption +] + +ChatCompletionFunctionParameters = Dict[str, JsonType] # TODO: make this more specific + + +class ChatCompletionToolFunction(TypedDict): + name: str + description: NotRequired[str] + parameters: ChatCompletionFunctionParameters + + +class 
ChatCompletionTool(TypedDict): + type: Literal["function"] + function: ChatCompletionToolFunction + + +class ChatCompletionNamedToolChoiceFunction(TypedDict): + name: str + + +class ChatCompletionNamedToolChoice(TypedDict): + type: Literal["function"] + function: ChatCompletionNamedToolChoiceFunction + + +ChatCompletionToolChoiceOption = Union[ + Literal["none", "auto", "required"], ChatCompletionNamedToolChoice +] + + +# NOTE: The following type names are not part of the OpenAI OpenAPI specification +# and will be removed in a future major release. + +EmbeddingData = Embedding +CompletionChunk = CreateCompletionResponse +Completion = CreateCompletionResponse +CreateCompletionStreamResponse = CreateCompletionResponse +ChatCompletionMessage = ChatCompletionResponseMessage +ChatCompletionChoice = ChatCompletionResponseChoice +ChatCompletion = CreateChatCompletionResponse +ChatCompletionChunkDeltaEmpty = ChatCompletionStreamResponseDeltaEmpty +ChatCompletionChunkChoice = ChatCompletionStreamResponseChoice +ChatCompletionChunkDelta = ChatCompletionStreamResponseDelta +ChatCompletionChunk = CreateChatCompletionStreamResponse +ChatCompletionStreamResponse = CreateChatCompletionStreamResponse +ChatCompletionResponseFunction = ChatCompletionFunction +ChatCompletionFunctionCall = ChatCompletionResponseFunctionCall diff --git a/llama_cpp/llava_cpp.py b/llama_cpp/llava_cpp.py new file mode 100644 index 0000000000000000000000000000000000000000..d9dfaf5fd0a0a6ed9804870f8948d6c53474aa30 --- /dev/null +++ b/llama_cpp/llava_cpp.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +import os +from ctypes import ( + c_bool, + c_char_p, + c_int, + c_uint8, + c_float, + c_void_p, + POINTER, + _Pointer, # type: ignore + Structure, +) +import pathlib +from typing import ( + Union, + NewType, + Optional, + TYPE_CHECKING, +) + +import llama_cpp.llama_cpp as llama_cpp + +from llama_cpp._ctypes_extensions import ( + load_shared_library, + ctypes_function_for_shared_library, +) + +if TYPE_CHECKING: + from llama_cpp._ctypes_extensions import ( + CtypesArray, + ) + + +# Specify the base name of the shared library to load +_libllava_base_name = "llava" +_libllava_override_path = os.environ.get("LLAVA_CPP_LIB") +_libllava_base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) / "lib" if _libllava_override_path is None else pathlib.Path() + +# Load the library +_libllava = load_shared_library(_libllava_base_name, _libllava_base_path) + +ctypes_function = ctypes_function_for_shared_library(_libllava) + + +################################################ +# llava.h +################################################ + +# struct clip_ctx; +clip_ctx_p = NewType("clip_ctx_p", int) +clip_ctx_p_ctypes = c_void_p + + +# struct llava_image_embed { +# float * embed; +# int n_image_pos; +# }; +class llava_image_embed(Structure): + _fields_ = [ + ("embed", POINTER(c_float)), + ("n_image_pos", c_int), + ] + + +# /** sanity check for clip <-> llava embed size match */ +# LLAVA_API bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip); +@ctypes_function( + "llava_validate_embed_size", + [llama_cpp.llama_context_p_ctypes, clip_ctx_p_ctypes], + c_bool, +) +def llava_validate_embed_size( + ctx_llama: llama_cpp.llama_context_p, ctx_clip: clip_ctx_p, / +) -> bool: + ... 
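+# A minimal usage sketch (not part of upstream llava.h; `llama_ctx` and the
+# mmproj path are assumptions, stand-ins for objects created elsewhere with
+# llama_cpp). It only uses the bindings defined in this module and is kept as
+# a comment so importing the module stays side-effect free:
+#
+#     clip_ctx = clip_model_load(b"path/to/mmproj.gguf", 1)
+#     if clip_ctx is None or not llava_validate_embed_size(llama_ctx, clip_ctx):
+#         raise RuntimeError("clip embed size does not match the llama model")
+#     clip_free(clip_ctx)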
+ + +# /** build an image embed from image file bytes */ +# LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length); +@ctypes_function( + "llava_image_embed_make_with_bytes", + [clip_ctx_p_ctypes, c_int, POINTER(c_uint8), c_int], + POINTER(llava_image_embed), +) +def llava_image_embed_make_with_bytes( + ctx_clip: clip_ctx_p, + n_threads: Union[c_int, int], + image_bytes: CtypesArray[c_uint8], + image_bytes_length: Union[c_int, int], + /, +) -> "_Pointer[llava_image_embed]": + ... + + +# /** build an image embed from a path to an image filename */ +# LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path); +@ctypes_function( + "llava_image_embed_make_with_filename", + [clip_ctx_p_ctypes, c_int, c_char_p], + POINTER(llava_image_embed), +) +def llava_image_embed_make_with_filename( + ctx_clip: clip_ctx_p, n_threads: Union[c_int, int], image_path: bytes, / +) -> "_Pointer[llava_image_embed]": + ... + + +# LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed); +# /** free an embedding made with llava_image_embed_make_* */ +@ctypes_function("llava_image_embed_free", [POINTER(llava_image_embed)], None) +def llava_image_embed_free(embed: "_Pointer[llava_image_embed]", /): + ... + + +# /** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */ +# LLAVA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past); +@ctypes_function( + "llava_eval_image_embed", + [ + llama_cpp.llama_context_p_ctypes, + POINTER(llava_image_embed), + c_int, + POINTER(c_int), + ], + c_bool, +) +def llava_eval_image_embed( + ctx_llama: llama_cpp.llama_context_p, + embed: "_Pointer[llava_image_embed]", + n_batch: Union[c_int, int], + n_past: "_Pointer[c_int]", + /, +) -> bool: + ... + + +################################################ +# clip.h +################################################ + + +# /** load mmproj model */ +# CLIP_API struct clip_ctx * clip_model_load (const char * fname, int verbosity); +@ctypes_function("clip_model_load", [c_char_p, c_int], clip_ctx_p_ctypes) +def clip_model_load( + fname: bytes, verbosity: Union[c_int, int], / +) -> Optional[clip_ctx_p]: + ... + + +# /** free mmproj model */ +# CLIP_API void clip_free(struct clip_ctx * ctx); +@ctypes_function("clip_free", [clip_ctx_p_ctypes], None) +def clip_free(ctx: clip_ctx_p, /): + ... + diff --git a/llama_cpp/py.typed b/llama_cpp/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llama_cpp/server/__init__.py b/llama_cpp/server/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llama_cpp/server/__main__.py b/llama_cpp/server/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..bbac4957e92e98b400452ec776264ff53caf25f3 --- /dev/null +++ b/llama_cpp/server/__main__.py @@ -0,0 +1,100 @@ +"""Example FastAPI server for llama.cpp. + +To run this example: + +```bash +pip install fastapi uvicorn sse-starlette pydantic-settings +export MODEL=../models/7B/... 
+``` + +Then run: +``` +uvicorn llama_cpp.server.app:create_app --reload +``` + +or + +``` +python3 -m llama_cpp.server +``` + +Then visit http://localhost:8000/docs to see the interactive API docs. + +""" + +from __future__ import annotations + +import os +import sys +import argparse + +import uvicorn + +from llama_cpp.server.app import create_app +from llama_cpp.server.settings import ( + Settings, + ServerSettings, + ModelSettings, + ConfigFileSettings, +) +from llama_cpp.server.cli import add_args_from_model, parse_model_from_args + + +def main(): + description = "🦙 Llama.cpp python server. Host your own LLMs!🚀" + parser = argparse.ArgumentParser(description=description) + + add_args_from_model(parser, Settings) + parser.add_argument( + "--config_file", + type=str, + help="Path to a config file to load.", + ) + server_settings: ServerSettings | None = None + model_settings: list[ModelSettings] = [] + args = parser.parse_args() + try: + # Load server settings from config_file if provided + config_file = os.environ.get("CONFIG_FILE", args.config_file) + if config_file: + if not os.path.exists(config_file): + raise ValueError(f"Config file {config_file} not found!") + with open(config_file, "rb") as f: + # Check if yaml file + if config_file.endswith(".yaml") or config_file.endswith(".yml"): + import yaml + import json + + config_file_settings = ConfigFileSettings.model_validate_json( + json.dumps(yaml.safe_load(f)) + ) + else: + config_file_settings = ConfigFileSettings.model_validate_json( + f.read() + ) + server_settings = ServerSettings.model_validate(config_file_settings) + model_settings = config_file_settings.models + else: + server_settings = parse_model_from_args(ServerSettings, args) + model_settings = [parse_model_from_args(ModelSettings, args)] + except Exception as e: + print(e, file=sys.stderr) + parser.print_help() + sys.exit(1) + assert server_settings is not None + assert model_settings is not None + app = create_app( + server_settings=server_settings, + model_settings=model_settings, + ) + uvicorn.run( + app, + host=os.getenv("HOST", server_settings.host), + port=int(os.getenv("PORT", server_settings.port)), + ssl_keyfile=server_settings.ssl_keyfile, + ssl_certfile=server_settings.ssl_certfile, + ) + + +if __name__ == "__main__": + main() diff --git a/llama_cpp/server/__pycache__/__init__.cpython-310.pyc b/llama_cpp/server/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c983e046f29f873f45de6c9525f9ff40dcfe1dc Binary files /dev/null and b/llama_cpp/server/__pycache__/__init__.cpython-310.pyc differ diff --git a/llama_cpp/server/__pycache__/__main__.cpython-310.pyc b/llama_cpp/server/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..add1ed76bb2b9a1d30bcaecd53b15ffd07418271 Binary files /dev/null and b/llama_cpp/server/__pycache__/__main__.cpython-310.pyc differ diff --git a/llama_cpp/server/__pycache__/app.cpython-310.pyc b/llama_cpp/server/__pycache__/app.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..716992dc36e872231b0f5fa47888ed65b7ea1ea7 Binary files /dev/null and b/llama_cpp/server/__pycache__/app.cpython-310.pyc differ diff --git a/llama_cpp/server/__pycache__/cli.cpython-310.pyc b/llama_cpp/server/__pycache__/cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6573bddc3bf4ce5f5f9c683bc94cf7048d2c2bd Binary files /dev/null and b/llama_cpp/server/__pycache__/cli.cpython-310.pyc 
differ diff --git a/llama_cpp/server/__pycache__/errors.cpython-310.pyc b/llama_cpp/server/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9dc35f3e015f4fc861f40d3096d8b8c9e756c6c Binary files /dev/null and b/llama_cpp/server/__pycache__/errors.cpython-310.pyc differ diff --git a/llama_cpp/server/__pycache__/model.cpython-310.pyc b/llama_cpp/server/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03d24b562bc8b203d55e9a370271c66f0cdc925f Binary files /dev/null and b/llama_cpp/server/__pycache__/model.cpython-310.pyc differ diff --git a/llama_cpp/server/__pycache__/settings.cpython-310.pyc b/llama_cpp/server/__pycache__/settings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10e63146ad6d0ec55794019794fc3fc40289e532 Binary files /dev/null and b/llama_cpp/server/__pycache__/settings.cpython-310.pyc differ diff --git a/llama_cpp/server/__pycache__/types.cpython-310.pyc b/llama_cpp/server/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3712d88a8db6fd2d70f6c1b3ca02a314fab515b2 Binary files /dev/null and b/llama_cpp/server/__pycache__/types.cpython-310.pyc differ diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py new file mode 100644 index 0000000000000000000000000000000000000000..5120f241624251f9e7fec96fa6079459f9cb82d7 --- /dev/null +++ b/llama_cpp/server/app.py @@ -0,0 +1,597 @@ +from __future__ import annotations + +import os +import json +import typing +import contextlib + +from anyio import Lock +from functools import partial +from typing import List, Optional, Union, Dict + +import llama_cpp + +import anyio +from anyio.streams.memory import MemoryObjectSendStream +from starlette.concurrency import run_in_threadpool, iterate_in_threadpool +from fastapi import Depends, FastAPI, APIRouter, Request, HTTPException, status, Body +from fastapi.middleware import Middleware +from fastapi.middleware.cors import CORSMiddleware +from fastapi.security import HTTPBearer +from sse_starlette.sse import EventSourceResponse +from starlette_context.plugins import RequestIdPlugin # type: ignore +from starlette_context.middleware import RawContextMiddleware + +from llama_cpp.server.model import ( + LlamaProxy, +) +from llama_cpp.server.settings import ( + ConfigFileSettings, + Settings, + ModelSettings, + ServerSettings, +) +from llama_cpp.server.types import ( + CreateCompletionRequest, + CreateEmbeddingRequest, + CreateChatCompletionRequest, + ModelList, + TokenizeInputRequest, + TokenizeInputResponse, + TokenizeInputCountResponse, + DetokenizeInputRequest, + DetokenizeInputResponse, +) +from llama_cpp.server.errors import RouteErrorHandler + + +router = APIRouter(route_class=RouteErrorHandler) + +_server_settings: Optional[ServerSettings] = None + + +def set_server_settings(server_settings: ServerSettings): + global _server_settings + _server_settings = server_settings + + +def get_server_settings(): + yield _server_settings + + +_llama_proxy: Optional[LlamaProxy] = None + +llama_outer_lock = Lock() +llama_inner_lock = Lock() + + +def set_llama_proxy(model_settings: List[ModelSettings]): + global _llama_proxy + _llama_proxy = LlamaProxy(models=model_settings) + + +async def get_llama_proxy(): + # NOTE: This double lock allows the currently streaming llama model to + # check if any other requests are pending in the same thread and cancel + # the stream if so. 
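+    # Concretely: a request takes the outer lock, then the inner lock, then
+    # releases the outer lock before yielding the proxy. A second request that
+    # arrives while a stream is running therefore ends up holding the outer
+    # lock, which the streaming handler can observe via
+    # llama_outer_lock.locked() (see get_event_publisher) to end the current
+    # stream early when interrupt_requests is enabled.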
+ await llama_outer_lock.acquire() + release_outer_lock = True + try: + await llama_inner_lock.acquire() + try: + llama_outer_lock.release() + release_outer_lock = False + yield _llama_proxy + finally: + llama_inner_lock.release() + finally: + if release_outer_lock: + llama_outer_lock.release() + + +_ping_message_factory: typing.Optional[typing.Callable[[], bytes]] = None + + +def set_ping_message_factory(factory: typing.Callable[[], bytes]): + global _ping_message_factory + _ping_message_factory = factory + + +def create_app( + settings: Settings | None = None, + server_settings: ServerSettings | None = None, + model_settings: List[ModelSettings] | None = None, +): + config_file = os.environ.get("CONFIG_FILE", None) + if config_file is not None: + if not os.path.exists(config_file): + raise ValueError(f"Config file {config_file} not found!") + with open(config_file, "rb") as f: + # Check if yaml file + if config_file.endswith(".yaml") or config_file.endswith(".yml"): + import yaml + + config_file_settings = ConfigFileSettings.model_validate_json( + json.dumps(yaml.safe_load(f)) + ) + else: + config_file_settings = ConfigFileSettings.model_validate_json(f.read()) + server_settings = ServerSettings.model_validate(config_file_settings) + model_settings = config_file_settings.models + + if server_settings is None and model_settings is None: + if settings is None: + settings = Settings() + server_settings = ServerSettings.model_validate(settings) + model_settings = [ModelSettings.model_validate(settings)] + + assert ( + server_settings is not None and model_settings is not None + ), "server_settings and model_settings must be provided together" + + set_server_settings(server_settings) + middleware = [Middleware(RawContextMiddleware, plugins=(RequestIdPlugin(),))] + app = FastAPI( + middleware=middleware, + title="🦙 llama.cpp Python API", + version=llama_cpp.__version__, + root_path=server_settings.root_path, + ) + app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + app.include_router(router) + + assert model_settings is not None + set_llama_proxy(model_settings=model_settings) + + if server_settings.disable_ping_events: + set_ping_message_factory(lambda: bytes()) + + return app + + +def prepare_request_resources( + body: CreateCompletionRequest | CreateChatCompletionRequest, + llama_proxy: LlamaProxy, + body_model: str | None, + kwargs, +) -> llama_cpp.Llama: + if llama_proxy is None: + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail="Service is not available", + ) + llama = llama_proxy(body_model) + if body.logit_bias is not None: + kwargs["logit_bias"] = ( + _logit_bias_tokens_to_input_ids(llama, body.logit_bias) + if body.logit_bias_type == "tokens" + else body.logit_bias + ) + + if body.grammar is not None: + kwargs["grammar"] = llama_cpp.LlamaGrammar.from_string(body.grammar) + + if body.min_tokens > 0: + _min_tokens_logits_processor = llama_cpp.LogitsProcessorList( + [llama_cpp.MinTokensLogitsProcessor(body.min_tokens, llama.token_eos())] + ) + if "logits_processor" not in kwargs: + kwargs["logits_processor"] = _min_tokens_logits_processor + else: + kwargs["logits_processor"].extend(_min_tokens_logits_processor) + return llama + + +async def get_event_publisher( + request: Request, + inner_send_chan: MemoryObjectSendStream[typing.Any], + body: CreateCompletionRequest | CreateChatCompletionRequest, + body_model: str | None, + llama_call, + kwargs, +): + server_settings = 
next(get_server_settings()) + interrupt_requests = ( + server_settings.interrupt_requests if server_settings else False + ) + async with contextlib.asynccontextmanager(get_llama_proxy)() as llama_proxy: + llama = prepare_request_resources(body, llama_proxy, body_model, kwargs) + async with inner_send_chan: + try: + iterator = await run_in_threadpool(llama_call, llama, **kwargs) + async for chunk in iterate_in_threadpool(iterator): + await inner_send_chan.send(dict(data=json.dumps(chunk))) + if await request.is_disconnected(): + raise anyio.get_cancelled_exc_class()() + if interrupt_requests and llama_outer_lock.locked(): + await inner_send_chan.send(dict(data="[DONE]")) + raise anyio.get_cancelled_exc_class()() + await inner_send_chan.send(dict(data="[DONE]")) + except anyio.get_cancelled_exc_class() as e: + print("disconnected") + with anyio.move_on_after(1, shield=True): + print( + f"Disconnected from client (via refresh/close) {request.client}" + ) + raise e + + +def _logit_bias_tokens_to_input_ids( + llama: llama_cpp.Llama, + logit_bias: Dict[str, float], +) -> Dict[str, float]: + to_bias: Dict[str, float] = {} + for token, score in logit_bias.items(): + token = token.encode("utf-8") + for input_id in llama.tokenize(token, add_bos=False, special=True): + to_bias[str(input_id)] = score + return to_bias + + +# Setup Bearer authentication scheme +bearer_scheme = HTTPBearer(auto_error=False) + + +async def authenticate( + settings: Settings = Depends(get_server_settings), + authorization: Optional[str] = Depends(bearer_scheme), +): + # Skip API key check if it's not set in settings + if settings.api_key is None: + return True + + # check bearer credentials against the api_key + if authorization and authorization.credentials == settings.api_key: + # api key is valid + return authorization.credentials + + # raise http error 401 + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid API key", + ) + + +openai_v1_tag = "OpenAI V1" + + +@router.post( + "/v1/completions", + summary="Completion", + dependencies=[Depends(authenticate)], + response_model=Union[ + llama_cpp.CreateCompletionResponse, + str, + ], + responses={ + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "anyOf": [ + {"$ref": "#/components/schemas/CreateCompletionResponse"} + ], + "title": "Completion response, when stream=False", + } + }, + "text/event-stream": { + "schema": { + "type": "string", + "title": "Server Side Streaming response, when stream=True. " + + "See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format", # noqa: E501 + "example": """data: {... see CreateCompletionResponse ...} \\n\\n data: ... \\n\\n ... 
data: [DONE]""", + } + }, + }, + } + }, + tags=[openai_v1_tag], +) +@router.post( + "/v1/engines/copilot-codex/completions", + include_in_schema=False, + dependencies=[Depends(authenticate)], + tags=[openai_v1_tag], +) +async def create_completion( + request: Request, + body: CreateCompletionRequest, +) -> llama_cpp.Completion: + if isinstance(body.prompt, list): + assert len(body.prompt) <= 1 + body.prompt = body.prompt[0] if len(body.prompt) > 0 else "" + + body_model = ( + body.model + if request.url.path != "/v1/engines/copilot-codex/completions" + else "copilot-codex" + ) + + exclude = { + "n", + "best_of", + "logit_bias_type", + "user", + "min_tokens", + } + kwargs = body.model_dump(exclude=exclude) + + # handle streaming request + if kwargs.get("stream", False): + send_chan, recv_chan = anyio.create_memory_object_stream(10) + return EventSourceResponse( + recv_chan, + data_sender_callable=partial( # type: ignore + get_event_publisher, + request=request, + inner_send_chan=send_chan, + body=body, + body_model=body_model, + llama_call=llama_cpp.Llama.__call__, + kwargs=kwargs, + ), + sep="\n", + ping_message_factory=_ping_message_factory, + ) + + # handle regular request + async with contextlib.asynccontextmanager(get_llama_proxy)() as llama_proxy: + llama = prepare_request_resources(body, llama_proxy, body_model, kwargs) + + if await request.is_disconnected(): + print( + f"Disconnected from client (via refresh/close) before llm invoked {request.client}" + ) + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Client closed request", + ) + + return await run_in_threadpool(llama, **kwargs) + + +@router.post( + "/v1/embeddings", + summary="Embedding", + dependencies=[Depends(authenticate)], + tags=[openai_v1_tag], +) +async def create_embedding( + request: CreateEmbeddingRequest, + llama_proxy: LlamaProxy = Depends(get_llama_proxy), +): + return await run_in_threadpool( + llama_proxy(request.model).create_embedding, + **request.model_dump(exclude={"user"}), + ) + + +@router.post( + "/v1/chat/completions", + summary="Chat", + dependencies=[Depends(authenticate)], + response_model=Union[llama_cpp.ChatCompletion, str], + responses={ + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "anyOf": [ + { + "$ref": "#/components/schemas/CreateChatCompletionResponse" + } + ], + "title": "Completion response, when stream=False", + } + }, + "text/event-stream": { + "schema": { + "type": "string", + "title": "Server Side Streaming response, when stream=True" + + "See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format", # noqa: E501 + "example": """data: {... see CreateChatCompletionResponse ...} \\n\\n data: ... \\n\\n ... 
data: [DONE]""", + } + }, + }, + } + }, + tags=[openai_v1_tag], +) +async def create_chat_completion( + request: Request, + body: CreateChatCompletionRequest = Body( + openapi_examples={ + "normal": { + "summary": "Chat Completion", + "value": { + "model": "gpt-3.5-turbo", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the capital of France?"}, + ], + }, + }, + "json_mode": { + "summary": "JSON Mode", + "value": { + "model": "gpt-3.5-turbo", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Who won the world series in 2020"}, + ], + "response_format": {"type": "json_object"}, + }, + }, + "tool_calling": { + "summary": "Tool Calling", + "value": { + "model": "gpt-3.5-turbo", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Extract Jason is 30 years old."}, + ], + "tools": [ + { + "type": "function", + "function": { + "name": "User", + "description": "User record", + "parameters": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "age": {"type": "number"}, + }, + "required": ["name", "age"], + }, + }, + } + ], + "tool_choice": { + "type": "function", + "function": { + "name": "User", + }, + }, + }, + }, + "logprobs": { + "summary": "Logprobs", + "value": { + "model": "gpt-3.5-turbo", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the capital of France?"}, + ], + "logprobs": True, + "top_logprobs": 10, + }, + }, + } + ), +) -> llama_cpp.ChatCompletion: + # This is a workaround for an issue in FastAPI dependencies + # where the dependency is cleaned up before a StreamingResponse + # is complete. 
+ # https://github.com/tiangolo/fastapi/issues/11143 + + body_model = body.model + exclude = { + "n", + "logit_bias_type", + "user", + "min_tokens", + } + kwargs = body.model_dump(exclude=exclude) + + # handle streaming request + if kwargs.get("stream", False): + send_chan, recv_chan = anyio.create_memory_object_stream(10) + return EventSourceResponse( + recv_chan, + data_sender_callable=partial( # type: ignore + get_event_publisher, + request=request, + inner_send_chan=send_chan, + body=body, + body_model=body_model, + llama_call=llama_cpp.Llama.create_chat_completion, + kwargs=kwargs, + ), + sep="\n", + ping_message_factory=_ping_message_factory, + ) + + # handle regular request + async with contextlib.asynccontextmanager(get_llama_proxy)() as llama_proxy: + llama = prepare_request_resources(body, llama_proxy, body_model, kwargs) + + if await request.is_disconnected(): + print( + f"Disconnected from client (via refresh/close) before llm invoked {request.client}" + ) + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Client closed request", + ) + + return await run_in_threadpool(llama.create_chat_completion, **kwargs) + + +@router.get( + "/v1/models", + summary="Models", + dependencies=[Depends(authenticate)], + tags=[openai_v1_tag], +) +async def get_models( + llama_proxy: LlamaProxy = Depends(get_llama_proxy), +) -> ModelList: + return { + "object": "list", + "data": [ + { + "id": model_alias, + "object": "model", + "owned_by": "me", + "permissions": [], + } + for model_alias in llama_proxy + ], + } + + +extras_tag = "Extras" + + +@router.post( + "/extras/tokenize", + summary="Tokenize", + dependencies=[Depends(authenticate)], + tags=[extras_tag], +) +async def tokenize( + body: TokenizeInputRequest, + llama_proxy: LlamaProxy = Depends(get_llama_proxy), +) -> TokenizeInputResponse: + tokens = llama_proxy(body.model).tokenize(body.input.encode("utf-8"), special=True) + + return TokenizeInputResponse(tokens=tokens) + + +@router.post( + "/extras/tokenize/count", + summary="Tokenize Count", + dependencies=[Depends(authenticate)], + tags=[extras_tag], +) +async def count_query_tokens( + body: TokenizeInputRequest, + llama_proxy: LlamaProxy = Depends(get_llama_proxy), +) -> TokenizeInputCountResponse: + tokens = llama_proxy(body.model).tokenize(body.input.encode("utf-8"), special=True) + + return TokenizeInputCountResponse(count=len(tokens)) + + +@router.post( + "/extras/detokenize", + summary="Detokenize", + dependencies=[Depends(authenticate)], + tags=[extras_tag], +) +async def detokenize( + body: DetokenizeInputRequest, + llama_proxy: LlamaProxy = Depends(get_llama_proxy), +) -> DetokenizeInputResponse: + text = llama_proxy(body.model).detokenize(body.tokens).decode("utf-8") + + return DetokenizeInputResponse(text=text) diff --git a/llama_cpp/server/cli.py b/llama_cpp/server/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..3dd00767671c5e9dac5a2ab8f4f1331531294b60 --- /dev/null +++ b/llama_cpp/server/cli.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +import argparse + +from typing import List, Literal, Union, Any, Type, TypeVar + +from pydantic import BaseModel + + +def _get_base_type(annotation: Type[Any]) -> Type[Any]: + if getattr(annotation, "__origin__", None) is Literal: + assert hasattr(annotation, "__args__") and len(annotation.__args__) >= 1 # type: ignore + return type(annotation.__args__[0]) # type: ignore + elif getattr(annotation, "__origin__", None) is Union: + assert hasattr(annotation, "__args__") and 
len(annotation.__args__) >= 1 # type: ignore + non_optional_args: List[Type[Any]] = [ + arg for arg in annotation.__args__ if arg is not type(None) # type: ignore + ] + if non_optional_args: + return _get_base_type(non_optional_args[0]) + elif ( + getattr(annotation, "__origin__", None) is list + or getattr(annotation, "__origin__", None) is List + ): + assert hasattr(annotation, "__args__") and len(annotation.__args__) >= 1 # type: ignore + return _get_base_type(annotation.__args__[0]) # type: ignore + return annotation + + +def _contains_list_type(annotation: Type[Any] | None) -> bool: + origin = getattr(annotation, "__origin__", None) + + if origin is list or origin is List: + return True + elif origin in (Literal, Union): + return any(_contains_list_type(arg) for arg in annotation.__args__) # type: ignore + else: + return False + + +def _parse_bool_arg(arg: str | bytes | bool) -> bool: + if isinstance(arg, bytes): + arg = arg.decode("utf-8") + + true_values = {"1", "on", "t", "true", "y", "yes"} + false_values = {"0", "off", "f", "false", "n", "no"} + + arg_str = str(arg).lower().strip() + + if arg_str in true_values: + return True + elif arg_str in false_values: + return False + else: + raise ValueError(f"Invalid boolean argument: {arg}") + + +def add_args_from_model(parser: argparse.ArgumentParser, model: Type[BaseModel]): + """Add arguments from a pydantic model to an argparse parser.""" + + for name, field in model.model_fields.items(): + description = field.description + if field.default and description and not field.is_required(): + description += f" (default: {field.default})" + base_type = ( + _get_base_type(field.annotation) if field.annotation is not None else str + ) + list_type = _contains_list_type(field.annotation) + if base_type is not bool: + parser.add_argument( + f"--{name}", + dest=name, + nargs="*" if list_type else None, + type=base_type, + help=description, + ) + if base_type is bool: + parser.add_argument( + f"--{name}", + dest=name, + type=_parse_bool_arg, + help=f"{description}", + ) + + +T = TypeVar("T", bound=Type[BaseModel]) + + +def parse_model_from_args(model: T, args: argparse.Namespace) -> T: + """Parse a pydantic model from an argparse namespace.""" + return model( + **{ + k: v + for k, v in vars(args).items() + if v is not None and k in model.model_fields + } + ) diff --git a/llama_cpp/server/errors.py b/llama_cpp/server/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..d0eda5664ba583d053205e1e01c2f4365648c7f2 --- /dev/null +++ b/llama_cpp/server/errors.py @@ -0,0 +1,212 @@ +from __future__ import annotations + +import sys +import traceback +import time +from re import compile, Match, Pattern +from typing import Callable, Coroutine, Optional, Tuple, Union, Dict +from typing_extensions import TypedDict + + +from fastapi import ( + Request, + Response, + HTTPException, +) +from fastapi.responses import JSONResponse +from fastapi.routing import APIRoute + +from llama_cpp.server.types import ( + CreateCompletionRequest, + CreateEmbeddingRequest, + CreateChatCompletionRequest, +) + + +class ErrorResponse(TypedDict): + """OpenAI style error response""" + + message: str + type: str + param: Optional[str] + code: Optional[str] + + +class ErrorResponseFormatters: + """Collection of formatters for error responses. 
+ + Args: + request (Union[CreateCompletionRequest, CreateChatCompletionRequest]): + Request body + match (Match[str]): Match object from regex pattern + + Returns: + Tuple[int, ErrorResponse]: Status code and error response + """ + + @staticmethod + def context_length_exceeded( + request: Union["CreateCompletionRequest", "CreateChatCompletionRequest"], + match, # type: Match[str] # type: ignore + ) -> Tuple[int, ErrorResponse]: + """Formatter for context length exceeded error""" + + context_window = int(match.group(2)) + prompt_tokens = int(match.group(1)) + completion_tokens = request.max_tokens + if hasattr(request, "messages"): + # Chat completion + message = ( + "This model's maximum context length is {} tokens. " + "However, you requested {} tokens " + "({} in the messages, {} in the completion). " + "Please reduce the length of the messages or completion." + ) + else: + # Text completion + message = ( + "This model's maximum context length is {} tokens, " + "however you requested {} tokens " + "({} in your prompt; {} for the completion). " + "Please reduce your prompt; or completion length." + ) + return 400, ErrorResponse( + message=message.format( + context_window, + (completion_tokens or 0) + prompt_tokens, + prompt_tokens, + completion_tokens, + ), # type: ignore + type="invalid_request_error", + param="messages", + code="context_length_exceeded", + ) + + @staticmethod + def model_not_found( + request: Union["CreateCompletionRequest", "CreateChatCompletionRequest"], + match, # type: Match[str] # type: ignore + ) -> Tuple[int, ErrorResponse]: + """Formatter for model_not_found error""" + + model_path = str(match.group(1)) + message = f"The model `{model_path}` does not exist" + return 400, ErrorResponse( + message=message, + type="invalid_request_error", + param=None, + code="model_not_found", + ) + + +class RouteErrorHandler(APIRoute): + """Custom APIRoute that handles application errors and exceptions""" + + # key: regex pattern for original error message from llama_cpp + # value: formatter function + pattern_and_formatters: Dict[ + "Pattern[str]", + Callable[ + [ + Union["CreateCompletionRequest", "CreateChatCompletionRequest"], + "Match[str]", + ], + Tuple[int, ErrorResponse], + ], + ] = { + compile( + r"Requested tokens \((\d+)\) exceed context window of (\d+)" + ): ErrorResponseFormatters.context_length_exceeded, + compile( + r"Model path does not exist: (.+)" + ): ErrorResponseFormatters.model_not_found, + } + + def error_message_wrapper( + self, + error: Exception, + body: Optional[ + Union[ + "CreateChatCompletionRequest", + "CreateCompletionRequest", + "CreateEmbeddingRequest", + ] + ] = None, + ) -> Tuple[int, ErrorResponse]: + """Wraps error message in OpenAI style error response""" + if body is not None and isinstance( + body, + ( + CreateCompletionRequest, + CreateChatCompletionRequest, + ), + ): + # When text completion or chat completion + for pattern, callback in self.pattern_and_formatters.items(): + match = pattern.search(str(error)) + if match is not None: + return callback(body, match) + + # Only print the trace on unexpected exceptions + print(f"Exception: {str(error)}", file=sys.stderr) + traceback.print_exc(file=sys.stderr) + + # Wrap other errors as internal server error + return 500, ErrorResponse( + message=str(error), + type="internal_server_error", + param=None, + code=None, + ) + + def get_route_handler( + self, + ) -> Callable[[Request], Coroutine[None, None, Response]]: + """Defines custom route handler that catches exceptions and formats + in 
OpenAI style error response""" + + original_route_handler = super().get_route_handler() + + async def custom_route_handler(request: Request) -> Response: + try: + start_sec = time.perf_counter() + response = await original_route_handler(request) + elapsed_time_ms = int((time.perf_counter() - start_sec) * 1000) + response.headers["openai-processing-ms"] = f"{elapsed_time_ms}" + return response + except HTTPException as unauthorized: + # api key check failed + raise unauthorized + except Exception as exc: + json_body = await request.json() + try: + if "messages" in json_body: + # Chat completion + body: Optional[ + Union[ + CreateChatCompletionRequest, + CreateCompletionRequest, + CreateEmbeddingRequest, + ] + ] = CreateChatCompletionRequest(**json_body) + elif "prompt" in json_body: + # Text completion + body = CreateCompletionRequest(**json_body) + else: + # Embedding + body = CreateEmbeddingRequest(**json_body) + except Exception: + # Invalid request body + body = None + + # Get proper error message from the exception + ( + status_code, + error_message, + ) = self.error_message_wrapper(error=exc, body=body) + return JSONResponse( + {"error": error_message}, + status_code=status_code, + ) + + return custom_route_handler diff --git a/llama_cpp/server/model.py b/llama_cpp/server/model.py new file mode 100644 index 0000000000000000000000000000000000000000..c6716f9196d80eeca92ea4ba3d62d679cb1a825a --- /dev/null +++ b/llama_cpp/server/model.py @@ -0,0 +1,298 @@ +from __future__ import annotations + +import json + +from typing import Dict, Optional, Union, List + +import llama_cpp +import llama_cpp.llama_speculative as llama_speculative +import llama_cpp.llama_tokenizer as llama_tokenizer + +from llama_cpp.server.settings import ModelSettings + + +class LlamaProxy: + def __init__(self, models: List[ModelSettings]) -> None: + assert len(models) > 0, "No models provided!" 
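+        # Settings are indexed by model alias below; a request's "model" field
+        # selects which entry to use, and the proxy lazily swaps the single
+        # loaded Llama instance, with the first entry acting as the default.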
+ + self._model_settings_dict: dict[str, ModelSettings] = {} + for model in models: + if not model.model_alias: + model.model_alias = model.model + self._model_settings_dict[model.model_alias] = model + + self._current_model: Optional[llama_cpp.Llama] = None + self._current_model_alias: Optional[str] = None + + self._default_model_settings: ModelSettings = models[0] + self._default_model_alias: str = self._default_model_settings.model_alias # type: ignore + + # Load default model + self._current_model = self.load_llama_from_model_settings( + self._default_model_settings + ) + self._current_model_alias = self._default_model_alias + + def __call__(self, model: Optional[str] = None) -> llama_cpp.Llama: + if model is None: + model = self._default_model_alias + + if model not in self._model_settings_dict: + model = self._default_model_alias + + if model == self._current_model_alias: + if self._current_model is not None: + return self._current_model + + if self._current_model: + self._current_model.close() + self._current_model = None + + settings = self._model_settings_dict[model] + self._current_model = self.load_llama_from_model_settings(settings) + self._current_model_alias = model + return self._current_model + + def __getitem__(self, model: str): + return self._model_settings_dict[model].model_dump() + + def __setitem__(self, model: str, settings: Union[ModelSettings, str, bytes]): + if isinstance(settings, (bytes, str)): + settings = ModelSettings.model_validate_json(settings) + self._model_settings_dict[model] = settings + + def __iter__(self): + for model in self._model_settings_dict: + yield model + + def free(self): + if self._current_model: + self._current_model.close() + del self._current_model + + @staticmethod + def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama: + chat_handler = None + if settings.chat_format == "llava-1-5": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.Llava15ChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.Llava15ChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "obsidian": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.ObsidianChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.ObsidianChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "llava-1-6": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.Llava16ChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.Llava16ChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "moondream": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + 
llama_cpp.llama_chat_format.MoondreamChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.MoondreamChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "nanollava": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.NanoLlavaChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.NanoLlavaChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "llama-3-vision-alpha": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.Llama3VisionAlpha.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.Llama3VisionAlpha( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "minicpm-v-2.6": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.MiniCPMv26ChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.MiniCPMv26ChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "hf-autotokenizer": + assert ( + settings.hf_pretrained_model_name_or_path is not None + ), "hf_pretrained_model_name_or_path must be set for hf-autotokenizer" + chat_handler = ( + llama_cpp.llama_chat_format.hf_autotokenizer_to_chat_completion_handler( + settings.hf_pretrained_model_name_or_path + ) + ) + elif settings.chat_format == "hf-tokenizer-config": + assert ( + settings.hf_tokenizer_config_path is not None + ), "hf_tokenizer_config_path must be set for hf-tokenizer-config" + chat_handler = llama_cpp.llama_chat_format.hf_tokenizer_config_to_chat_completion_handler( + json.load(open(settings.hf_tokenizer_config_path)) + ) + + tokenizer: Optional[llama_cpp.BaseLlamaTokenizer] = None + if settings.hf_pretrained_model_name_or_path is not None: + tokenizer = llama_tokenizer.LlamaHFTokenizer.from_pretrained( + settings.hf_pretrained_model_name_or_path + ) + + draft_model = None + if settings.draft_model is not None: + draft_model = llama_speculative.LlamaPromptLookupDecoding( + num_pred_tokens=settings.draft_model_num_pred_tokens + ) + + kv_overrides: Optional[Dict[str, Union[bool, int, float, str]]] = None + if settings.kv_overrides is not None: + assert isinstance(settings.kv_overrides, list) + kv_overrides = {} + for kv in settings.kv_overrides: + key, value = kv.split("=") + if ":" in value: + value_type, value = value.split(":") + if value_type == "bool": + kv_overrides[key] = value.lower() in ["true", "1"] + elif value_type == "int": + kv_overrides[key] = int(value) + elif value_type == "float": + kv_overrides[key] = float(value) + elif value_type == "str": + kv_overrides[key] = value + else: + raise ValueError(f"Unknown value type 
{value_type}") + + import functools + + kwargs = {} + + if settings.hf_model_repo_id is not None: + create_fn = functools.partial( + llama_cpp.Llama.from_pretrained, + repo_id=settings.hf_model_repo_id, + filename=settings.model, + ) + else: + create_fn = llama_cpp.Llama + kwargs["model_path"] = settings.model + + _model = create_fn( + **kwargs, + # Model Params + n_gpu_layers=settings.n_gpu_layers, + split_mode=settings.split_mode, + main_gpu=settings.main_gpu, + tensor_split=settings.tensor_split, + vocab_only=settings.vocab_only, + use_mmap=settings.use_mmap, + use_mlock=settings.use_mlock, + kv_overrides=kv_overrides, + rpc_servers=settings.rpc_servers, + # Context Params + seed=settings.seed, + n_ctx=settings.n_ctx, + n_batch=settings.n_batch, + n_ubatch=settings.n_ubatch, + n_threads=settings.n_threads, + n_threads_batch=settings.n_threads_batch, + rope_scaling_type=settings.rope_scaling_type, + rope_freq_base=settings.rope_freq_base, + rope_freq_scale=settings.rope_freq_scale, + yarn_ext_factor=settings.yarn_ext_factor, + yarn_attn_factor=settings.yarn_attn_factor, + yarn_beta_fast=settings.yarn_beta_fast, + yarn_beta_slow=settings.yarn_beta_slow, + yarn_orig_ctx=settings.yarn_orig_ctx, + mul_mat_q=settings.mul_mat_q, + logits_all=settings.logits_all, + embedding=settings.embedding, + offload_kqv=settings.offload_kqv, + flash_attn=settings.flash_attn, + # Sampling Params + last_n_tokens_size=settings.last_n_tokens_size, + # LoRA Params + lora_base=settings.lora_base, + lora_path=settings.lora_path, + # Backend Params + numa=settings.numa, + # Chat Format Params + chat_format=settings.chat_format, + chat_handler=chat_handler, + # Speculative Decoding + draft_model=draft_model, + # KV Cache Quantization + type_k=settings.type_k, + type_v=settings.type_v, + # Tokenizer + tokenizer=tokenizer, + # Misc + verbose=settings.verbose, + ) + if settings.cache: + if settings.cache_type == "disk": + if settings.verbose: + print(f"Using disk cache with size {settings.cache_size}") + cache = llama_cpp.LlamaDiskCache(capacity_bytes=settings.cache_size) + else: + if settings.verbose: + print(f"Using ram cache with size {settings.cache_size}") + cache = llama_cpp.LlamaRAMCache(capacity_bytes=settings.cache_size) + _model.set_cache(cache) + return _model diff --git a/llama_cpp/server/settings.py b/llama_cpp/server/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..13c9512419bb39423b13c3063d25b8142abd7b6d --- /dev/null +++ b/llama_cpp/server/settings.py @@ -0,0 +1,240 @@ +from __future__ import annotations + +import multiprocessing + +from typing import Optional, List, Literal, Union, Dict, cast +from typing_extensions import Self + +from pydantic import Field, model_validator +from pydantic_settings import BaseSettings + +import llama_cpp + +# Disable warning for model and model_alias settings +BaseSettings.model_config["protected_namespaces"] = () + + +class ModelSettings(BaseSettings): + """Model settings used to load a Llama model.""" + + model: str = Field( + description="The path to the model to use for generating completions." + ) + model_alias: Optional[str] = Field( + default=None, + description="The alias of the model to use for generating completions.", + ) + # Model Params + n_gpu_layers: int = Field( + default=0, + ge=-1, + description="The number of layers to put on the GPU. The rest will be on the CPU. 
Set -1 to move all to GPU.", + ) + split_mode: int = Field( + default=llama_cpp.LLAMA_SPLIT_MODE_LAYER, + description="The split mode to use.", + ) + main_gpu: int = Field( + default=0, + ge=0, + description="Main GPU to use.", + ) + tensor_split: Optional[List[float]] = Field( + default=None, + description="Split layers across multiple GPUs in proportion.", + ) + vocab_only: bool = Field( + default=False, description="Whether to only return the vocabulary." + ) + use_mmap: bool = Field( + default=llama_cpp.llama_supports_mmap(), + description="Use mmap.", + ) + use_mlock: bool = Field( + default=llama_cpp.llama_supports_mlock(), + description="Use mlock.", + ) + kv_overrides: Optional[List[str]] = Field( + default=None, + description="List of model kv overrides in the format key=type:value where type is one of (bool, int, float). Valid true values are (true, TRUE, 1), otherwise false.", + ) + rpc_servers: Optional[str] = Field( + default=None, + description="comma seperated list of rpc servers for offloading", + ) + # Context Params + seed: int = Field( + default=llama_cpp.LLAMA_DEFAULT_SEED, description="Random seed. -1 for random." + ) + n_ctx: int = Field(default=2048, ge=0, description="The context size.") + n_batch: int = Field( + default=512, ge=1, description="The batch size to use per eval." + ) + n_ubatch: int = Field( + default=512, ge=1, description="The physical batch size used by llama.cpp" + ) + n_threads: int = Field( + default=max(multiprocessing.cpu_count() // 2, 1), + ge=1, + description="The number of threads to use. Use -1 for max cpu threads", + ) + n_threads_batch: int = Field( + default=max(multiprocessing.cpu_count(), 1), + ge=0, + description="The number of threads to use when batch processing. Use -1 for max cpu threads", + ) + rope_scaling_type: int = Field( + default=llama_cpp.LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED + ) + rope_freq_base: float = Field(default=0.0, description="RoPE base frequency") + rope_freq_scale: float = Field( + default=0.0, description="RoPE frequency scaling factor" + ) + yarn_ext_factor: float = Field(default=-1.0) + yarn_attn_factor: float = Field(default=1.0) + yarn_beta_fast: float = Field(default=32.0) + yarn_beta_slow: float = Field(default=1.0) + yarn_orig_ctx: int = Field(default=0) + mul_mat_q: bool = Field( + default=True, description="if true, use experimental mul_mat_q kernels" + ) + logits_all: bool = Field(default=True, description="Whether to return logits.") + embedding: bool = Field(default=False, description="Whether to use embeddings.") + offload_kqv: bool = Field( + default=True, description="Whether to offload kqv to the GPU." + ) + flash_attn: bool = Field( + default=False, description="Whether to use flash attention." 
+ ) + # Sampling Params + last_n_tokens_size: int = Field( + default=64, + ge=0, + description="Last n tokens to keep for repeat penalty calculation.", + ) + # LoRA Params + lora_base: Optional[str] = Field( + default=None, + description="Optional path to base model, useful if using a quantized base model and you want to apply LoRA to an f16 model.", + ) + lora_path: Optional[str] = Field( + default=None, + description="Path to a LoRA file to apply to the model.", + ) + # Backend Params + numa: Union[bool, int] = Field( + default=False, + description="Enable NUMA support.", + ) + # Chat Format Params + chat_format: Optional[str] = Field( + default=None, + description="Chat format to use.", + ) + clip_model_path: Optional[str] = Field( + default=None, + description="Path to a CLIP model to use for multi-modal chat completion.", + ) + # Cache Params + cache: bool = Field( + default=False, + description="Use a cache to reduce processing times for evaluated prompts.", + ) + cache_type: Literal["ram", "disk"] = Field( + default="ram", + description="The type of cache to use. Only used if cache is True.", + ) + cache_size: int = Field( + default=2 << 30, + description="The size of the cache in bytes. Only used if cache is True.", + ) + # Tokenizer Options + hf_tokenizer_config_path: Optional[str] = Field( + default=None, + description="The path to a HuggingFace tokenizer_config.json file.", + ) + hf_pretrained_model_name_or_path: Optional[str] = Field( + default=None, + description="The model name or path to a pretrained HuggingFace tokenizer model. Same as you would pass to AutoTokenizer.from_pretrained().", + ) + # Loading from HuggingFace Model Hub + hf_model_repo_id: Optional[str] = Field( + default=None, + description="The model repo id to use for the HuggingFace tokenizer model.", + ) + # Speculative Decoding + draft_model: Optional[str] = Field( + default=None, + description="Method to use for speculative decoding. One of (prompt-lookup-decoding).", + ) + draft_model_num_pred_tokens: int = Field( + default=10, + description="Number of tokens to predict using the draft model.", + ) + # KV Cache Quantization + type_k: Optional[int] = Field( + default=None, + description="Type of the key cache quantization.", + ) + type_v: Optional[int] = Field( + default=None, + description="Type of the value cache quantization.", + ) + # Misc + verbose: bool = Field( + default=True, description="Whether to print debug information." + ) + + @model_validator( + mode="before" + ) # pre=True to ensure this runs before any other validation + def set_dynamic_defaults(self) -> Self: + # If n_threads or n_threads_batch is -1, set it to multiprocessing.cpu_count() + cpu_count = multiprocessing.cpu_count() + values = cast(Dict[str, int], self) + if values.get("n_threads", 0) == -1: + values["n_threads"] = cpu_count + if values.get("n_threads_batch", 0) == -1: + values["n_threads_batch"] = cpu_count + return self + + +class ServerSettings(BaseSettings): + """Server settings used to configure the FastAPI and Uvicorn server.""" + + # Uvicorn Settings + host: str = Field(default="localhost", description="Listen address") + port: int = Field(default=8000, description="Listen port") + ssl_keyfile: Optional[str] = Field( + default=None, description="SSL key file for HTTPS" + ) + ssl_certfile: Optional[str] = Field( + default=None, description="SSL certificate file for HTTPS" + ) + # FastAPI Settings + api_key: Optional[str] = Field( + default=None, + description="API key for authentication. 
If set all requests need to be authenticated.", + ) + interrupt_requests: bool = Field( + default=True, + description="Whether to interrupt requests when a new request is received.", + ) + disable_ping_events: bool = Field( + default=False, + description="Disable EventSource pings (may be needed for some clients).", + ) + root_path: str = Field( + default="", + description="The root path for the server. Useful when running behind a reverse proxy.", + ) + + +class Settings(ServerSettings, ModelSettings): + pass + + +class ConfigFileSettings(ServerSettings): + """Configuration file format settings.""" + + models: List[ModelSettings] = Field(default=[], description="Model configs") diff --git a/llama_cpp/server/types.py b/llama_cpp/server/types.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd16445685f425c9fd4092128bda65c32f55ce7 --- /dev/null +++ b/llama_cpp/server/types.py @@ -0,0 +1,316 @@ +from __future__ import annotations + +from typing import List, Optional, Union, Dict +from typing_extensions import TypedDict, Literal + +from pydantic import BaseModel, Field + +import llama_cpp + + +model_field = Field( + description="The model to use for generating completions.", default=None +) + +max_tokens_field = Field( + default=16, ge=1, description="The maximum number of tokens to generate." +) + +min_tokens_field = Field( + default=0, + ge=0, + description="The minimum number of tokens to generate. It may return fewer tokens if another condition is met (e.g. max_tokens, stop).", +) + +temperature_field = Field( + default=0.8, + description="Adjust the randomness of the generated text.\n\n" + + "Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run.", +) + +top_p_field = Field( + default=0.95, + ge=0.0, + le=1.0, + description="Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\n\n" + + "Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.", +) + +min_p_field = Field( + default=0.05, + ge=0.0, + le=1.0, + description="Sets a minimum base probability threshold for token selection.\n\n" + + "The Min-P sampling method was designed as an alternative to Top-P, and aims to ensure a balance of quality and variety. The parameter min_p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.", +) + +stop_field = Field( + default=None, + description="A list of tokens at which to stop generation. 
If None, no stop tokens are used.", +) + +stream_field = Field( + default=False, + description="Whether to stream the results as they are generated. Useful for chatbots.", +) + +top_k_field = Field( + default=40, + ge=0, + description="Limit the next token selection to the K most probable tokens.\n\n" + + "Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text.", +) + +repeat_penalty_field = Field( + default=1.1, + ge=0.0, + description="A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\n\n" + + "Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient.", +) + +presence_penalty_field = Field( + default=0.0, + ge=-2.0, + le=2.0, + description="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", +) + +frequency_penalty_field = Field( + default=0.0, + ge=-2.0, + le=2.0, + description="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.", +) + +mirostat_mode_field = Field( + default=0, + ge=0, + le=2, + description="Enable Mirostat constant-perplexity algorithm of the specified version (1 or 2; 0 = disabled)", +) + +mirostat_tau_field = Field( + default=5.0, + ge=0.0, + le=10.0, + description="Mirostat target entropy, i.e. the target perplexity - lower values produce focused and coherent text, larger values produce more diverse and less coherent text", +) + +mirostat_eta_field = Field( + default=0.1, ge=0.001, le=1.0, description="Mirostat learning rate" +) + +grammar = Field( + default=None, + description="A CBNF grammar (as string) to be used for formatting the model's output.", +) + + +class CreateCompletionRequest(BaseModel): + prompt: Union[str, List[str]] = Field( + default="", description="The prompt to generate completions for." + ) + suffix: Optional[str] = Field( + default=None, + description="A suffix to append to the generated text. If None, no suffix is appended. Useful for chatbots.", + ) + max_tokens: Optional[int] = Field( + default=16, ge=0, description="The maximum number of tokens to generate." + ) + min_tokens: int = min_tokens_field + temperature: float = temperature_field + top_p: float = top_p_field + min_p: float = min_p_field + echo: bool = Field( + default=False, + description="Whether to echo the prompt in the generated text. Useful for chatbots.", + ) + stop: Optional[Union[str, List[str]]] = stop_field + stream: bool = stream_field + logprobs: Optional[int] = Field( + default=None, + ge=0, + description="The number of logprobs to generate. 
If None, no logprobs are generated.", + ) + presence_penalty: Optional[float] = presence_penalty_field + frequency_penalty: Optional[float] = frequency_penalty_field + logit_bias: Optional[Dict[str, float]] = Field(None) + seed: Optional[int] = Field(None) + + # ignored or currently unsupported + model: Optional[str] = model_field + n: Optional[int] = 1 + best_of: Optional[int] = 1 + user: Optional[str] = Field(default=None) + + # llama.cpp specific parameters + top_k: int = top_k_field + repeat_penalty: float = repeat_penalty_field + logit_bias_type: Optional[Literal["input_ids", "tokens"]] = Field(None) + mirostat_mode: int = mirostat_mode_field + mirostat_tau: float = mirostat_tau_field + mirostat_eta: float = mirostat_eta_field + grammar: Optional[str] = None + + model_config = { + "json_schema_extra": { + "examples": [ + { + "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n", + "stop": ["\n", "###"], + } + ] + } + } + + +class CreateEmbeddingRequest(BaseModel): + model: Optional[str] = model_field + input: Union[str, List[str]] = Field(description="The input to embed.") + user: Optional[str] = Field(default=None) + + model_config = { + "json_schema_extra": { + "examples": [ + { + "input": "The food was delicious and the waiter...", + } + ] + } + } + + +class ChatCompletionRequestMessage(BaseModel): + role: Literal["system", "user", "assistant", "function"] = Field( + default="user", description="The role of the message." + ) + content: Optional[str] = Field( + default="", description="The content of the message." + ) + + +class CreateChatCompletionRequest(BaseModel): + messages: List[llama_cpp.ChatCompletionRequestMessage] = Field( + default=[], description="A list of messages to generate completions for." + ) + functions: Optional[List[llama_cpp.ChatCompletionFunction]] = Field( + default=None, + description="A list of functions to apply to the generated completions.", + ) + function_call: Optional[llama_cpp.ChatCompletionRequestFunctionCall] = Field( + default=None, + description="A function to apply to the generated completions.", + ) + tools: Optional[List[llama_cpp.ChatCompletionTool]] = Field( + default=None, + description="A list of tools to apply to the generated completions.", + ) + tool_choice: Optional[llama_cpp.ChatCompletionToolChoiceOption] = Field( + default=None, + description="A tool to apply to the generated completions.", + ) # TODO: verify + max_tokens: Optional[int] = Field( + default=None, + description="The maximum number of tokens to generate. Defaults to inf", + ) + min_tokens: int = min_tokens_field + logprobs: Optional[bool] = Field( + default=False, + description="Whether to output the logprobs or not. Default is True", + ) + top_logprobs: Optional[int] = Field( + default=None, + ge=0, + description="The number of logprobs to generate. If None, no logprobs are generated. 
logprobs needs to be set to True.", + ) + temperature: float = temperature_field + top_p: float = top_p_field + min_p: float = min_p_field + stop: Optional[Union[str, List[str]]] = stop_field + stream: bool = stream_field + presence_penalty: Optional[float] = presence_penalty_field + frequency_penalty: Optional[float] = frequency_penalty_field + logit_bias: Optional[Dict[str, float]] = Field(None) + seed: Optional[int] = Field(None) + response_format: Optional[llama_cpp.ChatCompletionRequestResponseFormat] = Field( + default=None, + ) + + # ignored or currently unsupported + model: Optional[str] = model_field + n: Optional[int] = 1 + user: Optional[str] = Field(None) + + # llama.cpp specific parameters + top_k: int = top_k_field + repeat_penalty: float = repeat_penalty_field + logit_bias_type: Optional[Literal["input_ids", "tokens"]] = Field(None) + mirostat_mode: int = mirostat_mode_field + mirostat_tau: float = mirostat_tau_field + mirostat_eta: float = mirostat_eta_field + grammar: Optional[str] = None + + model_config = { + "json_schema_extra": { + "examples": [ + { + "messages": [ + ChatCompletionRequestMessage( + role="system", content="You are a helpful assistant." + ).model_dump(), + ChatCompletionRequestMessage( + role="user", content="What is the capital of France?" + ).model_dump(), + ] + } + ] + } + } + + +class ModelData(TypedDict): + id: str + object: Literal["model"] + owned_by: str + permissions: List[str] + + +class ModelList(TypedDict): + object: Literal["list"] + data: List[ModelData] + + +class TokenizeInputRequest(BaseModel): + model: Optional[str] = model_field + input: str = Field(description="The input to tokenize.") + + model_config = { + "json_schema_extra": {"examples": [{"input": "How many tokens in this query?"}]} + } + + +class TokenizeInputResponse(BaseModel): + tokens: List[int] = Field(description="A list of tokens.") + + model_config = {"json_schema_extra": {"example": {"tokens": [123, 321, 222]}}} + + +class TokenizeInputCountResponse(BaseModel): + count: int = Field(description="The number of tokens in the input.") + + model_config = {"json_schema_extra": {"example": {"count": 5}}} + + +class DetokenizeInputRequest(BaseModel): + model: Optional[str] = model_field + tokens: List[int] = Field(description="A list of tokens to detokenize.") + + model_config = {"json_schema_extra": {"example": [{"tokens": [123, 321, 222]}]}} + + +class DetokenizeInputResponse(BaseModel): + text: str = Field(description="The detokenized text.") + + model_config = { + "json_schema_extra": {"example": {"text": "How many tokens in this query?"}} + } diff --git a/tinyllama-1.1B-q4.gguf b/tinyllama-1.1B-q4.gguf new file mode 100644 index 0000000000000000000000000000000000000000..08792d2adfcedb30fcaa03000a5f7e0a080c4e35 --- /dev/null +++ b/tinyllama-1.1B-q4.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd013692b131e7134b159819e22da1eba85c0f4e40acf3e991365b414416b9bf +size 636727552
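The kv_overrides handling in llama_cpp/server/model.py above accepts entries of the form key=type:value, where type is one of bool, int, float or str. Below is a standalone sketch of that format (it requires the type prefix, and the override key shown is illustrative; the resulting dict is what Llama(kv_overrides=...) expects):

from typing import Dict, List, Union

def parse_kv_overrides(items: List[str]) -> Dict[str, Union[bool, int, float, str]]:
    # "key=type:value", with type in (bool, int, float, str).
    overrides: Dict[str, Union[bool, int, float, str]] = {}
    for item in items:
        key, value = item.split("=", 1)
        value_type, _, raw = value.partition(":")
        if value_type == "bool":
            overrides[key] = raw.lower() in ("true", "1")
        elif value_type == "int":
            overrides[key] = int(raw)
        elif value_type == "float":
            overrides[key] = float(raw)
        elif value_type == "str":
            overrides[key] = raw
        else:
            raise ValueError(f"Unknown value type {value_type}")
    return overrides

print(parse_kv_overrides(["tokenizer.ggml.add_bos_token=bool:false"]))
# {'tokenizer.ggml.add_bos_token': False}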
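The chat_format branches above pair a GGUF language model with a CLIP projector through a vision chat handler. A minimal direct-usage sketch, using the moondream2 repo id and filename patterns from the upstream llama-cpp-python documentation (the image URL is a placeholder):

from llama_cpp import Llama
from llama_cpp.llama_chat_format import MoondreamChatHandler

chat_handler = MoondreamChatHandler.from_pretrained(
    repo_id="vikhyatk/moondream2",
    filename="*mmproj*",
)
llm = Llama.from_pretrained(
    repo_id="vikhyatk/moondream2",
    filename="*text-model*",
    chat_handler=chat_handler,
    n_ctx=2048,  # leave room for the image embedding tokens
)
response = llm.create_chat_completion(
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image in one sentence."},
                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
            ],
        }
    ]
)
print(response["choices"][0]["message"]["content"])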
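When hf_pretrained_model_name_or_path is set, the loader swaps the built-in tokenizer for a HuggingFace one via LlamaHFTokenizer. The equivalent direct call is sketched below, assuming the transformers package is installed; the tokenizer repo name is illustrative:

from llama_cpp import Llama
from llama_cpp.llama_tokenizer import LlamaHFTokenizer

llm = Llama(
    model_path="./tinyllama-1.1B-q4.gguf",
    tokenizer=LlamaHFTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0"),
    verbose=False,
)
print(llm.tokenize(b"Hello, world!"))  # token ids produced by the HF tokenizer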
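Setting draft_model enables prompt-lookup speculative decoding in the loader above. A minimal sketch using the TinyLlama model tracked in this repo:

from llama_cpp import Llama
from llama_cpp.llama_speculative import LlamaPromptLookupDecoding

llm = Llama(
    model_path="./tinyllama-1.1B-q4.gguf",
    draft_model=LlamaPromptLookupDecoding(num_pred_tokens=10),  # same default as draft_model_num_pred_tokens
    n_ctx=2048,
    verbose=False,
)
out = llm("Q: Name three primary colors.\nA:", max_tokens=64, stop=["\n"])
print(out["choices"][0]["text"].strip())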
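The cache, cache_type and cache_size settings translate into a LlamaRAMCache or LlamaDiskCache attached with set_cache at the end of the loader. Doing the same by hand:

from llama_cpp import Llama, LlamaRAMCache

llm = Llama(model_path="./tinyllama-1.1B-q4.gguf", n_ctx=2048, verbose=False)
llm.set_cache(LlamaRAMCache(capacity_bytes=2 << 30))  # ~2 GiB, the ModelSettings.cache_size default

out1 = llm("Q: What is 2 + 2?\nA:", max_tokens=16, stop=["\n"])
out2 = llm("Q: What is 2 + 2?\nA:", max_tokens=16, stop=["\n"])  # shared prefix is served from the cache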
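ModelSettings and ServerSettings are pydantic-settings models, so every field can also be supplied via environment variables or the server's CLI flags. A sketch of programmatic startup, assuming the upstream llama_cpp.server.app.create_app factory (not part of this diff excerpt) is available:

import uvicorn

from llama_cpp.server.app import create_app  # assumed upstream factory
from llama_cpp.server.settings import Settings

settings = Settings(
    model="./tinyllama-1.1B-q4.gguf",
    n_gpu_layers=-1,   # offload all layers that fit to the GPU
    n_threads=-1,      # resolved to multiprocessing.cpu_count() by set_dynamic_defaults
    cache=True,
    cache_type="ram",
)
app = create_app(settings=settings)
uvicorn.run(app, host=settings.host, port=settings.port)

The same options map to CLI flags of the same names in the upstream server, e.g. python -m llama_cpp.server --model ./tinyllama-1.1B-q4.gguf --n_gpu_layers -1.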
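ConfigFileSettings allows one process to serve several models. A sketch that writes such a config; the second model path is hypothetical, and loading it with a --config_file flag follows upstream llama-cpp-python conventions:

from llama_cpp.server.settings import ConfigFileSettings, ModelSettings

config = ConfigFileSettings(
    host="0.0.0.0",
    port=8000,
    models=[
        ModelSettings(model="./tinyllama-1.1B-q4.gguf", model_alias="tinyllama", n_ctx=2048),
        ModelSettings(model="./some-larger-model.gguf", model_alias="larger", n_gpu_layers=-1),  # hypothetical path
    ],
)
with open("config.json", "w") as f:
    f.write(config.model_dump_json(indent=2))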
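CreateChatCompletionRequest mirrors the OpenAI chat schema, so the official openai client can talk to a running server; the llama.cpp-specific fields (top_k, repeat_penalty, min_p, mirostat_*) are assumed here to pass through extra_body. The model alias and port are illustrative:

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="sk-not-needed")
resp = client.chat.completions.create(
    model="tinyllama",  # matched against model / model_alias on the server
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"},
    ],
    temperature=0.8,
    top_p=0.95,
    extra_body={"top_k": 40, "repeat_penalty": 1.1, "min_p": 0.05},
)
print(resp.choices[0].message.content)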
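TokenizeInputRequest, TokenizeInputCountResponse and DetokenizeInputRequest back the tokenizer routes; the /extras/... paths below follow the upstream server layout and are an assumption in this sketch:

import requests

base = "http://localhost:8000"
payload = {"input": "How many tokens in this query?"}
tokens = requests.post(f"{base}/extras/tokenize", json=payload).json()["tokens"]
count = requests.post(f"{base}/extras/tokenize/count", json=payload).json()["count"]
text = requests.post(f"{base}/extras/detokenize", json={"tokens": tokens}).json()["text"]
print(count, tokens, text)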