Datasets:

Modalities:
Text
Formats:
text
Size:
< 1K
ArXiv:
Libraries:
Datasets
dlxj commited on
Commit
2517be1
·
1 Parent(s): 9962f2d

todo: 基于 CUDA 13.0 编译

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .clang-format +171 -0
  2. .clang-tidy +28 -0
  3. .devops/cann.Dockerfile +130 -0
  4. .devops/cpu.Dockerfile +88 -0
  5. .devops/cuda-new.Dockerfile +95 -0
  6. .devops/cuda.Dockerfile +94 -0
  7. .devops/intel.Dockerfile +95 -0
  8. .devops/llama-cli-cann.Dockerfile +45 -0
  9. .devops/llama-cpp-cuda.srpm.spec +85 -0
  10. .devops/llama-cpp.srpm.spec +87 -0
  11. .devops/musa.Dockerfile +101 -0
  12. .devops/nix/apps.nix +21 -0
  13. .devops/nix/devshells.nix +52 -0
  14. .devops/nix/docker.nix +37 -0
  15. .devops/nix/jetson-support.nix +39 -0
  16. .devops/nix/nixpkgs-instances.nix +45 -0
  17. .devops/nix/package-gguf-py.nix +38 -0
  18. .devops/nix/package.nix +243 -0
  19. .devops/nix/python-scripts.nix +66 -0
  20. .devops/nix/scope.nix +35 -0
  21. .devops/nix/sif.nix +27 -0
  22. .devops/openvino.Dockerfile +138 -0
  23. .devops/rocm.Dockerfile +113 -0
  24. .devops/s390x.Dockerfile +126 -0
  25. .devops/tools.sh +53 -0
  26. .devops/vulkan.Dockerfile +91 -0
  27. .dockerignore +20 -0
  28. .ecrc +6 -0
  29. .editorconfig +70 -0
  30. .flake8 +18 -0
  31. .gemini/settings.json +1 -0
  32. .gitattributes +3 -0
  33. .github/ISSUE_TEMPLATE/010-bug-compilation.yml +88 -0
  34. .github/ISSUE_TEMPLATE/011-bug-results.yml +115 -0
  35. .github/ISSUE_TEMPLATE/019-bug-misc.yml +103 -0
  36. .github/ISSUE_TEMPLATE/020-enhancement.yml +51 -0
  37. .github/ISSUE_TEMPLATE/030-research.yml +52 -0
  38. .github/ISSUE_TEMPLATE/040-refactor.yml +28 -0
  39. .github/ISSUE_TEMPLATE/config.yml +11 -0
  40. .github/actions/get-tag-name/action.yml +22 -0
  41. .github/actions/install-exe/action.yml +36 -0
  42. .github/actions/linux-setup-openvino/action.yml +25 -0
  43. .github/actions/linux-setup-spacemit/action.yml +20 -0
  44. .github/actions/linux-setup-vulkan/action.yml +20 -0
  45. .github/actions/unarchive-tar/action.yml +27 -0
  46. .github/actions/windows-setup-cuda/action.yml +98 -0
  47. .github/actions/windows-setup-rocm/action.yml +15 -0
  48. .github/labeler.yml +123 -0
  49. .github/pull_request_template.md +1 -0
  50. .github/workflows/bench.yml.disabled +304 -0
.clang-format ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ Language: Cpp
3
+ AlignAfterOpenBracket: Align
4
+ AlignArrayOfStructures: Left
5
+ AlignConsecutiveAssignments: AcrossComments
6
+ AlignConsecutiveBitFields: AcrossComments
7
+ AlignConsecutiveDeclarations: AcrossComments
8
+ AlignConsecutiveMacros: AcrossComments
9
+ # AlignConsecutiveShortCaseStatements: AcrossComments
10
+ AlignEscapedNewlines: Left # LeftWithLastLine
11
+ AlignOperands: Align
12
+ AlignTrailingComments:
13
+ Kind: Always
14
+ OverEmptyLines: 1
15
+ AllowAllArgumentsOnNextLine: true
16
+ AllowAllParametersOfDeclarationOnNextLine: false
17
+ # AllowBreakBeforeNoexceptSpecifier: OnlyWithParen
18
+ AllowShortBlocksOnASingleLine: Never
19
+ AllowShortCaseLabelsOnASingleLine: false
20
+ AllowShortFunctionsOnASingleLine: Inline
21
+ AllowShortIfStatementsOnASingleLine: Never
22
+ AllowShortLambdasOnASingleLine: Inline
23
+ AllowShortLoopsOnASingleLine: false
24
+ AlwaysBreakBeforeMultilineStrings: true
25
+ # Treat CUDA keywords/attributes as "attribute macros" and avoid breaking lines inside them
26
+ AttributeMacros:
27
+ - __host__
28
+ - __device__
29
+ - __global__
30
+ - __forceinline__
31
+ - __launch_bounds__
32
+ BinPackArguments: true
33
+ BinPackParameters: false # OnePerLine
34
+ BitFieldColonSpacing: Both
35
+ BreakBeforeBraces: Custom # Attach
36
+ BraceWrapping:
37
+ AfterCaseLabel: true
38
+ AfterClass: false
39
+ AfterControlStatement: false
40
+ AfterEnum: false
41
+ AfterFunction: false
42
+ AfterNamespace: false
43
+ AfterObjCDeclaration: false
44
+ AfterStruct: false
45
+ AfterUnion: false
46
+ AfterExternBlock: false
47
+ BeforeCatch: false
48
+ BeforeElse: false
49
+ BeforeLambdaBody: false
50
+ BeforeWhile: false
51
+ IndentBraces: false
52
+ SplitEmptyFunction: false
53
+ SplitEmptyRecord: false
54
+ SplitEmptyNamespace: false
55
+ # BreakAdjacentStringLiterals: true
56
+ BreakAfterAttributes: Never
57
+ BreakBeforeBinaryOperators: None
58
+ BreakBeforeInlineASMColon: OnlyMultiline
59
+ BreakBeforeTernaryOperators: false
60
+ # BreakBinaryOperations: Never
61
+ BreakConstructorInitializers: AfterColon
62
+ # BreakFunctionDefinitionParameters: false
63
+ BreakInheritanceList: AfterComma
64
+ BreakStringLiterals: true
65
+ # BreakTemplateDeclarations: Yes
66
+ ColumnLimit: 120
67
+ CommentPragmas: '^ IWYU pragma:'
68
+ CompactNamespaces: false
69
+ ConstructorInitializerIndentWidth: 4
70
+ ContinuationIndentWidth: 4
71
+ Cpp11BracedListStyle: false
72
+ DerivePointerAlignment: false
73
+ DisableFormat: false
74
+ EmptyLineBeforeAccessModifier: Leave
75
+ EmptyLineAfterAccessModifier: Never
76
+ ExperimentalAutoDetectBinPacking: false
77
+ FixNamespaceComments: true
78
+ IncludeBlocks: Regroup
79
+ IncludeCategories:
80
+ - Regex: '".*"'
81
+ Priority: 1
82
+ SortPriority: 0
83
+ - Regex: '^<.*\.h>'
84
+ Priority: 2
85
+ SortPriority: 0
86
+ - Regex: '^<.*'
87
+ Priority: 3
88
+ SortPriority: 0
89
+ - Regex: '.*'
90
+ Priority: 4
91
+ SortPriority: 0
92
+ IncludeIsMainRegex: '([-_](test|unittest))?$'
93
+ IncludeIsMainSourceRegex: ''
94
+ IndentAccessModifiers: false
95
+ IndentCaseBlocks: true
96
+ IndentCaseLabels: true
97
+ IndentExternBlock: NoIndent
98
+ IndentGotoLabels: false
99
+ IndentPPDirectives: AfterHash
100
+ IndentWidth: 4
101
+ IndentWrappedFunctionNames: false
102
+ InsertBraces: true # NOTE: may lead to incorrect formatting
103
+ InsertNewlineAtEOF: true
104
+ JavaScriptQuotes: Leave
105
+ JavaScriptWrapImports: true
106
+ KeepEmptyLinesAtTheStartOfBlocks: false
107
+ LambdaBodyIndentation: Signature
108
+ LineEnding: LF
109
+ MacroBlockBegin: ''
110
+ MacroBlockEnd: ''
111
+ MaxEmptyLinesToKeep: 1
112
+ NamespaceIndentation: None
113
+ ObjCBinPackProtocolList: Auto
114
+ ObjCBlockIndentWidth: 4
115
+ ObjCSpaceAfterProperty: true
116
+ ObjCSpaceBeforeProtocolList: true
117
+ PPIndentWidth: -1
118
+ PackConstructorInitializers: CurrentLine
119
+ PenaltyBreakAssignment: 2
120
+ PenaltyBreakBeforeFirstCallParameter: 1
121
+ PenaltyBreakComment: 300
122
+ PenaltyBreakFirstLessLess: 120
123
+ PenaltyBreakString: 1000
124
+ PenaltyBreakTemplateDeclaration: 10
125
+ PenaltyExcessCharacter: 1000000
126
+ PenaltyReturnTypeOnItsOwnLine: 200
127
+ PointerAlignment: Middle
128
+ QualifierAlignment: Left
129
+ #QualifierOrder: ['static', 'inline', 'friend', 'constexpr', 'const', 'volatile', 'type', 'restrict']
130
+ RawStringFormats:
131
+ - Language: Cpp
132
+ Delimiters:
133
+ - cc
134
+ - CC
135
+ - cpp
136
+ - Cpp
137
+ - CPP
138
+ - 'c++'
139
+ - 'C++'
140
+ CanonicalDelimiter: ''
141
+ ReferenceAlignment: Middle
142
+ ReflowComments: false # IndentOnly
143
+ SeparateDefinitionBlocks: Always
144
+ SortIncludes: CaseInsensitive
145
+ SortUsingDeclarations: LexicographicNumeric
146
+ SpaceAfterCStyleCast: true
147
+ SpaceAfterLogicalNot: false
148
+ SpaceAfterTemplateKeyword: true
149
+ SpaceBeforeAssignmentOperators: true
150
+ SpaceBeforeCpp11BracedList: false
151
+ SpaceBeforeCtorInitializerColon: true
152
+ SpaceBeforeInheritanceColon: true
153
+ SpaceBeforeParens: ControlStatements
154
+ SpaceBeforeRangeBasedForLoopColon: true
155
+ SpaceInEmptyBlock: false
156
+ SpaceInEmptyParentheses: false
157
+ SpacesBeforeTrailingComments: 2
158
+ SpacesInAngles: Never
159
+ SpacesInContainerLiterals: true
160
+ SpacesInLineCommentPrefix:
161
+ Minimum: 1
162
+ Maximum: -1
163
+ SpacesInParentheses: false
164
+ SpacesInSquareBrackets: false
165
+ SpaceBeforeSquareBrackets: false
166
+ Standard: c++17
167
+ TabWidth: 4
168
+ UseTab: Never
169
+ WhitespaceSensitiveMacros: ['STRINGIZE']
170
+ ...
171
+
.clang-tidy ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ Checks: >
3
+ bugprone-*,
4
+ -bugprone-easily-swappable-parameters,
5
+ -bugprone-implicit-widening-of-multiplication-result,
6
+ -bugprone-misplaced-widening-cast,
7
+ -bugprone-narrowing-conversions,
8
+ readability-*,
9
+ -readability-avoid-unconditional-preprocessor-if,
10
+ -readability-function-cognitive-complexity,
11
+ -readability-identifier-length,
12
+ -readability-implicit-bool-conversion,
13
+ -readability-magic-numbers,
14
+ -readability-uppercase-literal-suffix,
15
+ -readability-simplify-boolean-expr,
16
+ -readability-math-missing-parentheses,
17
+ clang-analyzer-*,
18
+ -clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,
19
+ performance-*,
20
+ -performance-enum-size,
21
+ portability-*,
22
+ -portability-simd-intrinsics,
23
+ misc-*,
24
+ -misc-const-correctness,
25
+ -misc-non-private-member-variables-in-classes,
26
+ -misc-no-recursion,
27
+ -misc-use-anonymous-namespace,
28
+ FormatStyle: none
.devops/cann.Dockerfile ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ==============================================================================
2
+ # ARGUMENTS
3
+ # ==============================================================================
4
+
5
+ # Define the CANN base image for easier version updates later
6
+ ARG CHIP_TYPE=910b
7
+ ARG CANN_BASE_IMAGE=quay.io/ascend/cann:8.3.rc2-${CHIP_TYPE}-openeuler24.03-py3.11
8
+
9
+ # ==============================================================================
10
+ # BUILD STAGE
11
+ # Compile all binary files and libraries
12
+ # ==============================================================================
13
+ FROM ${CANN_BASE_IMAGE} AS build
14
+
15
+ # -- Install build dependencies --
16
+ RUN yum install -y gcc g++ cmake make git openssl-devel python3 python3-pip && \
17
+ yum clean all && \
18
+ rm -rf /var/cache/yum
19
+
20
+ # -- Set the working directory --
21
+ WORKDIR /app
22
+
23
+ # -- Copy project files --
24
+ COPY . .
25
+
26
+ # -- Set CANN environment variables (required for compilation) --
27
+ # Using ENV instead of `source` allows environment variables to persist across the entire image layer
28
+ ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
29
+ ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${LD_LIBRARY_PATH}
30
+ ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${PATH}
31
+ ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
32
+ ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/runtime/lib64/stub:$LD_LIBRARY_PATH
33
+ # ... You can add other environment variables from the original file as needed ...
34
+ # For brevity, only core variables are listed here. You can paste the original ENV list here.
35
+
36
+ # -- Build llama.cpp --
37
+ # Use the passed CHIP_TYPE argument and add general build options
38
+ ARG CHIP_TYPE
39
+ RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh --force \
40
+ && \
41
+ cmake -B build \
42
+ -DGGML_CANN=ON \
43
+ -DCMAKE_BUILD_TYPE=Release \
44
+ -DSOC_TYPE=ascend${CHIP_TYPE} \
45
+ -DUSE_ACL_GRAPH=ON \
46
+ . && \
47
+ cmake --build build --config Release -j$(nproc)
48
+
49
+ # -- Organize build artifacts for copying in later stages --
50
+ # Create a lib directory to store all .so files
51
+ RUN mkdir -p /app/lib && \
52
+ find build -name "*.so*" -exec cp -P {} /app/lib \;
53
+
54
+ # Create a full directory to store all executables and Python scripts
55
+ RUN mkdir -p /app/full && \
56
+ cp build/bin/* /app/full/ && \
57
+ cp *.py /app/full/ && \
58
+ cp -r gguf-py /app/full/ && \
59
+ cp -r requirements /app/full/ && \
60
+ cp requirements.txt /app/full/
61
+ # If you have a tools.sh script, make sure it is copied here
62
+ # cp .devops/tools.sh /app/full/tools.sh
63
+
64
+ # ==============================================================================
65
+ # BASE STAGE
66
+ # Create a minimal base image with CANN runtime and common libraries
67
+ # ==============================================================================
68
+ FROM ${CANN_BASE_IMAGE} AS base
69
+
70
+ # -- Install runtime dependencies --
71
+ RUN yum install -y libgomp curl && \
72
+ yum clean all && \
73
+ rm -rf /var/cache/yum
74
+
75
+ # -- Set CANN environment variables (required for runtime) --
76
+ ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
77
+ ENV LD_LIBRARY_PATH=/app:${ASCEND_TOOLKIT_HOME}/lib64:${LD_LIBRARY_PATH}
78
+ ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${PATH}
79
+ ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
80
+ # ... You can add other environment variables from the original file as needed ...
81
+
82
+ WORKDIR /app
83
+
84
+ # Copy compiled .so files from the build stage
85
+ COPY --from=build /app/lib/ /app
86
+
87
+ # ==============================================================================
88
+ # FINAL STAGES (TARGETS)
89
+ # ==============================================================================
90
+
91
+ ### Target: full
92
+ # Complete image with all tools, Python bindings, and dependencies
93
+ # ==============================================================================
94
+ FROM base AS full
95
+
96
+ COPY --from=build /app/full /app
97
+
98
+ # Install Python dependencies
99
+ RUN yum install -y git python3 python3-pip && \
100
+ pip3 install --no-cache-dir --upgrade pip setuptools wheel && \
101
+ pip3 install --no-cache-dir -r requirements.txt && \
102
+ yum clean all && \
103
+ rm -rf /var/cache/yum
104
+
105
+ # You need to provide a tools.sh script as the entrypoint
106
+ ENTRYPOINT ["/app/tools.sh"]
107
+ # If there is no tools.sh, you can set the default to start the server
108
+ # ENTRYPOINT ["/app/llama-server"]
109
+
110
+ ### Target: light
111
+ # Lightweight image containing only llama-cli and llama-completion
112
+ # ==============================================================================
113
+ FROM base AS light
114
+
115
+ COPY --from=build /app/full/llama-cli /app/full/llama-completion /app
116
+
117
+ ENTRYPOINT [ "/app/llama-cli" ]
118
+
119
+ ### Target: server
120
+ # Dedicated server image containing only llama-server
121
+ # ==============================================================================
122
+ FROM base AS server
123
+
124
+ ENV LLAMA_ARG_HOST=0.0.0.0
125
+
126
+ COPY --from=build /app/full/llama-server /app
127
+
128
+ HEALTHCHECK --interval=5m CMD [ "curl", "-f", "http://localhost:8080/health" ]
129
+
130
+ ENTRYPOINT [ "/app/llama-server" ]
.devops/cpu.Dockerfile ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG UBUNTU_VERSION=22.04
2
+
3
+ FROM ubuntu:$UBUNTU_VERSION AS build
4
+
5
+ ARG TARGETARCH
6
+
7
+ RUN apt-get update && \
8
+ apt-get install -y build-essential git cmake libssl-dev
9
+
10
+ WORKDIR /app
11
+
12
+ COPY . .
13
+
14
+ RUN if [ "$TARGETARCH" = "amd64" ] || [ "$TARGETARCH" = "arm64" ]; then \
15
+ cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DLLAMA_BUILD_TESTS=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON; \
16
+ else \
17
+ echo "Unsupported architecture"; \
18
+ exit 1; \
19
+ fi && \
20
+ cmake --build build -j $(nproc)
21
+
22
+ RUN mkdir -p /app/lib && \
23
+ find build -name "*.so*" -exec cp -P {} /app/lib \;
24
+
25
+ RUN mkdir -p /app/full \
26
+ && cp build/bin/* /app/full \
27
+ && cp *.py /app/full \
28
+ && cp -r gguf-py /app/full \
29
+ && cp -r requirements /app/full \
30
+ && cp requirements.txt /app/full \
31
+ && cp .devops/tools.sh /app/full/tools.sh
32
+
33
+ ## Base image
34
+ FROM ubuntu:$UBUNTU_VERSION AS base
35
+
36
+ RUN apt-get update \
37
+ && apt-get install -y libgomp1 curl\
38
+ && apt autoremove -y \
39
+ && apt clean -y \
40
+ && rm -rf /tmp/* /var/tmp/* \
41
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
42
+ && find /var/cache -type f -delete
43
+
44
+ COPY --from=build /app/lib/ /app
45
+
46
+ ### Full
47
+ FROM base AS full
48
+
49
+ COPY --from=build /app/full /app
50
+
51
+ WORKDIR /app
52
+
53
+ RUN apt-get update \
54
+ && apt-get install -y \
55
+ git \
56
+ python3 \
57
+ python3-pip \
58
+ && pip install --upgrade pip setuptools wheel \
59
+ && pip install -r requirements.txt \
60
+ && apt autoremove -y \
61
+ && apt clean -y \
62
+ && rm -rf /tmp/* /var/tmp/* \
63
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
64
+ && find /var/cache -type f -delete
65
+
66
+ ENTRYPOINT ["/app/tools.sh"]
67
+
68
+ ### Light, CLI only
69
+ FROM base AS light
70
+
71
+ COPY --from=build /app/full/llama-cli /app/full/llama-completion /app
72
+
73
+ WORKDIR /app
74
+
75
+ ENTRYPOINT [ "/app/llama-cli" ]
76
+
77
+ ### Server, Server only
78
+ FROM base AS server
79
+
80
+ ENV LLAMA_ARG_HOST=0.0.0.0
81
+
82
+ COPY --from=build /app/full/llama-server /app
83
+
84
+ WORKDIR /app
85
+
86
+ HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
87
+
88
+ ENTRYPOINT [ "/app/llama-server" ]
.devops/cuda-new.Dockerfile ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG UBUNTU_VERSION=24.04
2
+ # This needs to generally match the container host's environment.
3
+ ARG CUDA_VERSION=13.1.0
4
+ # Target the CUDA build image
5
+ ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
6
+
7
+ ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
8
+
9
+ FROM ${BASE_CUDA_DEV_CONTAINER} AS build
10
+
11
+ # CUDA architecture to build for (defaults to all supported archs)
12
+ ARG CUDA_DOCKER_ARCH=default
13
+
14
+ RUN apt-get update && \
15
+ apt-get install -y build-essential cmake python3 python3-pip git libssl-dev libgomp1
16
+
17
+ WORKDIR /app
18
+
19
+ COPY . .
20
+
21
+ RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
22
+ export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
23
+ fi && \
24
+ cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_BUILD_TESTS=OFF ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
25
+ cmake --build build --config Release -j$(nproc)
26
+
27
+ RUN mkdir -p /app/lib && \
28
+ find build -name "*.so*" -exec cp -P {} /app/lib \;
29
+
30
+ RUN mkdir -p /app/full \
31
+ && cp build/bin/* /app/full \
32
+ && cp *.py /app/full \
33
+ && cp -r gguf-py /app/full \
34
+ && cp -r requirements /app/full \
35
+ && cp requirements.txt /app/full \
36
+ && cp .devops/tools.sh /app/full/tools.sh
37
+
38
+ ## Base image
39
+ FROM ${BASE_CUDA_RUN_CONTAINER} AS base
40
+
41
+ RUN apt-get update \
42
+ && apt-get install -y libgomp1 curl\
43
+ && apt autoremove -y \
44
+ && apt clean -y \
45
+ && rm -rf /tmp/* /var/tmp/* \
46
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
47
+ && find /var/cache -type f -delete
48
+
49
+ COPY --from=build /app/lib/ /app
50
+
51
+ ### Full
52
+ FROM base AS full
53
+
54
+ COPY --from=build /app/full /app
55
+
56
+ WORKDIR /app
57
+
58
+ RUN apt-get update \
59
+ && apt-get install -y \
60
+ git \
61
+ python3 \
62
+ python3-pip \
63
+ python3-wheel \
64
+ && pip install --break-system-packages --upgrade setuptools \
65
+ && pip install --break-system-packages -r requirements.txt \
66
+ && apt autoremove -y \
67
+ && apt clean -y \
68
+ && rm -rf /tmp/* /var/tmp/* \
69
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
70
+ && find /var/cache -type f -delete
71
+
72
+
73
+ ENTRYPOINT ["/app/tools.sh"]
74
+
75
+ ### Light, CLI only
76
+ FROM base AS light
77
+
78
+ COPY --from=build /app/full/llama-cli /app/full/llama-completion /app
79
+
80
+ WORKDIR /app
81
+
82
+ ENTRYPOINT [ "/app/llama-cli" ]
83
+
84
+ ### Server, Server only
85
+ FROM base AS server
86
+
87
+ ENV LLAMA_ARG_HOST=0.0.0.0
88
+
89
+ COPY --from=build /app/full/llama-server /app
90
+
91
+ WORKDIR /app
92
+
93
+ HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
94
+
95
+ ENTRYPOINT [ "/app/llama-server" ]
.devops/cuda.Dockerfile ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG UBUNTU_VERSION=22.04
2
+ # This needs to generally match the container host's environment.
3
+ ARG CUDA_VERSION=12.4.0
4
+ # Target the CUDA build image
5
+ ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
6
+
7
+ ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
8
+
9
+ FROM ${BASE_CUDA_DEV_CONTAINER} AS build
10
+
11
+ # CUDA architecture to build for (defaults to all supported archs)
12
+ ARG CUDA_DOCKER_ARCH=default
13
+
14
+ RUN apt-get update && \
15
+ apt-get install -y build-essential cmake python3 python3-pip git libssl-dev libgomp1
16
+
17
+ WORKDIR /app
18
+
19
+ COPY . .
20
+
21
+ RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
22
+ export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
23
+ fi && \
24
+ cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_BUILD_TESTS=OFF ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
25
+ cmake --build build --config Release -j$(nproc)
26
+
27
+ RUN mkdir -p /app/lib && \
28
+ find build -name "*.so*" -exec cp -P {} /app/lib \;
29
+
30
+ RUN mkdir -p /app/full \
31
+ && cp build/bin/* /app/full \
32
+ && cp *.py /app/full \
33
+ && cp -r gguf-py /app/full \
34
+ && cp -r requirements /app/full \
35
+ && cp requirements.txt /app/full \
36
+ && cp .devops/tools.sh /app/full/tools.sh
37
+
38
+ ## Base image
39
+ FROM ${BASE_CUDA_RUN_CONTAINER} AS base
40
+
41
+ RUN apt-get update \
42
+ && apt-get install -y libgomp1 curl\
43
+ && apt autoremove -y \
44
+ && apt clean -y \
45
+ && rm -rf /tmp/* /var/tmp/* \
46
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
47
+ && find /var/cache -type f -delete
48
+
49
+ COPY --from=build /app/lib/ /app
50
+
51
+ ### Full
52
+ FROM base AS full
53
+
54
+ COPY --from=build /app/full /app
55
+
56
+ WORKDIR /app
57
+
58
+ RUN apt-get update \
59
+ && apt-get install -y \
60
+ git \
61
+ python3 \
62
+ python3-pip \
63
+ && pip install --upgrade pip setuptools wheel \
64
+ && pip install --break-system-packages -r requirements.txt \
65
+ && apt autoremove -y \
66
+ && apt clean -y \
67
+ && rm -rf /tmp/* /var/tmp/* \
68
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
69
+ && find /var/cache -type f -delete
70
+
71
+
72
+ ENTRYPOINT ["/app/tools.sh"]
73
+
74
+ ### Light, CLI only
75
+ FROM base AS light
76
+
77
+ COPY --from=build /app/full/llama-cli /app/full/llama-completion /app
78
+
79
+ WORKDIR /app
80
+
81
+ ENTRYPOINT [ "/app/llama-cli" ]
82
+
83
+ ### Server, Server only
84
+ FROM base AS server
85
+
86
+ ENV LLAMA_ARG_HOST=0.0.0.0
87
+
88
+ COPY --from=build /app/full/llama-server /app
89
+
90
+ WORKDIR /app
91
+
92
+ HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
93
+
94
+ ENTRYPOINT [ "/app/llama-server" ]
.devops/intel.Dockerfile ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG ONEAPI_VERSION=2025.2.2-0-devel-ubuntu24.04
2
+
3
+ ## Build Image
4
+
5
+ FROM intel/deep-learning-essentials:$ONEAPI_VERSION AS build
6
+
7
+ ARG GGML_SYCL_F16=OFF
8
+ RUN apt-get update && \
9
+ apt-get install -y git libssl-dev
10
+
11
+ WORKDIR /app
12
+
13
+ COPY . .
14
+
15
+ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
16
+ echo "GGML_SYCL_F16 is set" \
17
+ && export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
18
+ fi && \
19
+ echo "Building with dynamic libs" && \
20
+ cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_BUILD_TESTS=OFF ${OPT_SYCL_F16} && \
21
+ cmake --build build --config Release -j$(nproc)
22
+
23
+ RUN mkdir -p /app/lib && \
24
+ find build -name "*.so*" -exec cp -P {} /app/lib \;
25
+
26
+ RUN mkdir -p /app/full \
27
+ && cp build/bin/* /app/full \
28
+ && cp *.py /app/full \
29
+ && cp -r gguf-py /app/full \
30
+ && cp -r requirements /app/full \
31
+ && cp requirements.txt /app/full \
32
+ && cp .devops/tools.sh /app/full/tools.sh
33
+
34
+ FROM intel/deep-learning-essentials:$ONEAPI_VERSION AS base
35
+
36
+ RUN apt-get update \
37
+ && apt-get install -y libgomp1 curl\
38
+ && apt autoremove -y \
39
+ && apt clean -y \
40
+ && rm -rf /tmp/* /var/tmp/* \
41
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
42
+ && find /var/cache -type f -delete
43
+
44
+ ### Full
45
+ FROM base AS full
46
+
47
+ COPY --from=build /app/lib/ /app
48
+ COPY --from=build /app/full /app
49
+
50
+ WORKDIR /app
51
+
52
+ RUN apt-get update && \
53
+ apt-get install -y \
54
+ git \
55
+ python3 \
56
+ python3-pip \
57
+ python3-venv && \
58
+ python3 -m venv /opt/venv && \
59
+ . /opt/venv/bin/activate && \
60
+ pip install --upgrade pip setuptools wheel && \
61
+ pip install -r requirements.txt && \
62
+ apt autoremove -y && \
63
+ apt clean -y && \
64
+ rm -rf /tmp/* /var/tmp/* && \
65
+ find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \
66
+ find /var/cache -type f -delete
67
+
68
+ ENV PATH="/opt/venv/bin:$PATH"
69
+
70
+ ENTRYPOINT ["/app/tools.sh"]
71
+
72
+ ### Light, CLI only
73
+ FROM base AS light
74
+
75
+ COPY --from=build /app/lib/ /app
76
+ COPY --from=build /app/full/llama-cli /app/full/llama-completion /app
77
+
78
+ WORKDIR /app
79
+
80
+ ENTRYPOINT [ "/app/llama-cli" ]
81
+
82
+ ### Server, Server only
83
+ FROM base AS server
84
+
85
+ ENV LLAMA_ARG_HOST=0.0.0.0
86
+
87
+ COPY --from=build /app/lib/ /app
88
+ COPY --from=build /app/full/llama-server /app
89
+
90
+ WORKDIR /app
91
+
92
+ HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
93
+
94
+ ENTRYPOINT [ "/app/llama-server" ]
95
+
.devops/llama-cli-cann.Dockerfile ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG ASCEND_VERSION=8.1.RC1.alpha001-910b-openeuler22.03-py3.10
2
+
3
+ FROM ascendai/cann:$ASCEND_VERSION AS build
4
+
5
+ WORKDIR /app
6
+
7
+ COPY . .
8
+
9
+ RUN yum install -y gcc g++ cmake make openssl-devel
10
+ ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
11
+ ENV LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:$LIBRARY_PATH
12
+ ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/lib64/plugin/opskernel:${ASCEND_TOOLKIT_HOME}/lib64/plugin/nnengine:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe/op_tiling:${LD_LIBRARY_PATH}
13
+ ENV PYTHONPATH=${ASCEND_TOOLKIT_HOME}/python/site-packages:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe:${PYTHONPATH}
14
+ ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${ASCEND_TOOLKIT_HOME}/compiler/ccec_compiler/bin:${PATH}
15
+ ENV ASCEND_AICPU_PATH=${ASCEND_TOOLKIT_HOME}
16
+ ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
17
+ ENV TOOLCHAIN_HOME=${ASCEND_TOOLKIT_HOME}/toolkit
18
+ ENV ASCEND_HOME_PATH=${ASCEND_TOOLKIT_HOME}
19
+
20
+ # find libascend_hal.so, because the driver hasn't been mounted.
21
+ ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/runtime/lib64/stub:$LD_LIBRARY_PATH
22
+
23
+ RUN echo "Building with static libs" && \
24
+ source /usr/local/Ascend/ascend-toolkit/set_env.sh --force && \
25
+ cmake -B build -DGGML_NATIVE=OFF -DGGML_CANN=ON -DBUILD_SHARED_LIBS=OFF -DLLAMA_BUILD_TESTS=OFF && \
26
+ cmake --build build --config Release --target llama-cli && \
27
+ cmake --build build --config Release --target llama-completion
28
+
29
+ # TODO: use image with NNRT
30
+ FROM ascendai/cann:$ASCEND_VERSION AS runtime
31
+ COPY --from=build /app/build/bin/llama-cli /app/build/bin/llama-completion /
32
+
33
+ ENV LC_ALL=C.utf8
34
+
35
+ ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
36
+ ENV LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:$LIBRARY_PATH
37
+ ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/lib64/plugin/opskernel:${ASCEND_TOOLKIT_HOME}/lib64/plugin/nnengine:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe/op_tiling:${LD_LIBRARY_PATH}
38
+ ENV PYTHONPATH=${ASCEND_TOOLKIT_HOME}/python/site-packages:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe:${PYTHONPATH}
39
+ ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${ASCEND_TOOLKIT_HOME}/compiler/ccec_compiler/bin:${PATH}
40
+ ENV ASCEND_AICPU_PATH=${ASCEND_TOOLKIT_HOME}
41
+ ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
42
+ ENV TOOLCHAIN_HOME=${ASCEND_TOOLKIT_HOME}/toolkit
43
+ ENV ASCEND_HOME_PATH=${ASCEND_TOOLKIT_HOME}
44
+
45
+ ENTRYPOINT ["/llama-cli" ]
.devops/llama-cpp-cuda.srpm.spec ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SRPM for building from source and packaging an RPM for RPM-based distros.
2
+ # https://docs.fedoraproject.org/en-US/quick-docs/creating-rpm-packages
3
+ # Built and maintained by John Boero - boeroboy@gmail.com
4
+ # In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal
5
+
6
+ # Notes for llama.cpp:
7
+ # 1. Tags are currently based on hash - which will not sort asciibetically.
8
+ # We need to declare standard versioning if people want to sort latest releases.
9
+ # 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
10
+ # 3. NVidia's developer repo must be enabled with nvcc, cublas, clblas, etc installed.
11
+ # Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
12
+ # 4. OpenCL/CLBLAST support simply requires the ICD loader and basic opencl libraries.
13
+ # It is up to the user to install the correct vendor-specific support.
14
+
15
+ Name: llama.cpp-cuda
16
+ Version: %( date "+%%Y%%m%%d" )
17
+ Release: 1%{?dist}
18
+ Summary:        Inference of LLaMA model in pure C/C++ with CUDA support
19
+ License: MIT
20
+ Source0: https://github.com/ggml-org/llama.cpp/archive/refs/heads/master.tar.gz
21
+ BuildRequires: coreutils make gcc-c++ git cuda-toolkit
22
+ Requires: cuda-toolkit
23
+ URL: https://github.com/ggml-org/llama.cpp
24
+
25
+ %define debug_package %{nil}
26
+ %define source_date_epoch_from_changelog 0
27
+
28
+ %description
29
+ CPU inference for Meta's Llama2 models using default options.
30
+
31
+ %prep
32
+ %setup -n llama.cpp-master
33
+
34
+ %build
35
+ make -j GGML_CUDA=1
36
+
37
+ %install
38
+ mkdir -p %{buildroot}%{_bindir}/
39
+ cp -p llama-cli %{buildroot}%{_bindir}/llama-cuda-cli
40
+ cp -p llama-completion %{buildroot}%{_bindir}/llama-cuda-completion
41
+ cp -p llama-server %{buildroot}%{_bindir}/llama-cuda-server
42
+ cp -p llama-simple %{buildroot}%{_bindir}/llama-cuda-simple
43
+
44
+ mkdir -p %{buildroot}/usr/lib/systemd/system
45
+ %{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llamacuda.service
46
+ [Unit]
47
+ Description=Llama.cpp server, CUDA-enabled build.
48
+ After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target
49
+
50
+ [Service]
51
+ Type=simple
52
+ EnvironmentFile=/etc/sysconfig/llama
53
+ ExecStart=/usr/bin/llama-cuda-server $LLAMA_ARGS
54
+ ExecReload=/bin/kill -s HUP $MAINPID
55
+ Restart=never
56
+
57
+ [Install]
58
+ WantedBy=default.target
59
+ EOF
60
+
61
+ mkdir -p %{buildroot}/etc/sysconfig
62
+ %{__cat} <<EOF > %{buildroot}/etc/sysconfig/llama
63
+ LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin"
64
+ EOF
65
+
66
+ %clean
67
+ rm -rf %{buildroot}
68
+ rm -rf %{_builddir}/*
69
+
70
+ %files
71
+ %{_bindir}/llama-cuda-cli
72
+ %{_bindir}/llama-cuda-completion
73
+ %{_bindir}/llama-cuda-server
74
+ %{_bindir}/llama-cuda-simple
75
+ /usr/lib/systemd/system/llamacuda.service
76
+ %config /etc/sysconfig/llama
77
+
78
+ %pre
79
+
80
+ %post
81
+
82
+ %preun
83
+ %postun
84
+
85
+ %changelog
.devops/llama-cpp.srpm.spec ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SRPM for building from source and packaging an RPM for RPM-based distros.
2
+ # https://docs.fedoraproject.org/en-US/quick-docs/creating-rpm-packages
3
+ # Built and maintained by John Boero - boeroboy@gmail.com
4
+ # In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal
5
+
6
+ # Notes for llama.cpp:
7
+ # 1. Tags are currently based on hash - which will not sort asciibetically.
8
+ # We need to declare standard versioning if people want to sort latest releases.
9
+ # In the meantime, YYYYMMDD format will be used.
10
+ # 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
11
+ # 3. NVidia's developer repo must be enabled with nvcc, cublas, clblas, etc installed.
12
+ # Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
13
+ # 4. OpenCL/CLBLAST support simply requires the ICD loader and basic opencl libraries.
14
+ # It is up to the user to install the correct vendor-specific support.
15
+
16
+ Name: llama.cpp
17
+ Version: %( date "+%%Y%%m%%d" )
18
+ Release: 1%{?dist}
19
+ Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
20
+ License: MIT
21
+ Source0: https://github.com/ggml-org/llama.cpp/archive/refs/heads/master.tar.gz
22
+ BuildRequires: coreutils make gcc-c++ git libstdc++-devel
23
+ Requires: libstdc++
24
+ URL: https://github.com/ggml-org/llama.cpp
25
+
26
+ %define debug_package %{nil}
27
+ %define source_date_epoch_from_changelog 0
28
+
29
+ %description
30
+ CPU inference for Meta's LLaMA 2 models using default options.
31
+ Models are not included in this package and must be downloaded separately.
32
+
33
+ %prep
34
+ %setup -n llama.cpp-master
35
+
36
+ %build
37
+ make -j
38
+
39
+ %install
40
+ mkdir -p %{buildroot}%{_bindir}/
41
+ cp -p llama-cli %{buildroot}%{_bindir}/llama-cli
42
+ cp -p llama-completion %{buildroot}%{_bindir}/llama-completion
43
+ cp -p llama-server %{buildroot}%{_bindir}/llama-server
44
+ cp -p llama-simple %{buildroot}%{_bindir}/llama-simple
45
+
46
+ mkdir -p %{buildroot}/usr/lib/systemd/system
47
+ %{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llama.service
48
+ [Unit]
49
+ Description=Llama.cpp server, CPU only (no GPU support in this build).
50
+ After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target
51
+
52
+ [Service]
53
+ Type=simple
54
+ EnvironmentFile=/etc/sysconfig/llama
55
+ ExecStart=/usr/bin/llama-server $LLAMA_ARGS
56
+ ExecReload=/bin/kill -s HUP $MAINPID
57
+ Restart=never
58
+
59
+ [Install]
60
+ WantedBy=default.target
61
+ EOF
62
+
63
+ mkdir -p %{buildroot}/etc/sysconfig
64
+ %{__cat} <<EOF > %{buildroot}/etc/sysconfig/llama
65
+ LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin"
66
+ EOF
67
+
68
+ %clean
69
+ rm -rf %{buildroot}
70
+ rm -rf %{_builddir}/*
71
+
72
+ %files
73
+ %{_bindir}/llama-cli
74
+ %{_bindir}/llama-completion
75
+ %{_bindir}/llama-server
76
+ %{_bindir}/llama-simple
77
+ /usr/lib/systemd/system/llama.service
78
+ %config /etc/sysconfig/llama
79
+
80
+ %pre
81
+
82
+ %post
83
+
84
+ %preun
85
+ %postun
86
+
87
+ %changelog
.devops/musa.Dockerfile ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG UBUNTU_VERSION=22.04
2
+ # This needs to generally match the container host's environment.
3
+ ARG MUSA_VERSION=rc4.3.0
4
+ # Target the MUSA build image
5
+ ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}-amd64
6
+
7
+ ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}-amd64
8
+
9
+ FROM ${BASE_MUSA_DEV_CONTAINER} AS build
10
+
11
+ # MUSA architecture to build for (defaults to all supported archs)
12
+ ARG MUSA_DOCKER_ARCH=default
13
+
14
+ RUN apt-get update && \
15
+ apt-get install -y \
16
+ build-essential \
17
+ cmake \
18
+ python3 \
19
+ python3-pip \
20
+ git \
21
+ libssl-dev \
22
+ libgomp1
23
+
24
+ WORKDIR /app
25
+
26
+ COPY . .
27
+
28
+ RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \
29
+ export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \
30
+ fi && \
31
+ cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_BUILD_TESTS=OFF ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
32
+ cmake --build build --config Release -j$(nproc)
33
+
34
+ RUN mkdir -p /app/lib && \
35
+ find build -name "*.so*" -exec cp -P {} /app/lib \;
36
+
37
+ RUN mkdir -p /app/full \
38
+ && cp build/bin/* /app/full \
39
+ && cp *.py /app/full \
40
+ && cp -r gguf-py /app/full \
41
+ && cp -r requirements /app/full \
42
+ && cp requirements.txt /app/full \
43
+ && cp .devops/tools.sh /app/full/tools.sh
44
+
45
+ ## Base image
46
+ FROM ${BASE_MUSA_RUN_CONTAINER} AS base
47
+
48
+ RUN apt-get update \
49
+ && apt-get install -y libgomp1 curl\
50
+ && apt autoremove -y \
51
+ && apt clean -y \
52
+ && rm -rf /tmp/* /var/tmp/* \
53
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
54
+ && find /var/cache -type f -delete
55
+
56
+ COPY --from=build /app/lib/ /app
57
+
58
+ ### Full
59
+ FROM base AS full
60
+
61
+ COPY --from=build /app/full /app
62
+
63
+ WORKDIR /app
64
+
65
+ RUN apt-get update \
66
+ && apt-get install -y \
67
+ git \
68
+ python3 \
69
+ python3-pip \
70
+ && pip install --upgrade pip setuptools wheel \
71
+ && pip install -r requirements.txt \
72
+ && apt autoremove -y \
73
+ && apt clean -y \
74
+ && rm -rf /tmp/* /var/tmp/* \
75
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
76
+ && find /var/cache -type f -delete
77
+
78
+
79
+ ENTRYPOINT ["/app/tools.sh"]
80
+
81
+ ### Light, CLI only
82
+ FROM base AS light
83
+
84
+ COPY --from=build /app/full/llama-cli /app/full/llama-completion /app
85
+
86
+ WORKDIR /app
87
+
88
+ ENTRYPOINT [ "/app/llama-cli" ]
89
+
90
+ ### Server, Server only
91
+ FROM base AS server
92
+
93
+ ENV LLAMA_ARG_HOST=0.0.0.0
94
+
95
+ COPY --from=build /app/full/llama-server /app
96
+
97
+ WORKDIR /app
98
+
99
+ HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
100
+
101
+ ENTRYPOINT [ "/app/llama-server" ]
.devops/nix/apps.nix ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ perSystem =
3
+ { config, lib, ... }:
4
+ {
5
+ apps =
6
+ let
7
+ inherit (config.packages) default;
8
+ binaries = [
9
+ "llama-cli"
10
+ "llama-embedding"
11
+ "llama-server"
12
+ "llama-quantize"
13
+ ];
14
+ mkApp = name: {
15
+ type = "app";
16
+ program = "${default}/bin/${name}";
17
+ };
18
+ in
19
+ lib.genAttrs binaries mkApp;
20
+ };
21
+ }
.devops/nix/devshells.nix ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ { inputs, ... }:
2
+
3
+ {
4
+ perSystem =
5
+ {
6
+ config,
7
+ lib,
8
+ system,
9
+ ...
10
+ }:
11
+ {
12
+ devShells =
13
+ let
14
+ pkgs = import inputs.nixpkgs { inherit system; };
15
+ stdenv = pkgs.stdenv;
16
+ scripts = config.packages.python-scripts;
17
+ in
18
+ lib.pipe (config.packages) [
19
+ (lib.concatMapAttrs (
20
+ name: package: {
21
+ ${name} = pkgs.mkShell {
22
+ name = "${name}";
23
+ inputsFrom = [ package ];
24
+ shellHook = ''
25
+ echo "Entering ${name} devShell"
26
+ '';
27
+ };
28
+ "${name}-extra" =
29
+ if (name == "python-scripts") then
30
+ null
31
+ else
32
+ pkgs.mkShell {
33
+ name = "${name}-extra";
34
+ inputsFrom = [
35
+ package
36
+ scripts
37
+ ];
38
+ # Extra packages that *may* be used by some scripts
39
+ packages = [
40
+ pkgs.python3Packages.tiktoken
41
+ ];
42
+ shellHook = ''
43
+ echo "Entering ${name} devShell"
44
+ addToSearchPath "LD_LIBRARY_PATH" "${lib.getLib stdenv.cc.cc}/lib"
45
+ '';
46
+ };
47
+ }
48
+ ))
49
+ (lib.filterAttrs (name: value: value != null))
50
+ ];
51
+ };
52
+ }
.devops/nix/docker.nix ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ lib,
3
+ dockerTools,
4
+ buildEnv,
5
+ llama-cpp,
6
+ interactive ? true,
7
+ coreutils,
8
+ }:
9
+
10
+ # A tar that can be fed into `docker load`:
11
+ #
12
+ # $ nix build .#llamaPackages.docker
13
+ # $ docker load < result
14
+
15
+ # For details and variations cf.
16
+ # - https://nixos.org/manual/nixpkgs/unstable/#ssec-pkgs-dockerTools-buildLayeredImage
17
+ # - https://discourse.nixos.org/t/a-faster-dockertools-buildimage-prototype/16922
18
+ # - https://nixery.dev/
19
+
20
+ # Approximate (compressed) sizes, at the time of writing, are:
21
+ #
22
+ # .#llamaPackages.docker: 125M;
23
+ # .#llamaPackagesCuda.docker: 537M;
24
+ # .#legacyPackages.aarch64-linux.llamaPackagesXavier.docker: 415M.
25
+
26
+ dockerTools.buildLayeredImage {
27
+ name = llama-cpp.pname;
28
+ tag = "latest";
29
+
30
+ contents =
31
+ [ llama-cpp ]
32
+ ++ lib.optionals interactive [
33
+ coreutils
34
+ dockerTools.binSh
35
+ dockerTools.caCertificates
36
+ ];
37
+ }
.devops/nix/jetson-support.nix ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ { inputs, ... }:
2
+ {
3
+ perSystem =
4
+ {
5
+ config,
6
+ system,
7
+ lib,
8
+ pkgsCuda,
9
+ ...
10
+ }:
11
+ {
12
+ legacyPackages =
13
+ let
14
+ caps.llamaPackagesXavier = "7.2";
15
+ caps.llamaPackagesOrin = "8.7";
16
+ caps.llamaPackagesTX2 = "6.2";
17
+ caps.llamaPackagesNano = "5.3";
18
+
19
+ pkgsFor =
20
+ cap:
21
+ import inputs.nixpkgs {
22
+ inherit system;
23
+ config = {
24
+ cudaSupport = true;
25
+ cudaCapabilities = [ cap ];
26
+ cudaEnableForwardCompat = false;
27
+ inherit (pkgsCuda.config) allowUnfreePredicate;
28
+ };
29
+ };
30
+ in
31
+ builtins.mapAttrs (name: cap: (pkgsFor cap).callPackage ./scope.nix { }) caps;
32
+
33
+ packages = lib.optionalAttrs (system == "aarch64-linux") {
34
+ jetson-xavier = config.legacyPackages.llamaPackagesXavier.llama-cpp;
35
+ jetson-orin = config.legacyPackages.llamaPackagesOrin.llama-cpp;
36
+ jetson-nano = config.legacyPackages.llamaPackagesNano.llama-cpp;
37
+ };
38
+ };
39
+ }
.devops/nix/nixpkgs-instances.nix ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ { inputs, ... }:
2
+ {
3
+ # The _module.args definitions are passed on to modules as arguments. E.g.
4
+ # the module `{ pkgs ... }: { /* config */ }` implicitly uses
5
+ # `_module.args.pkgs` (defined in this case by flake-parts).
6
+ perSystem =
7
+ { lib, system, ... }:
8
+ {
9
+ _module.args = {
10
+ # Note: bringing up https://zimbatm.com/notes/1000-instances-of-nixpkgs
11
+ # again, the below creates several nixpkgs instances which the
12
+ # flake-centric CLI will be forced to evaluate e.g. on `nix flake show`.
13
+ #
14
+ # This is currently "slow" and "expensive", on a certain scale.
15
+ # This also isn't "right" in that this hinders dependency injection at
16
+ # the level of flake inputs. This might get removed in the foreseeable
17
+ # future.
18
+ #
19
+ # Note that you can use these expressions without Nix
20
+ # (`pkgs.callPackage ./devops/nix/scope.nix { }` is the entry point).
21
+
22
+ pkgsCuda = import inputs.nixpkgs {
23
+ inherit system;
24
+ # Ensure dependencies use CUDA consistently (e.g. that openmpi, ucc,
25
+ # and ucx are built with CUDA support)
26
+ config.cudaSupport = true;
27
+ config.allowUnfreePredicate =
28
+ p:
29
+ builtins.all (
30
+ license:
31
+ license.free
32
+ || builtins.elem license.shortName [
33
+ "CUDA EULA"
34
+ "cuDNN EULA"
35
+ ]
36
+ ) (p.meta.licenses or (lib.toList p.meta.license));
37
+ };
38
+ # Ensure dependencies use ROCm consistently
39
+ pkgsRocm = import inputs.nixpkgs {
40
+ inherit system;
41
+ config.rocmSupport = true;
42
+ };
43
+ };
44
+ };
45
+ }
.devops/nix/package-gguf-py.nix ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ lib,
3
+ llamaVersion,
4
+ numpy,
5
+ tqdm,
6
+ requests,
7
+ sentencepiece,
8
+ pyyaml,
9
+ poetry-core,
10
+ buildPythonPackage,
11
+ pytestCheckHook,
12
+ }:
13
+
14
+ buildPythonPackage {
15
+ pname = "gguf";
16
+ version = llamaVersion;
17
+ pyproject = true;
18
+ nativeBuildInputs = [ poetry-core ];
19
+ propagatedBuildInputs = [
20
+ numpy
21
+ tqdm
22
+ sentencepiece
23
+ pyyaml
24
+ requests
25
+ ];
26
+ src = lib.cleanSource ../../gguf-py;
27
+ pythonImportsCheck = [
28
+ "numpy"
29
+ "gguf"
30
+ ];
31
+ nativeCheckInputs = [ pytestCheckHook ];
32
+ doCheck = true;
33
+ meta = with lib; {
34
+ description = "Python package for writing binary files in the GGUF format";
35
+ license = licenses.mit;
36
+ maintainers = [ maintainers.ditsuke ];
37
+ };
38
+ }
.devops/nix/package.nix ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ lib,
3
+ glibc,
4
+ config,
5
+ stdenv,
6
+ runCommand,
7
+ cmake,
8
+ ninja,
9
+ pkg-config,
10
+ git,
11
+ mpi,
12
+ blas,
13
+ cudaPackages,
14
+ autoAddDriverRunpath,
15
+ darwin,
16
+ rocmPackages,
17
+ vulkan-headers,
18
+ vulkan-loader,
19
+ curl,
20
+ shaderc,
21
+ useBlas ?
22
+ builtins.all (x: !x) [
23
+ useCuda
24
+ useMetalKit
25
+ useRocm
26
+ useVulkan
27
+ ]
28
+ && blas.meta.available,
29
+ useCuda ? config.cudaSupport,
30
+ useMetalKit ? stdenv.isAarch64 && stdenv.isDarwin,
31
+ # Increases the runtime closure size by ~700M
32
+ useMpi ? false,
33
+ useRocm ? config.rocmSupport,
34
+ rocmGpuTargets ? builtins.concatStringsSep ";" rocmPackages.clr.gpuTargets,
35
+ useVulkan ? false,
36
+ useRpc ? false,
37
+ llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake
38
+
39
+ # It's necessary to consistently use backendStdenv when building with CUDA support,
40
+ # otherwise we get libstdc++ errors downstream.
41
+ effectiveStdenv ? if useCuda then cudaPackages.backendStdenv else stdenv,
42
+ enableStatic ? effectiveStdenv.hostPlatform.isStatic,
43
+ precompileMetalShaders ? false,
44
+ }:
45
+
46
+ let
47
+ inherit (lib)
48
+ cmakeBool
49
+ cmakeFeature
50
+ optionalAttrs
51
+ optionals
52
+ strings
53
+ ;
54
+
55
+ stdenv = throw "Use effectiveStdenv instead";
56
+
57
+ suffices =
58
+ lib.optionals useBlas [ "BLAS" ]
59
+ ++ lib.optionals useCuda [ "CUDA" ]
60
+ ++ lib.optionals useMetalKit [ "MetalKit" ]
61
+ ++ lib.optionals useMpi [ "MPI" ]
62
+ ++ lib.optionals useRocm [ "ROCm" ]
63
+ ++ lib.optionals useVulkan [ "Vulkan" ];
64
+
65
+ pnameSuffix =
66
+ strings.optionalString (suffices != [ ])
67
+ "-${strings.concatMapStringsSep "-" strings.toLower suffices}";
68
+ descriptionSuffix = strings.optionalString (
69
+ suffices != [ ]
70
+ ) ", accelerated with ${strings.concatStringsSep ", " suffices}";
71
+
72
+ xcrunHost = runCommand "xcrunHost" { } ''
73
+ mkdir -p $out/bin
74
+ ln -s /usr/bin/xcrun $out/bin
75
+ '';
76
+
77
+ # apple_sdk is supposed to choose sane defaults, no need to handle isAarch64
78
+ # separately
79
+ darwinBuildInputs =
80
+ with darwin.apple_sdk.frameworks;
81
+ [
82
+ Accelerate
83
+ CoreVideo
84
+ CoreGraphics
85
+ ]
86
+ ++ optionals useMetalKit [ MetalKit ];
87
+
88
+ cudaBuildInputs = with cudaPackages; [
89
+ cuda_cudart
90
+ cuda_cccl # <nv/target>
91
+ libcublas
92
+ ];
93
+
94
+ rocmBuildInputs = with rocmPackages; [
95
+ clr
96
+ hipblas
97
+ rocblas
98
+ ];
99
+
100
+ vulkanBuildInputs = [
101
+ vulkan-headers
102
+ vulkan-loader
103
+ shaderc
104
+ ];
105
+ in
106
+
107
+ effectiveStdenv.mkDerivation (finalAttrs: {
108
+ pname = "llama-cpp${pnameSuffix}";
109
+ version = llamaVersion;
110
+
111
+ # Note: none of the files discarded here are visible in the sandbox or
112
+ # affect the output hash. This also means they can be modified without
113
+ # triggering a rebuild.
114
+ src = lib.cleanSourceWith {
115
+ filter =
116
+ name: type:
117
+ let
118
+ noneOf = builtins.all (x: !x);
119
+ baseName = baseNameOf name;
120
+ in
121
+ noneOf [
122
+ (lib.hasSuffix ".nix" name) # Ignore *.nix files when computing outPaths
123
+ (lib.hasSuffix ".md" name) # Ignore *.md changes when computing outPaths
124
+ (lib.hasPrefix "." baseName) # Skip hidden files and directories
125
+ (baseName == "flake.lock")
126
+ ];
127
+ src = lib.cleanSource ../../.;
128
+ };
129
+
130
+ postPatch = ''
131
+ '';
132
+
133
+ # With PR#6015 https://github.com/ggml-org/llama.cpp/pull/6015,
134
+ # `default.metallib` may be compiled with Metal compiler from XCode
135
+ # and we need to escape sandbox on MacOS to access Metal compiler.
136
+ # `xcrun` is used to find the path of the Metal compiler, which is variable
137
+ # and not on $PATH
138
+ # see https://github.com/ggml-org/llama.cpp/pull/6118 for discussion
139
+ __noChroot = effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders;
140
+
141
+ nativeBuildInputs =
142
+ [
143
+ cmake
144
+ ninja
145
+ pkg-config
146
+ git
147
+ ]
148
+ ++ optionals useCuda [
149
+ cudaPackages.cuda_nvcc
150
+
151
+ autoAddDriverRunpath
152
+ ]
153
+ ++ optionals (effectiveStdenv.hostPlatform.isGnu && enableStatic) [ glibc.static ]
154
+ ++ optionals (effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders) [ xcrunHost ];
155
+
156
+ buildInputs =
157
+ optionals effectiveStdenv.isDarwin darwinBuildInputs
158
+ ++ optionals useCuda cudaBuildInputs
159
+ ++ optionals useMpi [ mpi ]
160
+ ++ optionals useRocm rocmBuildInputs
161
+ ++ optionals useBlas [ blas ]
162
+ ++ optionals useVulkan vulkanBuildInputs;
163
+
164
+ cmakeFlags =
165
+ [
166
+ (cmakeBool "LLAMA_BUILD_SERVER" true)
167
+ (cmakeBool "BUILD_SHARED_LIBS" (!enableStatic))
168
+ (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true)
169
+ (cmakeBool "GGML_NATIVE" false)
170
+ (cmakeBool "GGML_BLAS" useBlas)
171
+ (cmakeBool "GGML_CUDA" useCuda)
172
+ (cmakeBool "GGML_HIP" useRocm)
173
+ (cmakeBool "GGML_METAL" useMetalKit)
174
+ (cmakeBool "GGML_VULKAN" useVulkan)
175
+ (cmakeBool "GGML_STATIC" enableStatic)
176
+ (cmakeBool "GGML_RPC" useRpc)
177
+ ]
178
+ ++ optionals useCuda [
179
+ (
180
+ with cudaPackages.flags;
181
+ cmakeFeature "CMAKE_CUDA_ARCHITECTURES" (
182
+ builtins.concatStringsSep ";" (map dropDot cudaCapabilities)
183
+ )
184
+ )
185
+ ]
186
+ ++ optionals useRocm [
187
+ (cmakeFeature "CMAKE_HIP_COMPILER" "${rocmPackages.llvm.clang}/bin/clang")
188
+ (cmakeFeature "CMAKE_HIP_ARCHITECTURES" rocmGpuTargets)
189
+ ]
190
+ ++ optionals useMetalKit [
191
+ (lib.cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
192
+ (cmakeBool "GGML_METAL_EMBED_LIBRARY" (!precompileMetalShaders))
193
+ ];
194
+
195
+ # Environment variables needed for ROCm
196
+ env = optionalAttrs useRocm {
197
+ ROCM_PATH = "${rocmPackages.clr}";
198
+ HIP_DEVICE_LIB_PATH = "${rocmPackages.rocm-device-libs}/amdgcn/bitcode";
199
+ };
200
+
201
+ # TODO(SomeoneSerge): It's better to add proper install targets at the CMake level,
202
+ # if they haven't been added yet.
203
+ postInstall = ''
204
+ mkdir -p $out/include
205
+ cp $src/include/llama.h $out/include/
206
+ '';
207
+
208
+ meta = {
209
+ # Configurations we don't want even the CI to evaluate. Results in the
210
+ # "unsupported platform" messages. This is mostly a no-op, because
211
+ # cudaPackages would've refused to evaluate anyway.
212
+ badPlatforms = optionals useCuda lib.platforms.darwin;
213
+
214
+ # Configurations that are known to result in build failures. Can be
215
+ # overridden by importing Nixpkgs with `allowBroken = true`.
216
+ broken = (useMetalKit && !effectiveStdenv.isDarwin);
217
+
218
+ description = "Inference of LLaMA model in pure C/C++${descriptionSuffix}";
219
+ homepage = "https://github.com/ggml-org/llama.cpp/";
220
+ license = lib.licenses.mit;
221
+
222
+ # Accommodates `nix run` and `lib.getExe`
223
+ mainProgram = "llama-cli";
224
+
225
+ # These people might respond, on the best effort basis, if you ping them
226
+ # in case of Nix-specific regressions or for reviewing Nix-specific PRs.
227
+ # Consider adding yourself to this list if you want to ensure this flake
228
+ # stays maintained and you're willing to invest your time. Do not add
229
+ # other people without their consent. Consider removing people after
230
+ # they've been unreachable for long periods of time.
231
+
232
+ # Note that lib.maintainers is defined in Nixpkgs, but you may just add
233
+ # an attrset following the same format as in
234
+ # https://github.com/NixOS/nixpkgs/blob/f36a80e54da29775c78d7eff0e628c2b4e34d1d7/maintainers/maintainer-list.nix
235
+ maintainers = with lib.maintainers; [
236
+ philiptaron
237
+ SomeoneSerge
238
+ ];
239
+
240
+ # Extend `badPlatforms` instead
241
+ platforms = lib.platforms.all;
242
+ };
243
+ })
.devops/nix/python-scripts.nix ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ lib,
3
+ stdenv,
4
+ buildPythonPackage,
5
+ poetry-core,
6
+ mkShell,
7
+ python3Packages,
8
+ gguf-py,
9
+ }@inputs:
10
+
11
+ let
12
+ llama-python-deps = with python3Packages; [
13
+ numpy
14
+ sentencepiece
15
+ transformers
16
+ protobuf
17
+ torchWithoutCuda
18
+ gguf-py
19
+ tqdm
20
+
21
+ # for scripts/compare-llama-bench.py
22
+ gitpython
23
+ tabulate
24
+
25
+ # for examples/pydantic-models-to-grammar-examples.py
26
+ docstring-parser
27
+ pydantic
28
+
29
+ ];
30
+
31
+ llama-python-test-deps = with python3Packages; [
32
+ # Server bench
33
+ matplotlib
34
+
35
+ # server tests
36
+ openai
37
+ pytest
38
+ prometheus-client
39
+ ];
40
+ in
41
+
42
+ buildPythonPackage ({
43
+ pname = "llama-scripts";
44
+ version = "0.0.0";
45
+ pyproject = true;
46
+
47
+ # NOTE: The files filtered out here are not visible in the build sandbox, neither
48
+ # do they affect the output hash. They can be modified without triggering a rebuild.
49
+ src = lib.cleanSourceWith {
50
+ filter =
51
+ name: type:
52
+ let
53
+ any = builtins.any (x: x);
54
+ baseName = builtins.baseNameOf name;
55
+ in
56
+ any [
57
+ (lib.hasSuffix ".py" name)
58
+ (baseName == "README.md")
59
+ (baseName == "pyproject.toml")
60
+ ];
61
+ src = lib.cleanSource ../../.;
62
+ };
63
+ nativeBuildInputs = [ poetry-core ];
64
+ nativeCheckInputs = llama-python-test-deps;
65
+ dependencies = llama-python-deps;
66
+ })
.devops/nix/scope.nix ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ lib,
3
+ newScope,
4
+ python3,
5
+ llamaVersion ? "0.0.0",
6
+ }:
7
+
8
+ let
9
+ pythonPackages = python3.pkgs;
10
+ in
11
+
12
+ # We're using `makeScope` instead of just writing out an attrset
13
+ # because it allows users to apply overlays later using `overrideScope'`.
14
+ # Cf. https://noogle.dev/f/lib/makeScope
15
+
16
+ lib.makeScope newScope (self: {
17
+ inherit llamaVersion;
18
+ gguf-py = self.callPackage ./package-gguf-py.nix {
19
+ inherit (pythonPackages)
20
+ numpy
21
+ tqdm
22
+ sentencepiece
23
+ pyyaml
24
+ pytestCheckHook
25
+ requests
26
+ buildPythonPackage
27
+ poetry-core
28
+ ;
29
+ };
30
+ python-scripts = self.callPackage ./python-scripts.nix { inherit (pythonPackages) buildPythonPackage poetry-core; };
31
+ llama-cpp = self.callPackage ./package.nix { };
32
+ docker = self.callPackage ./docker.nix { };
33
+ docker-min = self.callPackage ./docker.nix { interactive = false; };
34
+ sif = self.callPackage ./sif.nix { };
35
+ })
.devops/nix/sif.nix ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ lib,
3
+ singularity-tools,
4
+ llama-cpp,
5
+ bashInteractive,
6
+ interactive ? false,
7
+ }:
8
+
9
+ let
10
+ optionalInt = cond: x: if cond then x else 0;
11
+ in
12
+ singularity-tools.buildImage rec {
13
+ inherit (llama-cpp) name;
14
+ contents = [ llama-cpp ] ++ lib.optionals interactive [ bashInteractive ];
15
+
16
+ # These are excessive (but safe) for most variants. Building singularity
17
+ # images requires superuser privileges, so we build them inside a VM in a
18
+ # writable image of pre-determined size.
19
+ #
20
+ # ROCm is currently affected by https://github.com/NixOS/nixpkgs/issues/276846
21
+ #
22
+ # Expected image sizes:
23
+ # - cpu/blas: 150M,
24
+ # - cuda, all gencodes: 560M,
25
+ diskSize = 4096 + optionalInt llama-cpp.useRocm 16384;
26
+ memSize = diskSize;
27
+ }
.devops/openvino.Dockerfile ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG OPENVINO_VERSION_MAJOR=2026.0
2
+ ARG OPENVINO_VERSION_FULL=2026.0.0.20965.c6d6a13a886
3
+ ARG UBUNTU_VERSION=24.04
4
+
5
+ # Optional proxy build arguments - empty by default
6
+ ARG http_proxy=
7
+ ARG https_proxy=
8
+
9
+ ## Build Image
10
+ FROM ubuntu:${UBUNTU_VERSION} AS build
11
+
12
+ # Pass proxy args to build stage
13
+ ARG http_proxy
14
+ ARG https_proxy
15
+
16
+ RUN apt-get update && \
17
+ apt-get install -y --no-install-recommends \
18
+ ca-certificates \
19
+ gnupg \
20
+ wget \
21
+ git \
22
+ cmake \
23
+ ninja-build \
24
+ build-essential \
25
+ libtbb12 \
26
+ libssl-dev \
27
+ ocl-icd-opencl-dev \
28
+ opencl-headers \
29
+ opencl-clhpp-headers \
30
+ intel-opencl-icd && \
31
+ rm -rf /var/lib/apt/lists/*
32
+
33
+ # Install OpenVINO for Ubuntu 24.04
34
+ ARG OPENVINO_VERSION_MAJOR
35
+ ARG OPENVINO_VERSION_FULL
36
+ RUN mkdir -p /opt/intel && \
37
+ wget https://storage.openvinotoolkit.org/repositories/openvino/packages/${OPENVINO_VERSION_MAJOR}/linux/openvino_toolkit_ubuntu24_${OPENVINO_VERSION_FULL}_x86_64.tgz && \
38
+ tar -xf openvino_toolkit_ubuntu24_${OPENVINO_VERSION_FULL}_x86_64.tgz && \
39
+ mv openvino_toolkit_ubuntu24_${OPENVINO_VERSION_FULL}_x86_64 /opt/intel/openvino_${OPENVINO_VERSION_MAJOR} && \
40
+ cd /opt/intel/openvino_${OPENVINO_VERSION_MAJOR} && \
41
+ echo "Y" | ./install_dependencies/install_openvino_dependencies.sh && \
42
+ cd - && \
43
+ ln -s /opt/intel/openvino_${OPENVINO_VERSION_MAJOR} /opt/intel/openvino
44
+
45
+ ENV OpenVINO_DIR=/opt/intel/openvino
46
+
47
+ WORKDIR /app
48
+
49
+ COPY . .
50
+
51
+ # Build Stage
52
+ RUN bash -c "source ${OpenVINO_DIR}/setupvars.sh && \
53
+ cmake -B build/ReleaseOV -G Ninja \
54
+ -DCMAKE_BUILD_TYPE=Release \
55
+ -DGGML_OPENVINO=ON && \
56
+ cmake --build build/ReleaseOV -j$(nproc)"
57
+
58
+ # Copy all necessary libraries
59
+ RUN mkdir -p /app/lib && \
60
+ find build/ReleaseOV -name '*.so*' -exec cp {} /app/lib \; && \
61
+ find ${OpenVINO_DIR}/runtime/lib/intel64 -name '*.so*' -exec cp -P {} /app/lib \; 2>/dev/null || \
62
+ find ${OpenVINO_DIR}/lib/intel64 -name '*.so*' -exec cp -P {} /app/lib \;
63
+
64
+ # Create runtime directories and copy binaries
65
+ RUN mkdir -p /app/full \
66
+ && cp build/ReleaseOV/bin/* /app/full/ \
67
+ && cp *.py /app/full \
68
+ && cp -r gguf-py /app/full \
69
+ && cp -r requirements /app/full \
70
+ && cp requirements.txt /app/full \
71
+ && cp .devops/tools.sh /app/full/tools.sh
72
+
73
+ ## Base Runtime Image
74
+ FROM ubuntu:${UBUNTU_VERSION} AS base
75
+
76
+ # Pass proxy args to runtime stage
77
+ ARG http_proxy
78
+ ARG https_proxy
79
+
80
+ RUN apt-get update \
81
+ && apt-get install -y libgomp1 libtbb12 curl\
82
+ && apt autoremove -y \
83
+ && apt clean -y \
84
+ && rm -rf /tmp/* /var/tmp/* \
85
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
86
+ && find /var/cache -type f -delete
87
+
88
+ COPY --from=build /app/lib/ /app/
89
+
90
+ ### Full (all binaries)
91
+ FROM base AS full
92
+
93
+ ARG http_proxy
94
+ ARG https_proxy
95
+
96
+ COPY --from=build /app/full /app/
97
+
98
+ WORKDIR /app
99
+
100
+ RUN apt-get update && \
101
+ apt-get install -y --no-install-recommends \
102
+ git \
103
+ python3 \
104
+ python3-venv \
105
+ python3-pip && \
106
+ python3 -m venv /ov-venv && \
107
+ /ov-venv/bin/pip install --no-cache-dir --upgrade pip setuptools wheel && \
108
+ /ov-venv/bin/pip install --no-cache-dir -r requirements.txt && \
109
+ apt-get autoremove -y && \
110
+ apt-get clean && \
111
+ rm -rf /tmp/* /var/tmp/* && \
112
+ find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \
113
+ find /var/cache -type f -delete
114
+
115
+ ENTRYPOINT ["/bin/bash", "-c", "source /ov-venv/bin/activate && exec /app/tools.sh \"$@\"", "--"]
116
+
117
+
118
+ ### Light, CLI only
119
+ FROM base AS light
120
+
121
+ COPY --from=build /app/full/llama-cli /app/
122
+
123
+ WORKDIR /app
124
+
125
+ ENTRYPOINT [ "/app/llama-cli" ]
126
+
127
+ ### Server, Server only
128
+ FROM base AS server
129
+
130
+ ENV LLAMA_ARG_HOST=0.0.0.0
131
+
132
+ COPY --from=build /app/full/llama-server /app/
133
+
134
+ WORKDIR /app
135
+
136
+ HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
137
+
138
+ ENTRYPOINT [ "/app/llama-server" ]
.devops/rocm.Dockerfile ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG UBUNTU_VERSION=24.04
2
+
3
+ # This needs to generally match the container host's environment.
4
+ ARG ROCM_VERSION=7.2
5
+ ARG AMDGPU_VERSION=7.2
6
+
7
+ # Target the ROCm build image
8
+ ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete
9
+
10
+ ### Build image
11
+ FROM ${BASE_ROCM_DEV_CONTAINER} AS build
12
+
13
+ # Unless otherwise specified, we make a fat build.
14
+ # This is mostly tied to rocBLAS supported archs.
15
+ # check https://rocm.docs.amd.com/projects/install-on-linux/en/docs-7.2.0/reference/system-requirements.html
16
+ # check https://rocm.docs.amd.com/projects/radeon-ryzen/en/latest/docs/compatibility/compatibilityrad/native_linux/native_linux_compatibility.html
17
+ # check https://rocm.docs.amd.com/projects/radeon-ryzen/en/latest/docs/compatibility/compatibilityryz/native_linux/native_linux_compatibility.html
18
+
19
+ ARG ROCM_DOCKER_ARCH='gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1151;gfx1150;gfx1200;gfx1201'
20
+
21
+ # Set ROCm architectures
22
+ ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH}
23
+
24
+ RUN apt-get update \
25
+ && apt-get install -y \
26
+ build-essential \
27
+ cmake \
28
+ git \
29
+ libssl-dev \
30
+ curl \
31
+ libgomp1
32
+
33
+ WORKDIR /app
34
+
35
+ COPY . .
36
+
37
+ RUN HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \
38
+ cmake -S . -B build \
39
+ -DGGML_HIP=ON \
40
+ -DGGML_HIP_ROCWMMA_FATTN=ON \
41
+ -DAMDGPU_TARGETS="$ROCM_DOCKER_ARCH" \
42
+ -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON \
43
+ -DCMAKE_BUILD_TYPE=Release -DLLAMA_BUILD_TESTS=OFF \
44
+ && cmake --build build --config Release -j$(nproc)
45
+
46
+ RUN mkdir -p /app/lib \
47
+ && find build -name "*.so*" -exec cp -P {} /app/lib \;
48
+
49
+ RUN mkdir -p /app/full \
50
+ && cp build/bin/* /app/full \
51
+ && cp *.py /app/full \
52
+ && cp -r gguf-py /app/full \
53
+ && cp -r requirements /app/full \
54
+ && cp requirements.txt /app/full \
55
+ && cp .devops/tools.sh /app/full/tools.sh
56
+
57
+ ## Base image
58
+ FROM ${BASE_ROCM_DEV_CONTAINER} AS base
59
+
60
+ RUN apt-get update \
61
+ && apt-get install -y libgomp1 curl\
62
+ && apt autoremove -y \
63
+ && apt clean -y \
64
+ && rm -rf /tmp/* /var/tmp/* \
65
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
66
+ && find /var/cache -type f -delete
67
+
68
+ COPY --from=build /app/lib/ /app
69
+
70
+ ### Full
71
+ FROM base AS full
72
+
73
+ COPY --from=build /app/full /app
74
+
75
+ WORKDIR /app
76
+
77
+ RUN apt-get update \
78
+ && apt-get install -y \
79
+ git \
80
+ python3-pip \
81
+ python3 \
82
+ python3-wheel\
83
+ && pip install --break-system-packages --upgrade setuptools \
84
+ && pip install --break-system-packages -r requirements.txt \
85
+ && apt autoremove -y \
86
+ && apt clean -y \
87
+ && rm -rf /tmp/* /var/tmp/* \
88
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
89
+ && find /var/cache -type f -delete
90
+
91
+ ENTRYPOINT ["/app/tools.sh"]
92
+
93
+ ### Light, CLI only
94
+ FROM base AS light
95
+
96
+ COPY --from=build /app/full/llama-cli /app/full/llama-completion /app
97
+
98
+ WORKDIR /app
99
+
100
+ ENTRYPOINT [ "/app/llama-cli" ]
101
+
102
+ ### Server, Server only
103
+ FROM base AS server
104
+
105
+ ENV LLAMA_ARG_HOST=0.0.0.0
106
+
107
+ COPY --from=build /app/full/llama-server /app
108
+
109
+ WORKDIR /app
110
+
111
+ HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
112
+
113
+ ENTRYPOINT [ "/app/llama-server" ]
.devops/s390x.Dockerfile ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG GCC_VERSION=15.2.0
2
+ ARG UBUNTU_VERSION=24.04
3
+
4
+ ### Build Llama.cpp stage
5
+ FROM gcc:${GCC_VERSION} AS build
6
+
7
+ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
8
+ --mount=type=cache,target=/var/lib/apt/lists,sharing=locked \
9
+ apt update -y && \
10
+ apt upgrade -y && \
11
+ apt install -y --no-install-recommends \
12
+ git cmake ccache ninja-build \
13
+ # WARNING: Do not use libopenblas-openmp-dev. libopenblas-dev is faster.
14
+ libopenblas-dev libssl-dev && \
15
+ rm -rf /var/lib/apt/lists/*
16
+
17
+ WORKDIR /app
18
+ COPY . .
19
+
20
+ RUN --mount=type=cache,target=/root/.ccache \
21
+ --mount=type=cache,target=/app/build \
22
+ cmake -S . -B build -G Ninja \
23
+ -DCMAKE_BUILD_TYPE=Release \
24
+ -DCMAKE_C_COMPILER_LAUNCHER=ccache \
25
+ -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
26
+ -DLLAMA_BUILD_TESTS=OFF \
27
+ -DGGML_NATIVE=OFF \
28
+ -DGGML_BACKEND_DL=ON \
29
+ -DGGML_CPU_ALL_VARIANTS=ON \
30
+ -DGGML_BLAS=ON \
31
+ -DGGML_BLAS_VENDOR=OpenBLAS && \
32
+ cmake --build build --config Release -j $(nproc) && \
33
+ cmake --install build --prefix /opt/llama.cpp
34
+
35
+ COPY *.py /opt/llama.cpp/bin
36
+ COPY .devops/tools.sh /opt/llama.cpp/bin
37
+
38
+ COPY gguf-py /opt/llama.cpp/gguf-py
39
+ COPY requirements.txt /opt/llama.cpp/gguf-py
40
+ COPY requirements /opt/llama.cpp/gguf-py/requirements
41
+
42
+
43
+ ### Collect all llama.cpp binaries, libraries and distro libraries
44
+ FROM scratch AS collector
45
+
46
+ # Copy llama.cpp binaries and libraries
47
+ COPY --from=build /opt/llama.cpp/bin /llama.cpp/bin
48
+ COPY --from=build /opt/llama.cpp/lib /llama.cpp/lib
49
+ COPY --from=build /opt/llama.cpp/gguf-py /llama.cpp/gguf-py
50
+
51
+
52
+ ### Base image
53
+ FROM ubuntu:${UBUNTU_VERSION} AS base
54
+
55
+ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
56
+ --mount=type=cache,target=/var/lib/apt/lists,sharing=locked \
57
+ apt update -y && \
58
+ apt install -y --no-install-recommends \
59
+ # WARNING: Do not use libopenblas-openmp-dev. libopenblas-dev is faster.
60
+ # See: https://github.com/ggml-org/llama.cpp/pull/15915#issuecomment-3317166506
61
+ curl libgomp1 libopenblas-dev && \
62
+ apt autoremove -y && \
63
+ apt clean -y && \
64
+ rm -rf /tmp/* /var/tmp/* && \
65
+ find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \
66
+ find /var/cache -type f -delete
67
+
68
+ # Copy llama.cpp libraries
69
+ COPY --from=collector /llama.cpp/lib /usr/lib/s390x-linux-gnu
70
+
71
+
72
+ ### Full
73
+ FROM base AS full
74
+
75
+ ENV PATH="/root/.cargo/bin:${PATH}"
76
+ WORKDIR /app
77
+
78
+ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
79
+ --mount=type=cache,target=/var/lib/apt/lists,sharing=locked \
80
+ apt update -y && \
81
+ apt install -y \
82
+ git cmake libjpeg-dev \
83
+ python3 python3-pip python3-dev && \
84
+ apt autoremove -y && \
85
+ apt clean -y && \
86
+ rm -rf /tmp/* /var/tmp/* && \
87
+ find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \
88
+ find /var/cache -type f -delete
89
+
90
+ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
91
+
92
+ COPY --from=collector /llama.cpp/bin /app
93
+ COPY --from=collector /llama.cpp/gguf-py /app/gguf-py
94
+
95
+ RUN pip install --no-cache-dir --break-system-packages \
96
+ -r /app/gguf-py/requirements.txt
97
+
98
+ ENTRYPOINT [ "/app/tools.sh" ]
99
+
100
+
101
+ ### CLI Only
102
+ FROM base AS light
103
+
104
+ WORKDIR /llama.cpp/bin
105
+
106
+ # Copy llama.cpp binaries and libraries
107
+ COPY --from=collector /llama.cpp/bin/*.so /llama.cpp/bin
108
+ COPY --from=collector /llama.cpp/bin/llama-cli /llama.cpp/bin/llama-completion /llama.cpp/bin
109
+
110
+ ENTRYPOINT [ "/llama.cpp/bin/llama-cli" ]
111
+
112
+
113
+ ### Server
114
+ FROM base AS server
115
+
116
+ ENV LLAMA_ARG_HOST=0.0.0.0
117
+
118
+ WORKDIR /llama.cpp/bin
119
+
120
+ # Copy llama.cpp binaries and libraries
121
+ COPY --from=collector /llama.cpp/bin/*.so /llama.cpp/bin
122
+ COPY --from=collector /llama.cpp/bin/llama-server /llama.cpp/bin
123
+
124
+ EXPOSE 8080
125
+
126
+ ENTRYPOINT [ "/llama.cpp/bin/llama-server" ]
.devops/tools.sh ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ set -e
3
+
4
+ # Read the first argument into a variable
5
+ arg1="$1"
6
+
7
+ # Shift the arguments to remove the first one
8
+ shift
9
+
10
+ if [[ "$arg1" == '--convert' || "$arg1" == '-c' ]]; then
11
+ exec python3 ./convert_hf_to_gguf.py "$@"
12
+ elif [[ "$arg1" == '--quantize' || "$arg1" == '-q' ]]; then
13
+ exec ./llama-quantize "$@"
14
+ elif [[ "$arg1" == '--run' || "$arg1" == '-r' ]]; then
15
+ exec ./llama-cli "$@"
16
+ elif [[ "$arg1" == '--run-legacy' || "$arg1" == '-l' ]]; then
17
+ exec ./llama-completion "$@"
18
+ elif [[ "$arg1" == '--bench' || "$arg1" == '-b' ]]; then
19
+ exec ./llama-bench "$@"
20
+ elif [[ "$arg1" == '--perplexity' || "$arg1" == '-p' ]]; then
21
+ exec ./llama-perplexity "$@"
22
+ elif [[ "$arg1" == '--all-in-one' || "$arg1" == '-a' ]]; then
23
+ echo "Converting PTH to GGML..."
24
+ for i in $(ls $1/$2/ggml-model-f16.bin*); do
25
+ if [ -f "${i/f16/q4_0}" ]; then
26
+ echo "Skip model quantization, it already exists: ${i/f16/q4_0}"
27
+ else
28
+ echo "Converting PTH to GGML: $i into ${i/f16/q4_0}..."
29
+ exec ./llama-quantize "$i" "${i/f16/q4_0}" q4_0
30
+ fi
31
+ done
32
+ elif [[ "$arg1" == '--server' || "$arg1" == '-s' ]]; then
33
+ exec ./llama-server "$@"
34
+ else
35
+ echo "Unknown command: $arg1"
36
+ echo "Available commands: "
37
+ echo " --run (-r): Run a model (chat) previously converted into ggml"
38
+ echo " ex: -m /models/7B/ggml-model-q4_0.bin"
39
+ echo " --run-legacy (-l): Run a model (legacy completion) previously converted into ggml"
40
+ echo " ex: -m /models/7B/ggml-model-q4_0.bin -no-cnv -p \"Building a website can be done in 10 simple steps:\" -n 512"
41
+ echo " --bench (-b): Benchmark the performance of the inference for various parameters."
42
+ echo " ex: -m model.gguf"
43
+ echo " --perplexity (-p): Measure the perplexity of a model over a given text."
44
+ echo " ex: -m model.gguf -f file.txt"
45
+ echo " --convert (-c): Convert a llama model into ggml"
46
+ echo " ex: --outtype f16 \"/models/7B/\" "
47
+ echo " --quantize (-q): Optimize with quantization process ggml"
48
+ echo " ex: \"/models/7B/ggml-model-f16.bin\" \"/models/7B/ggml-model-q4_0.bin\" 2"
49
+ echo " --all-in-one (-a): Execute --convert & --quantize"
50
+ echo " ex: \"/models/\" 7B"
51
+ echo " --server (-s): Run a model on the server"
52
+ echo " ex: -m /models/7B/ggml-model-q4_0.bin -c 2048 -ngl 43 -mg 1 --port 8080"
53
+ fi
.devops/vulkan.Dockerfile ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG UBUNTU_VERSION=26.04
2
+
3
+ FROM ubuntu:$UBUNTU_VERSION AS build
4
+
5
+ # Install build tools
6
+ RUN apt update && apt install -y git build-essential cmake wget xz-utils
7
+
8
+ # Install SSL and Vulkan SDK dependencies
9
+ RUN apt install -y libssl-dev curl \
10
+ libxcb-xinput0 libxcb-xinerama0 libxcb-cursor-dev libvulkan-dev glslc
11
+
12
+ # Build it
13
+ WORKDIR /app
14
+
15
+ COPY . .
16
+
17
+ RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=ON -DLLAMA_BUILD_TESTS=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON && \
18
+ cmake --build build --config Release -j$(nproc)
19
+
20
+ RUN mkdir -p /app/lib && \
21
+ find build -name "*.so*" -exec cp -P {} /app/lib \;
22
+
23
+ RUN mkdir -p /app/full \
24
+ && cp build/bin/* /app/full \
25
+ && cp *.py /app/full \
26
+ && cp -r gguf-py /app/full \
27
+ && cp -r requirements /app/full \
28
+ && cp requirements.txt /app/full \
29
+ && cp .devops/tools.sh /app/full/tools.sh
30
+
31
+ ## Base image
32
+ FROM ubuntu:$UBUNTU_VERSION AS base
33
+
34
+ RUN apt-get update \
35
+ && apt-get install -y libgomp1 curl libvulkan1 mesa-vulkan-drivers \
36
+ libglvnd0 libgl1 libglx0 libegl1 libgles2 \
37
+ && apt autoremove -y \
38
+ && apt clean -y \
39
+ && rm -rf /tmp/* /var/tmp/* \
40
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
41
+ && find /var/cache -type f -delete
42
+
43
+ COPY --from=build /app/lib/ /app
44
+
45
+ ### Full
46
+ FROM base AS full
47
+
48
+ COPY --from=build /app/full /app
49
+
50
+ WORKDIR /app
51
+
52
+ RUN apt-get update \
53
+ && apt-get install -y \
54
+ build-essential \
55
+ git \
56
+ python3.13 \
57
+ python3.13-dev \
58
+ python3-pip \
59
+ python3-wheel \
60
+ && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.13 100 \
61
+ && pip install --break-system-packages --upgrade setuptools \
62
+ && pip install --break-system-packages -r requirements.txt \
63
+ && apt autoremove -y \
64
+ && apt clean -y \
65
+ && rm -rf /tmp/* /var/tmp/* \
66
+ && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
67
+ && find /var/cache -type f -delete
68
+
69
+ ENTRYPOINT ["/app/tools.sh"]
70
+
71
+ ### Light, CLI only
72
+ FROM base AS light
73
+
74
+ COPY --from=build /app/full/llama-cli /app/full/llama-completion /app
75
+
76
+ WORKDIR /app
77
+
78
+ ENTRYPOINT [ "/app/llama-cli" ]
79
+
80
+ ### Server, Server only
81
+ FROM base AS server
82
+
83
+ ENV LLAMA_ARG_HOST=0.0.0.0
84
+
85
+ COPY --from=build /app/full/llama-server /app
86
+
87
+ WORKDIR /app
88
+
89
+ HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
90
+
91
+ ENTRYPOINT [ "/app/llama-server" ]
.dockerignore ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.o
2
+ *.a
3
+ .cache/
4
+ # Do not ignore .git directory, otherwise the reported build number will always be 0
5
+ .github/
6
+ .gitignore
7
+ .vs/
8
+ .vscode/
9
+ .DS_Store
10
+
11
+ build*/
12
+
13
+ models/*
14
+
15
+ /llama-cli
16
+ /llama-quantize
17
+
18
+ arm_neon.h
19
+ compile_commands.json
20
+ Dockerfile
.ecrc ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "Exclude": ["^\\.gitmodules$", "stb_image\\.h"],
3
+ "Disable": {
4
+ "IndentSize": true
5
+ }
6
+ }
.editorconfig ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://EditorConfig.org
2
+
3
+ # Top-most EditorConfig file
4
+ root = true
5
+
6
+ # Unix-style newlines with a newline ending every file, utf-8 charset
7
+ [*]
8
+ end_of_line = lf
9
+ insert_final_newline = true
10
+ trim_trailing_whitespace = true
11
+ charset = utf-8
12
+ indent_style = space
13
+ indent_size = 4
14
+
15
+ [Makefile]
16
+ indent_style = tab
17
+
18
+ [scripts/*.mk]
19
+ indent_style = tab
20
+
21
+ [prompts/*.txt]
22
+ insert_final_newline = unset
23
+
24
+ [tools/server/public/*]
25
+ indent_size = 2
26
+
27
+ [tools/server/public/deps_*]
28
+ trim_trailing_whitespace = unset
29
+ indent_style = unset
30
+ indent_size = unset
31
+
32
+ [tools/server/deps_*]
33
+ trim_trailing_whitespace = unset
34
+ indent_style = unset
35
+ indent_size = unset
36
+
37
+ [examples/llama.swiftui/llama.swiftui.xcodeproj/*]
38
+ indent_style = tab
39
+
40
+ [tools/cvector-generator/*.txt]
41
+ trim_trailing_whitespace = unset
42
+ insert_final_newline = unset
43
+
44
+ [models/templates/*.jinja]
45
+ indent_style = unset
46
+ indent_size = unset
47
+ end_of_line = unset
48
+ charset = unset
49
+ trim_trailing_whitespace = unset
50
+ insert_final_newline = unset
51
+
52
+ [vendor/miniaudio/miniaudio.h]
53
+ trim_trailing_whitespace = unset
54
+ insert_final_newline = unset
55
+
56
+ [tools/server/webui/**]
57
+ indent_style = unset
58
+ indent_size = unset
59
+ end_of_line = unset
60
+ charset = unset
61
+ trim_trailing_whitespace = unset
62
+ insert_final_newline = unset
63
+
64
+ [benches/**]
65
+ indent_style = unset
66
+ indent_size = unset
67
+ end_of_line = unset
68
+ charset = unset
69
+ trim_trailing_whitespace = unset
70
+ insert_final_newline = unset
.flake8 ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [flake8]
2
+ max-line-length = 125
3
+ ignore = E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503
4
+ exclude =
5
+ # Do not traverse examples and tools
6
+ examples,
7
+ tools,
8
+ # Do not include package initializers
9
+ __init__.py,
10
+ # No need to traverse our git directory
11
+ .git,
12
+ # There's no value in checking cache directories
13
+ __pycache__,
14
+ # No need to include the build path
15
+ build,
16
+ # This contains builds that we don't want to check
17
+ dist # This is generated with `python build .` for package releases
18
+ # max-complexity = 10
.gemini/settings.json ADDED
@@ -0,0 +1 @@
 
 
1
+ { "contextFileName": "AGENTS.md" }
.gitattributes CHANGED
@@ -1,4 +1,7 @@
1
  *.7z filter=lfs diff=lfs merge=lfs -text
 
 
 
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.avro filter=lfs diff=lfs merge=lfs -text
4
  *.bin filter=lfs diff=lfs merge=lfs -text
 
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.gguf filter=lfs diff=lfs merge=lfs -text
3
+ *.key filter=lfs diff=lfs merge=lfs -text
4
+ *.pdf filter=lfs diff=lfs merge=lfs -text
5
  *.arrow filter=lfs diff=lfs merge=lfs -text
6
  *.avro filter=lfs diff=lfs merge=lfs -text
7
  *.bin filter=lfs diff=lfs merge=lfs -text
.github/ISSUE_TEMPLATE/010-bug-compilation.yml ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Bug (compilation)
2
+ description: Something goes wrong when trying to compile llama.cpp.
3
+ title: "Compile bug: "
4
+ labels: ["bug-unconfirmed", "compilation"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: >
9
+ Thanks for taking the time to fill out this bug report!
10
+ This issue template is intended for bug reports where the compilation of llama.cpp fails.
11
+ Before opening an issue, please confirm that the compilation still fails
12
+ after recreating the CMake build directory and with `-DGGML_CCACHE=OFF`.
13
+ If the compilation succeeds with ccache disabled you should be able to permanently fix the issue
14
+ by clearing `~/.cache/ccache` (on Linux).
15
+ - type: textarea
16
+ id: commit
17
+ attributes:
18
+ label: Git commit
19
+ description: Which commit are you trying to compile?
20
+ placeholder: |
21
+ $git rev-parse HEAD
22
+ 84a07a17b1b08cf2b9747c633a2372782848a27f
23
+ validations:
24
+ required: true
25
+ - type: dropdown
26
+ id: operating-system
27
+ attributes:
28
+ label: Operating systems
29
+ description: Which operating systems do you know to be affected?
30
+ multiple: true
31
+ options:
32
+ - Linux
33
+ - Mac
34
+ - Windows
35
+ - BSD
36
+ - Other? (Please let us know in description)
37
+ validations:
38
+ required: true
39
+ - type: dropdown
40
+ id: backends
41
+ attributes:
42
+ label: GGML backends
43
+ description: Which GGML backends do you know to be affected?
44
+ options: [AMX, BLAS, CANN, CPU, CUDA, Hexagon, HIP, Metal, Musa, OpenCL, RPC, SYCL, VirtGPU, Vulkan, WebGPU, zDNN, ZenDNN]
45
+ multiple: true
46
+ validations:
47
+ required: true
48
+ - type: textarea
49
+ id: info
50
+ attributes:
51
+ label: Problem description & steps to reproduce
52
+ description: >
53
+ Please give us a summary of the problem and tell us how to reproduce it.
54
+ If you can narrow down the bug to specific compile flags, that information would be very much appreciated by us.
55
+ placeholder: >
56
+ I'm trying to compile llama.cpp with CUDA support on a fresh install of Ubuntu and get error XY.
57
+ Here are the exact commands that I used: ...
58
+ validations:
59
+ required: true
60
+ - type: textarea
61
+ id: first_bad_commit
62
+ attributes:
63
+ label: First Bad Commit
64
+ description: >
65
+ If the bug was not present on an earlier version: when did it start appearing?
66
+ If possible, please do a git bisect and identify the exact commit that introduced the bug.
67
+ validations:
68
+ required: false
69
+ - type: textarea
70
+ id: command
71
+ attributes:
72
+ label: Compile command
73
+ description: >
74
+ Please provide the exact command you used to compile llama.cpp. For example: `cmake -B ...`.
75
+ This will be automatically formatted into code, so no need for backticks.
76
+ render: shell
77
+ validations:
78
+ required: true
79
+ - type: textarea
80
+ id: logs
81
+ attributes:
82
+ label: Relevant log output
83
+ description: >
84
+ Please copy and paste any relevant log output, including any generated text.
85
+ This will be automatically formatted into code, so no need for backticks.
86
+ render: shell
87
+ validations:
88
+ required: true
.github/ISSUE_TEMPLATE/011-bug-results.yml ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Bug (model use)
2
+ description: Something goes wrong when using a model (in general, not specific to a single llama.cpp module).
3
+ title: "Eval bug: "
4
+ labels: ["bug-unconfirmed", "model evaluation"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: >
9
+ Thanks for taking the time to fill out this bug report!
10
+ This issue template is intended for bug reports where the model evaluation results
11
+ (i.e. the generated text) are incorrect or llama.cpp crashes during model evaluation.
12
+ If you encountered the issue while using an external UI (e.g. ollama),
13
+ please reproduce your issue using one of the examples/binaries in this repository.
14
+ The `llama-completion` binary can be used for simple and reproducible model inference.
15
+ - type: textarea
16
+ id: version
17
+ attributes:
18
+ label: Name and Version
19
+ description: Which version of our software are you running? (use `--version` to get a version string)
20
+ placeholder: |
21
+ $./llama-cli --version
22
+ version: 2999 (42b4109e)
23
+ built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu
24
+ validations:
25
+ required: true
26
+ - type: dropdown
27
+ id: operating-system
28
+ attributes:
29
+ label: Operating systems
30
+ description: Which operating systems do you know to be affected?
31
+ multiple: true
32
+ options:
33
+ - Linux
34
+ - Mac
35
+ - Windows
36
+ - BSD
37
+ - Other? (Please let us know in description)
38
+ validations:
39
+ required: true
40
+ - type: dropdown
41
+ id: backends
42
+ attributes:
43
+ label: GGML backends
44
+ description: Which GGML backends do you know to be affected?
45
+ options: [AMX, BLAS, CANN, CPU, CUDA, Hexagon, HIP, Metal, Musa, OpenCL, RPC, SYCL, VirtGPU, Vulkan, WebGPU, zDNN, ZenDNN]
46
+ multiple: true
47
+ validations:
48
+ required: true
49
+ - type: textarea
50
+ id: hardware
51
+ attributes:
52
+ label: Hardware
53
+ description: Which CPUs/GPUs are you using?
54
+ placeholder: >
55
+ e.g. Ryzen 5950X + 2x RTX 4090
56
+ validations:
57
+ required: true
58
+ - type: textarea
59
+ id: model
60
+ attributes:
61
+ label: Models
62
+ description: >
63
+ Which model(s) at which quantization were you using when encountering the bug?
64
+ If you downloaded a GGUF file off of Huggingface, please provide a link.
65
+ placeholder: >
66
+ e.g. Meta LLaMA 3.1 Instruct 8b q4_K_M
67
+ validations:
68
+ required: false
69
+ - type: textarea
70
+ id: info
71
+ attributes:
72
+ label: Problem description & steps to reproduce
73
+ description: >
74
+ Please give us a summary of the problem and tell us how to reproduce it.
75
+ If you can narrow down the bug to specific hardware, compile flags, or command line arguments,
76
+ that information would be very much appreciated by us.
77
+
78
+ If possible, please try to reproduce the issue using `llama-completion` with `-fit off`.
79
+ If you can only reproduce the issue with `-fit on`, please provide logs both with and without `--verbose`.
80
+ placeholder: >
81
+ e.g. when I run llama-completion with `-fa on` I get garbled outputs for very long prompts.
82
+ With short prompts or `-fa off` it works correctly.
83
+ Here are the exact commands that I used: ...
84
+ validations:
85
+ required: true
86
+ - type: textarea
87
+ id: first_bad_commit
88
+ attributes:
89
+ label: First Bad Commit
90
+ description: >
91
+ If the bug was not present on an earlier version: when did it start appearing?
92
+ If possible, please do a git bisect and identify the exact commit that introduced the bug.
93
+ validations:
94
+ required: false
95
+ - type: textarea
96
+ id: logs
97
+ attributes:
98
+ label: Relevant log output
99
+ description: >
100
+ Please copy and paste any relevant log output, including the command that you entered and any generated text.
101
+ For very long logs (thousands of lines), preferably upload them as files instead.
102
+ On Linux you can redirect console output into a file by appending ` > llama.log 2>&1` to your command.
103
+ value: |
104
+ <details>
105
+ <summary>Logs</summary>
106
+ <!-- Copy-pasted short logs go into the "console" area here -->
107
+
108
+ ```console
109
+
110
+ ```
111
+ </details>
112
+
113
+ <!-- Long logs that you upload as files go here, outside the "console" area -->
114
+ validations:
115
+ required: true
.github/ISSUE_TEMPLATE/019-bug-misc.yml ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Bug (misc.)
2
+ description: Something is not working the way it should (and it's not covered by any of the above cases).
3
+ title: "Misc. bug: "
4
+ labels: ["bug-unconfirmed"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: >
9
+ Thanks for taking the time to fill out this bug report!
10
+ This issue template is intended for miscellaneous bugs that don't fit into any other category.
11
+ If you encountered the issue while using an external UI (e.g. ollama),
12
+ please reproduce your issue using one of the examples/binaries in this repository.
13
+ - type: textarea
14
+ id: version
15
+ attributes:
16
+ label: Name and Version
17
+ description: Which version of our software is affected? (You can use `--version` to get a version string.)
18
+ placeholder: |
19
+ $./llama-cli --version
20
+ version: 2999 (42b4109e)
21
+ built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu
22
+ validations:
23
+ required: true
24
+ - type: dropdown
25
+ id: operating-system
26
+ attributes:
27
+ label: Operating systems
28
+ description: Which operating systems do you know to be affected?
29
+ multiple: true
30
+ options:
31
+ - Linux
32
+ - Mac
33
+ - Windows
34
+ - BSD
35
+ - Other? (Please let us know in description)
36
+ validations:
37
+ required: false
38
+ - type: dropdown
39
+ id: module
40
+ attributes:
41
+ label: Which llama.cpp modules do you know to be affected?
42
+ multiple: true
43
+ options:
44
+ - Documentation/Github
45
+ - libllama (core library)
46
+ - llama-cli
47
+ - llama-server
48
+ - llama-bench
49
+ - llama-quantize
50
+ - Python/Bash scripts
51
+ - Test code
52
+ - Other (Please specify in the next section)
53
+ validations:
54
+ required: false
55
+ - type: textarea
56
+ id: command
57
+ attributes:
58
+ label: Command line
59
+ description: >
60
+ Please provide the exact commands you entered, if applicable. For example: `llama-server -m ... -c ...`, `llama-cli -m ...`, etc.
61
+ This will be automatically formatted into code, so no need for backticks.
62
+ render: shell
63
+ validations:
64
+ required: false
65
+ - type: textarea
66
+ id: info
67
+ attributes:
68
+ label: Problem description & steps to reproduce
69
+ description: >
70
+ Please give us a summary of the problem and tell us how to reproduce it (if applicable).
71
+ validations:
72
+ required: true
73
+ - type: textarea
74
+ id: first_bad_commit
75
+ attributes:
76
+ label: First Bad Commit
77
+ description: >
78
+ If the bug was not present on an earlier version and it's not trivial to track down: when did it start appearing?
79
+ If possible, please do a git bisect and identify the exact commit that introduced the bug.
80
+ validations:
81
+ required: false
82
+ - type: textarea
83
+ id: logs
84
+ attributes:
85
+ label: Relevant log output
86
+ description: >
87
+ If applicable, please copy and paste any relevant log output, including any generated text.
88
+ If you are encountering problems specifically with the `llama_params_fit` module, always upload `--verbose` logs as well.
89
+ For very long logs (thousands of lines), please upload them as files instead.
90
+ On Linux you can redirect console output into a file by appending ` > llama.log 2>&1` to your command.
91
+ value: |
92
+ <details>
93
+ <summary>Logs</summary>
94
+ <!-- Copy-pasted short logs go into the "console" area here -->
95
+
96
+ ```console
97
+
98
+ ```
99
+ </details>
100
+
101
+ <!-- Long logs that you upload as files go here, outside the "console" area -->
102
+ validations:
103
+ required: false
.github/ISSUE_TEMPLATE/020-enhancement.yml ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Enhancement
2
+ description: Used to request enhancements for llama.cpp.
3
+ title: "Feature Request: "
4
+ labels: ["enhancement"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ [Please post your idea first in Discussion if there is not yet a consensus for this enhancement request. This will help to keep this issue tracker focused on enhancements that the community has agreed needs to be implemented.](https://github.com/ggml-org/llama.cpp/discussions/categories/ideas)
10
+
11
+ - type: checkboxes
12
+ id: prerequisites
13
+ attributes:
14
+ label: Prerequisites
15
+ description: Please confirm the following before submitting your enhancement request.
16
+ options:
17
+ - label: I am running the latest code. Mention the version if possible as well.
18
+ required: true
19
+ - label: I carefully followed the [README.md](https://github.com/ggml-org/llama.cpp/blob/master/README.md).
20
+ required: true
21
+ - label: I searched using keywords relevant to my issue to make sure that I am creating a new issue that is not already open (or closed).
22
+ required: true
23
+ - label: I reviewed the [Discussions](https://github.com/ggml-org/llama.cpp/discussions), and have a new and useful enhancement to share.
24
+ required: true
25
+
26
+ - type: textarea
27
+ id: feature-description
28
+ attributes:
29
+ label: Feature Description
30
+ description: Please provide a detailed written description of what you were trying to do, and what you expected `llama.cpp` to do as an enhancement.
31
+ placeholder: Detailed description of the enhancement
32
+ validations:
33
+ required: true
34
+
35
+ - type: textarea
36
+ id: motivation
37
+ attributes:
38
+ label: Motivation
39
+ description: Please provide a detailed written description of reasons why this feature is necessary and how it is useful to `llama.cpp` users.
40
+ placeholder: Explanation of why this feature is needed and its benefits
41
+ validations:
42
+ required: true
43
+
44
+ - type: textarea
45
+ id: possible-implementation
46
+ attributes:
47
+ label: Possible Implementation
48
+ description: If you have an idea as to how it can be implemented, please write a detailed description. Feel free to give links to external sources or share visuals that might be helpful to understand the details better.
49
+ placeholder: Detailed description of potential implementation
50
+ validations:
51
+ required: false
.github/ISSUE_TEMPLATE/030-research.yml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Research
2
+ description: Track new technical research area.
3
+ title: "Research: "
4
+ labels: ["research 🔬"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ Don't forget to check for any [duplicate research issue tickets](https://github.com/ggml-org/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3A%22research+%F0%9F%94%AC%22)
10
+
11
+ - type: checkboxes
12
+ id: research-stage
13
+ attributes:
14
+ label: Research Stage
15
+ description: Track general state of this research ticket
16
+ options:
17
+ - label: Background Research (Let's try to avoid reinventing the wheel)
18
+ - label: Hypothesis Formed (How do you think this will work and it's effect?)
19
+ - label: Strategy / Implementation Forming
20
+ - label: Analysis of results
21
+ - label: Debrief / Documentation (So people in the future can learn from us)
22
+
23
+ - type: textarea
24
+ id: background
25
+ attributes:
26
+ label: Previous existing literature and research
27
+ description: Whats the current state of the art and whats the motivation for this research?
28
+
29
+ - type: textarea
30
+ id: hypothesis
31
+ attributes:
32
+ label: Hypothesis
33
+ description: How do you think this will work and it's effect?
34
+
35
+ - type: textarea
36
+ id: implementation
37
+ attributes:
38
+ label: Implementation
39
+ description: Got an approach? e.g. a PR ready to go?
40
+
41
+ - type: textarea
42
+ id: analysis
43
+ attributes:
44
+ label: Analysis
45
+ description: How does the proposed implementation behave?
46
+
47
+ - type: textarea
48
+ id: logs
49
+ attributes:
50
+ label: Relevant log output
51
+ description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
52
+ render: shell
.github/ISSUE_TEMPLATE/040-refactor.yml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Refactor (Maintainers)
2
+ description: Used to track refactoring opportunities.
3
+ title: "Refactor: "
4
+ labels: ["refactor"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ Don't forget to [check for existing refactor issue tickets](https://github.com/ggml-org/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3Arefactoring) in case it's already covered.
10
+ Also you may want to check [Pull request refactor label as well](https://github.com/ggml-org/llama.cpp/pulls?q=is%3Aopen+is%3Apr+label%3Arefactoring) for duplicates too.
11
+
12
+ - type: textarea
13
+ id: background-description
14
+ attributes:
15
+ label: Background Description
16
+ description: Please provide a detailed written description of the pain points you are trying to solve.
17
+ placeholder: Detailed description behind your motivation to request refactor
18
+ validations:
19
+ required: true
20
+
21
+ - type: textarea
22
+ id: possible-approaches
23
+ attributes:
24
+ label: Possible Refactor Approaches
25
+ description: If you have some idea of possible approaches to solve this problem. You may want to make it a todo list.
26
+ placeholder: Your idea of possible refactoring opportunity/approaches
27
+ validations:
28
+ required: false
.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ blank_issues_enabled: true
2
+ contact_links:
3
+ - name: Got an idea?
4
+ url: https://github.com/ggml-org/llama.cpp/discussions/categories/ideas
5
+ about: Pop it there. It may then become an enhancement ticket.
6
+ - name: Got a question?
7
+ url: https://github.com/ggml-org/llama.cpp/discussions/categories/q-a
8
+ about: Ask a question there!
9
+ - name: Want to contribute?
10
+ url: https://github.com/ggml-org/llama.cpp/wiki/contribute
11
+ about: Head to the contribution guide page of the wiki for areas you can help with
.github/actions/get-tag-name/action.yml ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "Determine tag name"
2
+ description: "Determine the tag name to use for a release"
3
+ outputs:
4
+ name:
5
+ description: "The name of the tag"
6
+ value: ${{ steps.tag.outputs.name }}
7
+
8
+ runs:
9
+ using: "composite"
10
+ steps:
11
+ - name: Determine tag name
12
+ id: tag
13
+ shell: bash
14
+ run: |
15
+ BUILD_NUMBER="$(git rev-list --count HEAD)"
16
+ SHORT_HASH="$(git rev-parse --short=7 HEAD)"
17
+ if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
18
+ echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
19
+ else
20
+ SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
21
+ echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
22
+ fi
.github/actions/install-exe/action.yml ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "Install exe"
2
+ description: "Download and install exe"
3
+ inputs:
4
+ url:
5
+ description: "URL of the exe installer"
6
+ required: true
7
+ args:
8
+ description: "Installer arguments"
9
+ required: true
10
+ timeout:
11
+ description: "Timeout (in ms)"
12
+ required: false
13
+ default: "600000"
14
+
15
+ runs:
16
+ using: "composite"
17
+ steps:
18
+ - name: Install EXE
19
+ shell: pwsh
20
+ run: |
21
+ $ErrorActionPreference = "Stop"
22
+ write-host "Downloading Installer EXE"
23
+ Invoke-WebRequest -Uri "${{ inputs.url }}" -OutFile "${env:RUNNER_TEMP}\temp-install.exe"
24
+ write-host "Installing"
25
+ $proc = Start-Process "${env:RUNNER_TEMP}\temp-install.exe" -ArgumentList '${{ inputs.args }}' -NoNewWindow -PassThru
26
+ $completed = $proc.WaitForExit(${{ inputs.timeout }})
27
+ if (-not $completed) {
28
+ Write-Error "Installer timed out. Killing the process"
29
+ $proc.Kill()
30
+ exit 1
31
+ }
32
+ if ($proc.ExitCode -ne 0) {
33
+ Write-Error "Installer failed with exit code $($proc.ExitCode)"
34
+ exit 1
35
+ }
36
+ write-host "Completed installation"
.github/actions/linux-setup-openvino/action.yml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "Linux - Setup OpenVINO Toolkit"
2
+ description: "Setup OpenVINO Toolkit for Linux"
3
+ inputs:
4
+ path:
5
+ description: "Installation path"
6
+ required: true
7
+ version_major:
8
+ description: "OpenVINO major version (e.g., 2025.3)"
9
+ required: true
10
+ version_full:
11
+ description: "OpenVINO full version (e.g., 2025.3.0.19807.44526285f24)"
12
+ required: true
13
+
14
+ runs:
15
+ using: "composite"
16
+ steps:
17
+ - name: Setup OpenVINO Toolkit
18
+ id: setup
19
+ uses: ./.github/actions/unarchive-tar
20
+ with:
21
+ url: https://storage.openvinotoolkit.org/repositories/openvino/packages/${{ inputs.version_major }}/linux/openvino_toolkit_ubuntu24_${{ inputs.version_full }}_x86_64.tgz
22
+ path: ${{ inputs.path }}
23
+ type: z
24
+ strip: 1
25
+
.github/actions/linux-setup-spacemit/action.yml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "Linux - Setup SpacemiT Toolchain"
2
+ description: "Setup SpacemiT Toolchain for Linux"
3
+ inputs:
4
+ path:
5
+ description: "Installation path"
6
+ required: true
7
+ version:
8
+ description: "SpacemiT toolchain version"
9
+ required: true
10
+
11
+ runs:
12
+ using: "composite"
13
+ steps:
14
+ - name: Setup SpacemiT Toolchain
15
+ id: setup
16
+ uses: ./.github/actions/unarchive-tar
17
+ with:
18
+ url: https://archive.spacemit.com/toolchain/spacemit-toolchain-linux-glibc-x86_64-v${{ inputs.version }}.tar.xz
19
+ path: ${{ inputs.path }}
20
+ strip: 1
.github/actions/linux-setup-vulkan/action.yml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "Linux - Setup Vulkan SDK"
2
+ description: "Setup Vulkan SDK for Linux"
3
+ inputs:
4
+ path:
5
+ description: "Installation path"
6
+ required: true
7
+ version:
8
+ description: "Vulkan SDK version"
9
+ required: true
10
+
11
+ runs:
12
+ using: "composite"
13
+ steps:
14
+ - name: Setup Vulkan SDK
15
+ id: setup
16
+ uses: ./.github/actions/unarchive-tar
17
+ with:
18
+ url: https://sdk.lunarg.com/sdk/download/${{ inputs.version }}/linux/vulkan_sdk.tar.xz
19
+ path: ${{ inputs.path }}
20
+ strip: 1
.github/actions/unarchive-tar/action.yml ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "Unarchive tar"
2
+ description: "Download and unarchive tar into directory"
3
+ inputs:
4
+ url:
5
+ description: "URL of the tar archive"
6
+ required: true
7
+ path:
8
+ description: "Directory to unarchive into"
9
+ required: true
10
+ type:
11
+ description: "Compression type (tar option)"
12
+ required: false
13
+ default: "J"
14
+ strip:
15
+ description: "Strip components"
16
+ required: false
17
+ default: "0"
18
+
19
+ runs:
20
+ using: "composite"
21
+ steps:
22
+ - name: Unarchive into directory
23
+ shell: bash
24
+ run: |
25
+ mkdir -p ${{ inputs.path }}
26
+ cd ${{ inputs.path }}
27
+ curl --no-progress-meter ${{ inputs.url }} | tar -${{ inputs.type }}x --strip-components=${{ inputs.strip }}
.github/actions/windows-setup-cuda/action.yml ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "Windows - Setup CUDA Toolkit"
2
+ description: "Setup CUDA Toolkit for Windows"
3
+ inputs:
4
+ cuda_version:
5
+ description: "CUDA toolkit version"
6
+ required: true
7
+
8
+ runs:
9
+ using: "composite"
10
+ steps:
11
+ - name: Install Cuda Toolkit 11.7
12
+ if: ${{ inputs.cuda_version == '11.7' }}
13
+ shell: pwsh
14
+ run: |
15
+ mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7"
16
+ choco install unzip -y
17
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-11.7.99-archive.zip"
18
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-11.7.99-archive.zip"
19
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-11.7.99-archive.zip"
20
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-11.7.4.6-archive.zip"
21
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-11.7.91-archive.zip"
22
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-11.7.91-archive.zip"
23
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-11.7.101-archive.zip"
24
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-11.7.91-archive.zip"
25
+ unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7"
26
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_cudart-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
27
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvcc-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
28
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvrtc-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
29
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\libcublas-windows-x86_64-11.7.4.6-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
30
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvtx-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
31
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\visual_studio_integration-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
32
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvprof-windows-x86_64-11.7.101-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
33
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_cccl-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
34
+ echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
35
+ echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
36
+ echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
37
+ echo "CUDA_PATH_V11_7=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
38
+
39
+ - name: Install Cuda Toolkit 12.4
40
+ if: ${{ inputs.cuda_version == '12.4' }}
41
+ shell: pwsh
42
+ run: |
43
+ mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4"
44
+ choco install unzip -y
45
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-12.4.127-archive.zip"
46
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-12.4.131-archive.zip"
47
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-12.4.127-archive.zip"
48
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-12.4.5.8-archive.zip"
49
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-12.4.127-archive.zip"
50
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_profiler_api/windows-x86_64/cuda_profiler_api-windows-x86_64-12.4.127-archive.zip"
51
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-12.4.127-archive.zip"
52
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-12.4.127-archive.zip"
53
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-12.4.127-archive.zip"
54
+ unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4"
55
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_cudart-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
56
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvcc-windows-x86_64-12.4.131-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
57
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvrtc-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
58
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\libcublas-windows-x86_64-12.4.5.8-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
59
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvtx-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
60
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_profiler_api-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
61
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\visual_studio_integration-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
62
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvprof-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
63
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_cccl-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
64
+ echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
65
+ echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
66
+ echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
67
+ echo "CUDA_PATH_V12_4=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
68
+
69
+ - name: Install Cuda Toolkit 13.1
70
+ if: ${{ inputs.cuda_version == '13.1' }}
71
+ shell: pwsh
72
+ run: |
73
+ mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1"
74
+ choco install unzip -y
75
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_crt/windows-x86_64/cuda_crt-windows-x86_64-13.1.80-archive.zip"
76
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-13.1.80-archive.zip"
77
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-13.1.80-archive.zip"
78
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-13.1.80-archive.zip"
79
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-13.2.0.9-archive.zip"
80
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libnvvm/windows-x86_64/libnvvm-windows-x86_64-13.1.80-archive.zip"
81
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-13.1.68-archive.zip"
82
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_profiler_api/windows-x86_64/cuda_profiler_api-windows-x86_64-13.1.80-archive.zip"
83
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-13.1.68-archive.zip"
84
+ curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-13.1.78-archive.zip"
85
+ unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1"
86
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\cuda_crt-windows-x86_64-13.1.80-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" /E /I /H /Y
87
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\cuda_cudart-windows-x86_64-13.1.80-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" /E /I /H /Y
88
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\cuda_nvcc-windows-x86_64-13.1.80-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" /E /I /H /Y
89
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\cuda_nvrtc-windows-x86_64-13.1.80-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" /E /I /H /Y
90
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\libcublas-windows-x86_64-13.2.0.9-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" /E /I /H /Y
91
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\libnvvm-windows-x86_64-13.1.80-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" /E /I /H /Y
92
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\cuda_nvtx-windows-x86_64-13.1.68-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" /E /I /H /Y
93
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\cuda_profiler_api-windows-x86_64-13.1.80-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" /E /I /H /Y
94
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\visual_studio_integration-windows-x86_64-13.1.68-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" /E /I /H /Y
95
+ xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\cuda_cccl-windows-x86_64-13.1.78-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" /E /I /H /Y
96
+ echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
97
+ echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
98
+ echo "CUDA_PATH_V13_1=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
.github/actions/windows-setup-rocm/action.yml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "Windows - Setup ROCm"
2
+ description: "Setup ROCm for Windows"
3
+ inputs:
4
+ version:
5
+ description: "ROCm version"
6
+ required: true
7
+
8
+ runs:
9
+ using: "composite"
10
+ steps:
11
+ - name: Setup ROCm
12
+ uses: ./.github/actions/install-exe
13
+ with:
14
+ url: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-${{ inputs.version }}-Win11-For-HIP.exe
15
+ args: -install
.github/labeler.yml ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://github.com/actions/labeler
2
+ Apple Metal:
3
+ - changed-files:
4
+ - any-glob-to-any-file:
5
+ - ggml/include/ggml-metal.h
6
+ - ggml/src/ggml-metal/**
7
+ - README-metal.md
8
+ SYCL:
9
+ - changed-files:
10
+ - any-glob-to-any-file:
11
+ - ggml/include/ggml-sycl.h
12
+ - ggml/src/ggml-sycl/**
13
+ - docs/backend/SYCL.md
14
+ - examples/sycl/**
15
+ Nvidia GPU:
16
+ - changed-files:
17
+ - any-glob-to-any-file:
18
+ - ggml/include/ggml-cuda.h
19
+ - ggml/src/ggml-cuda/**
20
+ Vulkan:
21
+ - changed-files:
22
+ - any-glob-to-any-file:
23
+ - ggml/include/ggml-vulkan.h
24
+ - ggml/src/ggml-vulkan/**
25
+ IBM zDNN:
26
+ - changed-files:
27
+ - any-glob-to-any-file:
28
+ - ggml/include/ggml-zdnn.h
29
+ - ggml/src/ggml-zdnn/**
30
+ documentation:
31
+ - changed-files:
32
+ - any-glob-to-any-file:
33
+ - docs/**
34
+ - media/**
35
+ testing:
36
+ - changed-files:
37
+ - any-glob-to-any-file:
38
+ - tests/**
39
+ build:
40
+ - changed-files:
41
+ - any-glob-to-any-file:
42
+ - cmake/**
43
+ - CMakeLists.txt
44
+ - CMakePresets.json
45
+ examples:
46
+ - changed-files:
47
+ - any-glob-to-any-file:
48
+ - examples/**
49
+ - tools/**
50
+ devops:
51
+ - changed-files:
52
+ - any-glob-to-any-file:
53
+ - .devops/**
54
+ - .github/**
55
+ - ci/**
56
+ python:
57
+ - changed-files:
58
+ - any-glob-to-any-file:
59
+ - "**/*.py"
60
+ - requirements/**
61
+ - gguf-py/**
62
+ - .flake8
63
+ script:
64
+ - changed-files:
65
+ - any-glob-to-any-file:
66
+ - scripts/**
67
+ android:
68
+ - changed-files:
69
+ - any-glob-to-any-file:
70
+ - examples/llama.android/**
71
+ server:
72
+ - changed-files:
73
+ - any-glob-to-any-file:
74
+ - tools/server/**
75
+ ggml:
76
+ - changed-files:
77
+ - any-glob-to-any-file:
78
+ - ggml/**
79
+ model:
80
+ - changed-files:
81
+ - any-glob-to-any-file:
82
+ - src/models/**
83
+ nix:
84
+ - changed-files:
85
+ - any-glob-to-any-file:
86
+ - "**/*.nix"
87
+ - .github/workflows/nix-*.yml
88
+ - .devops/nix/nixpkgs-instances.nix
89
+ embedding:
90
+ - changed-files:
91
+ - any-glob-to-any-file: examples/embedding/
92
+ jinja parser:
93
+ - changed-files:
94
+ - any-glob-to-any-file:
95
+ - common/jinja/**
96
+ Ascend NPU:
97
+ - changed-files:
98
+ - any-glob-to-any-file:
99
+ - ggml/include/ggml-cann.h
100
+ - ggml/src/ggml-cann/**
101
+ - docs/backend/CANN.md
102
+ OpenCL:
103
+ - changed-files:
104
+ - any-glob-to-any-file:
105
+ - ggml/include/ggml-opencl.h
106
+ - ggml/src/ggml-opencl/**
107
+ - docs/backend/OPENCL.md
108
+ Hexagon:
109
+ - changed-files:
110
+ - any-glob-to-any-file:
111
+ - ggml/include/ggml-hexagon.h
112
+ - ggml/src/ggml-hexagon/**
113
+ WebGPU:
114
+ - changed-files:
115
+ - any-glob-to-any-file:
116
+ - ggml/include/ggml-webgpu.h
117
+ - ggml/src/ggml-webgpu/**
118
+ OpenVINO:
119
+ - changed-files:
120
+ - any-glob-to-any-file:
121
+ - ggml/include/ggml-openvino.h
122
+ - ggml/src/ggml-openvino/**
123
+ - docs/backend/OPENVINO.md
.github/pull_request_template.md ADDED
@@ -0,0 +1 @@
 
 
1
+ *Make sure to read the [contributing guidelines](https://github.com/ggml-org/llama.cpp/blob/master/CONTRIBUTING.md) before submitting a PR*
.github/workflows/bench.yml.disabled ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # TODO: there have been some issues with the workflow, so disabling for now
2
+ # https://github.com/ggml-org/llama.cpp/issues/7893
3
+ #
4
+ # Benchmark
5
+ name: Benchmark
6
+
7
+ on:
8
+ workflow_dispatch:
9
+ inputs:
10
+ gpu-series:
11
+ description: 'Azure GPU series to run with'
12
+ required: true
13
+ type: choice
14
+ options:
15
+ - Standard_NC4as_T4_v3
16
+ - Standard_NC24ads_A100_v4
17
+ - Standard_NC80adis_H100_v5
18
+ sha:
19
+ description: 'Commit SHA1 to build'
20
+ required: false
21
+ type: string
22
+ duration:
23
+ description: 'Duration of the bench'
24
+ type: string
25
+ default: 10m
26
+
27
+ push:
28
+ branches:
29
+ - master
30
+ paths: ['llama.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'tools/server/*.h*', 'tools/server/*.cpp']
31
+ pull_request_target:
32
+ types: [opened, synchronize, reopened]
33
+ paths: ['llama.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'tools/server/*.h*', 'tools/server/*.cpp']
34
+ schedule:
35
+ - cron: '04 2 * * *'
36
+
37
+ concurrency:
38
+ group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.run_id }}-${{ github.event.inputs.sha }}
39
+ cancel-in-progress: true
40
+
41
+ jobs:
42
+ bench-server-baseline:
43
+ runs-on: Standard_NC4as_T4_v3
44
+ env:
45
+ RUNNER_LABEL: Standard_NC4as_T4_v3 # FIXME Do not find a way to not duplicate it
46
+ N_USERS: 8
47
+ DURATION: 10m
48
+
49
+ strategy:
50
+ matrix:
51
+ model: [phi-2]
52
+ ftype: [q4_0, q8_0, f16]
53
+ include:
54
+ - model: phi-2
55
+ ftype: q4_0
56
+ pr_comment_enabled: "true"
57
+
58
+ if: |
59
+ inputs.gpu-series == 'Standard_NC4as_T4_v3'
60
+ || github.event_name == 'pull_request_target'
61
+ steps:
62
+ - name: Clone
63
+ id: checkout
64
+ uses: actions/checkout@v4
65
+ with:
66
+ fetch-depth: 0
67
+ ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}
68
+
69
+ - name: Install python env
70
+ id: pipenv
71
+ run: |
72
+ cd tools/server/bench
73
+ python3 -m venv venv
74
+ source venv/bin/activate
75
+ pip install -r requirements.txt
76
+
77
+ - name: Prometheus
78
+ id: install_prometheus
79
+ run: |
80
+ wget --quiet https://github.com/prometheus/prometheus/releases/download/v2.51.0/prometheus-2.51.0.linux-amd64.tar.gz
81
+ tar xzf prometheus*.tar.gz --strip-components=1
82
+ ./prometheus --config.file=tools/server/bench/prometheus.yml &
83
+ while ! nc -z localhost 9090; do
84
+ sleep 0.1
85
+ done
86
+
87
+ - name: Set up Go
88
+ uses: actions/setup-go@v5
89
+ with:
90
+ go-version: '1.21'
91
+
92
+ - name: Install k6 and xk6-sse
93
+ id: k6_installation
94
+ run: |
95
+ cd tools/server/bench
96
+ go install go.k6.io/xk6/cmd/xk6@latest
97
+ xk6 build master \
98
+ --with github.com/phymbert/xk6-sse
99
+
100
+ - name: Build
101
+ id: cmake_build
102
+ run: |
103
+ set -eux
104
+ cmake -B build \
105
+ -DGGML_NATIVE=OFF \
106
+ -DLLAMA_BUILD_SERVER=ON \
107
+ -DLLAMA_CUBLAS=ON \
108
+ -DCUDAToolkit_ROOT=/usr/local/cuda \
109
+ -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc \
110
+ -DCMAKE_CUDA_ARCHITECTURES=75 \
111
+ -DLLAMA_FATAL_WARNINGS=OFF \
112
+ -DLLAMA_ALL_WARNINGS=OFF \
113
+ -DCMAKE_BUILD_TYPE=Release;
114
+ cmake --build build --config Release -j $(nproc) --target llama-server
115
+
116
+ - name: Download the dataset
117
+ id: download_dataset
118
+ run: |
119
+ cd tools/server/bench
120
+ wget --quiet https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json
121
+
122
+ - name: Server bench
123
+ id: server_bench
124
+ env:
125
+ HEAD_REF: ${{ github.head_ref || github.ref_name }}
126
+ run: |
127
+ set -eux
128
+
129
+ cd tools/server/bench
130
+ source venv/bin/activate
131
+ python bench.py \
132
+ --runner-label ${{ env.RUNNER_LABEL }} \
133
+ --name ${{ github.job }} \
134
+ --branch $HEAD_REF \
135
+ --commit ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha }} \
136
+ --scenario script.js \
137
+ --duration ${{ github.event.inputs.duration || env.DURATION }} \
138
+ --hf-repo ggml-org/models \
139
+ --hf-file ${{ matrix.model }}/ggml-model-${{ matrix.ftype }}.gguf \
140
+ --model-path-prefix /models \
141
+ --parallel ${{ env.N_USERS }} \
142
+ -ngl 33 \
143
+ --batch-size 2048 \
144
+ --ubatch-size 256 \
145
+ --ctx-size 16384 \
146
+ --n-prompts 1000 \
147
+ --max-prompt-tokens 1024 \
148
+ --max-tokens 2048
149
+
150
+ cat results.github.env >> $GITHUB_ENV
151
+
152
+ # Remove dataset as we do not want it in the artefact
153
+ rm ShareGPT_V3_unfiltered_cleaned_split.json
154
+
155
+ - uses: actions/upload-artifact@v4
156
+ with:
157
+ name: bench-server-${{ github.job }}-${{ env.RUNNER_LABEL }}-${{ matrix.model }}-${{ matrix.ftype }}
158
+ compression-level: 9
159
+ path: |
160
+ tools/server/bench/*.jpg
161
+ tools/server/bench/*.json
162
+ tools/server/bench/*.log
163
+
164
+ - name: Commit status
165
+ uses: Sibz/github-status-action@v1
166
+ with:
167
+ authToken: ${{secrets.GITHUB_TOKEN}}
168
+ sha: ${{ inputs.sha || github.event.pull_request.head.sha || github.sha }}
169
+ context: bench-server-${{ github.job }}-${{ env.RUNNER_LABEL }}-${{ matrix.model }}-${{ matrix.ftype }}
170
+ description: |
171
+ ${{ env.BENCH_RESULTS }}
172
+ state: 'success'
173
+
174
+ - name: Upload benchmark images
175
+ uses: devicons/public-upload-to-imgur@v2.2.2
176
+ continue-on-error: true # Important as it looks unstable: 503
177
+ id: imgur_step
178
+ with:
179
+ client_id: ${{secrets.IMGUR_CLIENT_ID}}
180
+ path: |
181
+ tools/server/bench/prompt_tokens_seconds.jpg
182
+ tools/server/bench/predicted_tokens_seconds.jpg
183
+ tools/server/bench/kv_cache_usage_ratio.jpg
184
+ tools/server/bench/requests_processing.jpg
185
+
186
+ - name: Extract mermaid
187
+ id: set_mermaid
188
+ run: |
189
+ set -eux
190
+
191
+ cd tools/server/bench
192
+ PROMPT_TOKENS_SECONDS=$(cat prompt_tokens_seconds.mermaid)
193
+ echo "PROMPT_TOKENS_SECONDS<<EOF" >> $GITHUB_ENV
194
+ echo "$PROMPT_TOKENS_SECONDS" >> $GITHUB_ENV
195
+ echo "EOF" >> $GITHUB_ENV
196
+
197
+ PREDICTED_TOKENS_SECONDS=$(cat predicted_tokens_seconds.mermaid)
198
+ echo "PREDICTED_TOKENS_SECONDS<<EOF" >> $GITHUB_ENV
199
+ echo "$PREDICTED_TOKENS_SECONDS" >> $GITHUB_ENV
200
+ echo "EOF" >> $GITHUB_ENV
201
+
202
+ KV_CACHE_USAGE_RATIO=$(cat kv_cache_usage_ratio.mermaid)
203
+ echo "KV_CACHE_USAGE_RATIO<<EOF" >> $GITHUB_ENV
204
+ echo "$KV_CACHE_USAGE_RATIO" >> $GITHUB_ENV
205
+ echo "EOF" >> $GITHUB_ENV
206
+
207
+ REQUESTS_PROCESSING=$(cat requests_processing.mermaid)
208
+ echo "REQUESTS_PROCESSING<<EOF" >> $GITHUB_ENV
209
+ echo "$REQUESTS_PROCESSING" >> $GITHUB_ENV
210
+ echo "EOF" >> $GITHUB_ENV
211
+
212
+ - name: Extract image url
213
+ id: extract_image_url
214
+ continue-on-error: true
215
+ run: |
216
+ set -eux
217
+
218
+ echo "IMAGE_O=${{ fromJSON(steps.imgur_step.outputs.imgur_urls)[0] }}" >> $GITHUB_ENV
219
+ echo "IMAGE_1=${{ fromJSON(steps.imgur_step.outputs.imgur_urls)[1] }}" >> $GITHUB_ENV
220
+ echo "IMAGE_2=${{ fromJSON(steps.imgur_step.outputs.imgur_urls)[2] }}" >> $GITHUB_ENV
221
+ echo "IMAGE_3=${{ fromJSON(steps.imgur_step.outputs.imgur_urls)[3] }}" >> $GITHUB_ENV
222
+
223
+ - name: Comment PR
224
+ uses: mshick/add-pr-comment@v2
225
+ id: comment_pr
226
+ if: ${{ github.event.pull_request != '' && matrix.pr_comment_enabled == 'true' }}
227
+ with:
228
+ message-id: bench-server-${{ github.job }}-${{ env.RUNNER_LABEL }}-${{ matrix.model }}-${{ matrix.ftype }}
229
+ message: |
230
+ <p align="center">
231
+
232
+ 📈 **llama.cpp server** for _${{ github.job }}_ on _${{ env.RUNNER_LABEL }}_ for `${{ matrix.model }}`-`${{ matrix.ftype }}`: **${{ env.BENCH_ITERATIONS}} iterations** 🚀
233
+
234
+ </p>
235
+
236
+ <details>
237
+
238
+ <summary>Expand details for performance related PR only</summary>
239
+
240
+ - Concurrent users: ${{ env.N_USERS }}, duration: ${{ github.event.inputs.duration || env.DURATION }}
241
+ - HTTP request : avg=${{ env.HTTP_REQ_DURATION_AVG }}ms p(95)=${{ env.HTTP_REQ_DURATION_P_95_ }}ms fails=${{ env.HTTP_REQ_FAILED_PASSES }}, finish reason: stop=${{ env.LLAMACPP_COMPLETIONS_STOP_RATE_PASSES }} truncated=${{ env.LLAMACPP_COMPLETIONS_TRUNCATED_RATE_PASSES }}
242
+ - Prompt processing (pp): avg=${{ env.LLAMACPP_PROMPT_PROCESSING_SECOND_AVG }}tk/s p(95)=${{ env.LLAMACPP_PROMPT_PROCESSING_SECOND_P_95_ }}tk/s
243
+ - Token generation (tg): avg=${{ env.LLAMACPP_TOKENS_SECOND_AVG }}tk/s p(95)=${{ env.LLAMACPP_TOKENS_SECOND_P_95_ }}tk/s
244
+ - ${{ env.BENCH_GRAPH_XLABEL }}
245
+
246
+
247
+ <p align="center">
248
+
249
+ <img width="100%" height="100%" src="${{ env.IMAGE_O }}" alt="prompt_tokens_seconds" />
250
+
251
+ <details>
252
+
253
+ <summary>More</summary>
254
+
255
+ ```mermaid
256
+ ${{ env.PROMPT_TOKENS_SECONDS }}
257
+ ```
258
+
259
+ </details>
260
+
261
+ <img width="100%" height="100%" src="${{ env.IMAGE_1 }}" alt="predicted_tokens_seconds"/>
262
+
263
+ <details>
264
+ <summary>More</summary>
265
+
266
+ ```mermaid
267
+ ${{ env.PREDICTED_TOKENS_SECONDS }}
268
+ ```
269
+
270
+ </details>
271
+
272
+ </p>
273
+
274
+ <details>
275
+
276
+ <summary>Details</summary>
277
+
278
+ <p align="center">
279
+
280
+ <img width="100%" height="100%" src="${{ env.IMAGE_2 }}" alt="kv_cache_usage_ratio" />
281
+
282
+ <details>
283
+ <summary>More</summary>
284
+
285
+ ```mermaid
286
+ ${{ env.KV_CACHE_USAGE_RATIO }}
287
+ ```
288
+
289
+ </details>
290
+
291
+ <img width="100%" height="100%" src="${{ env.IMAGE_3 }}" alt="requests_processing"/>
292
+
293
+ <details>
294
+ <summary>More</summary>
295
+
296
+ ```mermaid
297
+ ${{ env.REQUESTS_PROCESSING }}
298
+ ```
299
+
300
+ </details>
301
+
302
+ </p>
303
+ </details>
304
+ </details>